/*	$NetBSD: genfs_io.c,v 1.1.14.2 2007/11/06 23:33:16 matt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.1.14.2 2007/11/06 23:33:16 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/mman.h>
#include <sys/file.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
    off_t, enum uio_rw);
static void genfs_dio_iodone(struct buf *);

static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
    void (*)(struct buf *));
static inline void genfs_rel_pages(struct vm_page **, int);

#define MAX_READ_PAGES	16	/* XXXUBC 16 */

int genfs_maxdio = MAXPHYS;

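/*
 * Release an array of pages after a failed or abandoned operation:
 * pages that were freshly allocated for this request (PG_FAKE) are
 * flagged PG_RELEASED so that uvm_page_unbusy() frees them, while
 * pre-existing pages are simply unbusied and kept.
 */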
static inline void
genfs_rel_pages(struct vm_page **pgs, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE)
			continue;
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	uvm_lock_pageq();
	uvm_page_unbusy(pgs, npages);
	uvm_unlock_pageq();
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t newsize, diskeof, memeof;
	off_t offset, origoffset, startoffset, endoffset;
	daddr_t lbn, blkno;
	int i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
	int fs_bshift, fs_bsize, dev_bshift;
	int flags = ap->a_flags;
	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	struct vnode *vp = ap->a_vp;
	struct vnode *devvp;
	struct genfs_node *gp = VTOG(vp);
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs, *pgs_onstack[MAX_READ_PAGES];
	int pgs_size;
	kauth_cred_t cred = curlwp->l_cred;	/* XXXUBC curlwp */
	bool async = (flags & PGO_SYNCIO) == 0;
	bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	bool sawhole = false;
	bool has_trans = false;
	bool overwrite = (flags & PGO_OVERWRITE) != 0;
	bool blockalloc = write && (flags & PGO_NOBLOCKALLOC) == 0;
	voff_t origvsize;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
	    vp->v_type == VLNK || vp->v_type == VBLK);

	/* XXXUBC temp limit */
	if (*ap->a_count > MAX_READ_PAGES) {
		panic("genfs_getpages: too many pages");
	}

	pgs = pgs_onstack;
	pgs_size = sizeof(pgs_onstack);

startover:
	error = 0;
	origvsize = vp->v_size;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	GOP_SIZE(vp, origvsize, &diskeof, 0);
	if (flags & PGO_PASTEOF) {
#if defined(DIAGNOSTIC)
		off_t writeeof;
#endif /* defined(DIAGNOSTIC) */

		newsize = MAX(origvsize,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
#if defined(DIAGNOSTIC)
		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
		if (newsize > round_page(writeeof)) {
			panic("%s: past eof", __func__);
		}
#endif /* defined(DIAGNOSTIC) */
	} else {
		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			simple_unlock(&uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
		    origoffset, *ap->a_count, memeof,0);
		error = EINVAL;
		goto out_err;
	}

	/* uobj is locked */

	if ((flags & PGO_NOTIMESTAMP) == 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		int updflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
			updflags = GOP_UPDATE_ACCESSED;
		}
		if (write) {
			updflags |= GOP_UPDATE_MODIFIED;
		}
		if (updflags != 0) {
			GOP_MARKUPDATE(vp, updflags);
		}
	}

	if (write) {
		gp->g_dirtygen++;
		if ((vp->v_iflag & VI_ONWORKLST) == 0) {
			vn_syncer_add_to_worklist(vp, filedelay);
		}
		if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
			vp->v_iflag |= VI_WRMAPDIRTY;
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		int nfound;

		npages = *ap->a_count;
#if defined(DEBUG)
		for (i = 0; i < npages; i++) {
			pg = ap->a_m[i];
			KASSERT(pg == NULL || pg == PGO_DONTCARE);
		}
#endif /* defined(DEBUG) */
		nfound = uvn_findpages(uobj, origoffset, &npages,
		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(write ? UFP_NORDONLY : 0));
		KASSERT(npages == *ap->a_count);
		if (nfound == 0) {
			error = EBUSY;
			goto out_err;
		}
		if (!rw_tryenter(&gp->g_glock, RW_READER)) {
			genfs_rel_pages(ap->a_m, npages);

			/*
			 * restore the array.
			 */

			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];

				if (pg != NULL && pg != PGO_DONTCARE) {
					ap->a_m[i] = NULL;
				}
			}
		} else {
			rw_exit(&gp->g_glock);
		}
		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
		goto out_err;
	}
	simple_unlock(&uobj->vmobjlock);

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	if (vp->v_type != VBLK) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
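	/*
	 * fs_bshift is the log2 of the filesystem block size and governs
	 * how far the request is widened and how VOP_BMAP() is addressed;
	 * dev_bshift is the log2 of the device sector size (DEV_BSHIFT is
	 * 9, i.e. 512-byte sectors) and scales b_blkno further down.
	 */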
	fs_bsize = 1 << fs_bshift;

	orignpages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignpages;
	startoffset = origoffset & ~(fs_bsize - 1);
	endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
	    fs_bsize - 1) & ~(fs_bsize - 1));
	endoffset = MIN(endoffset, round_page(memeof));
	ridx = (origoffset - startoffset) >> PAGE_SHIFT;
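	/*
	 * Worked example (illustrative numbers): with 4KB pages and an
	 * 8KB filesystem block, a one-page request at origoffset 0x3000
	 * widens to startoffset 0x2000 and endoffset 0x4000 so that whole
	 * blocks are covered; the caller's page then sits at ridx 1 in
	 * the two-page pgs[] array.
	 */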

	pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
		if (pgs == NULL) {
			pgs = pgs_onstack;
			error = ENOMEM;
			goto out_err;
		}
	} else {
		/* pgs == pgs_onstack */
		memset(pgs, 0, pgs_size);
	}
	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
	    ridx, npages, startoffset, endoffset);

	if (!has_trans) {
		fstrans_start(vp->v_mount, FSTRANS_SHARED);
		has_trans = true;
	}

	/*
	 * hold g_glock to prevent a race with truncate.
	 *
	 * check if our idea of v_size is still valid.
	 */

	if (blockalloc) {
		rw_enter(&gp->g_glock, RW_WRITER);
	} else {
		rw_enter(&gp->g_glock, RW_READER);
	}
	simple_lock(&uobj->vmobjlock);
	if (vp->v_size < origvsize) {
		rw_exit(&gp->g_glock);
		if (pgs != pgs_onstack)
			kmem_free(pgs, pgs_size);
		goto startover;
	}

	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
	    async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
		rw_exit(&gp->g_glock);
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignpages);
		simple_unlock(&uobj->vmobjlock);
		error = EBUSY;
		goto out_err;
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg1 = pgs[ridx + i];

		if ((pg1->flags & PG_FAKE) ||
		    (blockalloc && (pg1->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		rw_exit(&gp->g_glock);
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		npages += ridx;
		goto out;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (overwrite) {
		rw_exit(&gp->g_glock);
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg1 = pgs[ridx + i];

			pg1->flags &= ~(PG_RDONLY|PG_CLEAN);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignpages) {

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignpages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			rw_exit(&gp->g_glock);
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			simple_unlock(&uobj->vmobjlock);
			error = EBUSY;
			goto out_err;
		}
	}
	simple_unlock(&uobj->vmobjlock);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);

	mbp = getiobuf();
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_READ|(async ? B_CALL|B_ASYNC : 0);
	mbp->b_iodone = (async ? uvm_aio_biodone : 0);
	mbp->b_vp = vp;
	if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
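	/*
	 * A synchronous request has a caller sleeping on the result, so
	 * its I/O is queued time-critical; asynchronous read-ahead only
	 * gets the weaker time-limited priority.
	 */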

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * skip over pages which are not PG_FAKE since in that case they have
	 * valid data that we need to preserve.
	 */

	tailstart = bytes;
	while (tailbytes > 0) {
		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);

		KASSERT(len <= tailbytes);
		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
			memset((void *)(kva + tailstart), 0, len);
			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
			    kva, tailstart, len, 0);
		}
		tailstart += len;
		tailbytes -= len;
	}
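	/*
	 * E.g. (4KB pages, illustrative): a two-page request with only
	 * 0x1800 bytes on disk leaves tailbytes = 0x800, so the loop
	 * makes one pass zeroing kva+0x1800..kva+0x1fff; a longer tail
	 * would continue a full page at a time.
	 */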

	/*
	 * now loop over the pages, reading as needed.
	 */

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = true;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn, error,0,0);
			skipbytes += bytes;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno < 0) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);

			sawhole = true;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			for (i = 0; i < holepages; i++) {
				if (write) {
					pgs[pidx + i]->flags &= ~PG_CLEAN;
				}
				if (!blockalloc) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			bp = getiobuf();
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
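		/*
		 * E.g. (illustrative): with an 8KB block and 512-byte
		 * sectors, an offset 0x1000 bytes into lbn's block adds
		 * 0x1000 >> 9 = 8 sectors to the blkno VOP_BMAP() returned.
		 */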

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, iobytes, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		rw_exit(&gp->g_glock);
		error = 0;
		goto out_err;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}
	putiobuf(mbp);
	uvm_pagermapout(kva, npages);

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
		    cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
		    startoffset, npages << PAGE_SHIFT, error,0);
		if (!error) {
			for (i = 0; i < npages; i++) {
				if (pgs[i] == NULL) {
					continue;
				}
				pgs[i]->flags &= ~(PG_CLEAN|PG_RDONLY);
				UVMHIST_LOG(ubchist, "mark dirty pg %p",
				    pgs[i],0,0,0);
			}
		}
	}
	rw_exit(&gp->g_glock);
	simple_lock(&uobj->vmobjlock);

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		for (i = 0; i < npages; i++) {
			if (pgs[i] == NULL) {
				continue;
			}
			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
			    pgs[i], pgs[i]->flags, 0,0);
			if (pgs[i]->flags & PG_FAKE) {
				pgs[i]->flags |= PG_RELEASED;
			}
		}
		uvm_lock_pageq();
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
		goto out_err;
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
	error = 0;
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
		    pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			pg->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
		}
		KASSERT(!write || !blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignpages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
			    pg, pg->offset,0,0);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageenqueue(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignpages * sizeof(struct vm_page *));
	}

out_err:
	if (pgs != pgs_onstack)
		kmem_free(pgs, pgs_size);
	if (has_trans)
		fstrans_done(vp->v_mount);
	return (error);
}
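/*
 * A filesystem normally plugs the routines in this file straight into
 * its vnode operations table; a minimal sketch (following the usual
 * NetBSD vnodeop_entries pattern):
 *
 *	{ &vop_getpages_desc, genfs_getpages },
 *	{ &vop_putpages_desc, genfs_putpages },
 */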

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we return with the
 *	object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.  if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	range by an estimate of the relatively higher cost of the hash lookup.
 */

int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;

	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
	    ap->a_flags, NULL);
}

int
genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff, int flags,
    struct vm_page **busypg)
{
	struct uvm_object *uobj = &vp->v_uobj;
	struct simplelock *slock = &uobj->vmobjlock;
	off_t off;
	/* Even for strange MAXPHYS, the shift rounds down to a page */
#define maxpages (MAXPHYS >> PAGE_SHIFT)
	int i, s, error, npages, nback;
	int freeflag;
	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
	bool wasclean, by_list, needs_clean, yld;
	bool async = (flags & PGO_SYNCIO) == 0;
	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
	struct lwp *l = curlwp ? curlwp : &lwp0;
	struct genfs_node *gp = VTOG(vp);
	int dirtygen;
	bool modified = false;
	bool has_trans = false;
	bool cleanall;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
	    vp, uobj->uo_npages, startoff, endoff - startoff);

	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
	if (uobj->uo_npages == 0) {
		s = splbio();
		if (vp->v_iflag & VI_ONWORKLST) {
			vp->v_iflag &= ~VI_WRMAPDIRTY;
			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
				vn_syncer_remove_from_worklist(vp);
		}
		splx(s);
		simple_unlock(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	if ((flags & PGO_CLEANIT) != 0) {
		simple_unlock(slock);
		if (pagedaemon) {
			error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
			if (error)
				return error;
		} else
			fstrans_start(vp->v_mount, FSTRANS_LAZY);
		has_trans = true;
		simple_lock(slock);
	}

	error = 0;
	s = splbio();
	simple_lock(&global_v_numoutput_slock);
	wasclean = (vp->v_numoutput == 0);
	simple_unlock(&global_v_numoutput_slock);
	splx(s);
	off = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
	by_list = (uobj->uo_npages <=
	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);

#if !defined(DEBUG)
	/*
	 * if this vnode is known not to have dirty pages,
	 * don't bother to clean it out.
	 */

	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
			goto skip_scan;
		}
		flags &= ~PGO_CLEANIT;
	}
#endif /* !defined(DEBUG) */

	/*
	 * start the loop.  when scanning by list, hold the last page
	 * in the list before we start.  pages allocated after we start
	 * will be added to the end of the list, so we can stop at the
	 * current last page.
	 */

	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
	    (vp->v_iflag & VI_ONWORKLST) != 0;
	dirtygen = gp->g_dirtygen;
	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
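	/*
	 * curmp and endmp are fake marker pages linked onto the object's
	 * memq: endmp marks where the scan must stop, and curmp holds our
	 * place whenever the object lock has to be dropped mid-list.
	 */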
	if (by_list) {
		curmp.uobject = uobj;
		curmp.offset = (voff_t)-1;
		curmp.flags = PG_BUSY;
		endmp.uobject = uobj;
		endmp.offset = (voff_t)-1;
		endmp.flags = PG_BUSY;
		pg = TAILQ_FIRST(&uobj->memq);
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
		uvm_lwp_hold(l);
	} else {
		pg = uvm_pagelookup(uobj, off);
	}
	nextpg = NULL;
	while (by_list || off < endoff) {

		/*
		 * if the current page is not interesting, move on to the next.
		 */

		KASSERT(pg == NULL || pg->uobject == uobj);
		KASSERT(pg == NULL ||
		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & PG_BUSY) != 0);
		if (by_list) {
			if (pg == &endmp) {
				break;
			}
			if (pg->offset < startoff || pg->offset >= endoff ||
			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
					wasclean = false;
				}
				pg = TAILQ_NEXT(pg, listq);
				continue;
			}
			off = pg->offset;
		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
			if (pg != NULL) {
				wasclean = false;
			}
			off += PAGE_SIZE;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if the current page needs to be cleaned and it's busy,
		 * wait for it to become unbusy.
		 */

		yld = (l->l_cpu->ci_schedstate.spc_flags &
		    SPCF_SHOULDYIELD) && !pagedaemon;
		if (pg->flags & PG_BUSY || yld) {
			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
				error = EDEADLK;
				if (busypg != NULL)
					*busypg = pg;
				break;
			}
			if (pagedaemon) {
				/*
				 * someone has taken the page while we
				 * dropped the lock for fstrans_start.
				 */
				break;
			}
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
				UVMHIST_LOG(ubchist, "curmp next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
			}
			if (yld) {
				simple_unlock(slock);
				preempt();
				simple_lock(slock);
			} else {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
				simple_lock(slock);
			}
			if (by_list) {
				UVMHIST_LOG(ubchist, "after next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			} else {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
		 */

		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
		} else if (flags & PGO_CLEANIT) {

			/*
			 * if we still have some hope to pull this vnode off
			 * from the syncer queue, write-protect the page.
			 */

			if (cleanall && wasclean &&
			    gp->g_dirtygen == dirtygen) {

				/*
				 * uobj pages get wired only by uvm_fault
				 * where uobj is locked.
				 */

				if (pg->wire_count == 0) {
					pmap_page_protect(pg,
					    VM_PROT_READ|VM_PROT_EXECUTE);
				} else {
					cleanall = false;
				}
			}
		}

		if (flags & PGO_CLEANIT) {
			needs_clean = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
		} else {
			needs_clean = false;
		}
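		/*
		 * The page needs cleaning if the MMU's modified bit was
		 * set (pmap_clear_modify() reports the old state while
		 * clearing it) or if PG_CLEAN was already clear.  Setting
		 * PG_CLEAN here is safe: the page is either about to be
		 * written out below or was already clean.
		 */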

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty,
		 * but they will be returned to us marked clean.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			KDASSERT((vp->v_iflag & VI_ONWORKLST));
			wasclean = false;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			/*
			 * first look backward.
			 */

			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
			nback = npages;
			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
			if (nback) {
				memmove(&pgs[0], &pgs[npages - nback],
				    nback * sizeof(pgs[0]));
				if (npages - nback < nback)
					memset(&pgs[nback], 0,
					    (npages - nback) * sizeof(pgs[0]));
				else
					memset(&pgs[npages - nback], 0,
					    nback * sizeof(pgs[0]));
			}
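			/*
			 * The backward scan fills slots from the far end
			 * of the array towards the front, so the nback
			 * pages it found sit in pgs[npages - nback ..
			 * npages - 1]; shift them to the front and zero
			 * the slots they vacated.
			 */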

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 */

			npages = maxpages - nback - 1;
			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
			    &pgs[nback + 1],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
			npages += nback + 1;
		} else {
			pgs[0] = pg;
			npages = 1;
			nback = 0;
		}

		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_lock_pageq();
		}
		for (i = 0; i < npages; i++) {
			tpg = pgs[i];
			KASSERT(tpg->uobject == uobj);
			if (by_list && tpg == TAILQ_NEXT(pg, listq))
				pg = tpg;
			if (tpg->offset < startoff || tpg->offset >= endoff)
				continue;
			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
				(void) pmap_clear_reference(tpg);
				uvm_pagedeactivate(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (pagedaemon) {
						uvmexp.paging++;
						uvm_pagedequeue(tpg);
					}
				} else {

					/*
					 * ``page is not busy''
					 * implies that npages is 1
					 * and needs_clean is false.
					 */

					nextpg = TAILQ_NEXT(tpg, listq);
					uvm_pagefree(tpg);
					if (pagedaemon)
						uvmexp.pdfreed++;
				}
			}
		}
		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_unlock_pageq();
		}
		if (needs_clean) {
			modified = true;

			/*
			 * start the i/o.  if we're traversing by list,
			 * keep our place in the list with a marker page.
			 */

			if (by_list) {
				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
				    listq);
			}
			simple_unlock(slock);
			error = GOP_WRITE(vp, pgs, npages, flags);
			simple_lock(slock);
			if (by_list) {
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			}
			if (error) {
				break;
			}
			if (by_list) {
				continue;
			}
		}

		/*
		 * find the next page and continue if there was no error.
		 */

		if (by_list) {
			if (nextpg) {
				pg = nextpg;
				nextpg = NULL;
			} else {
				pg = TAILQ_NEXT(pg, listq);
			}
		} else {
			off += (npages - nback) << PAGE_SHIFT;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
		uvm_lwp_rele(l);
	}

	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
	}

	/*
	 * if we're cleaning and there was nothing to clean,
	 * take us off the syncer list.  if we started any i/o
	 * and we're doing sync i/o, wait for all writes to finish.
	 */

	s = splbio();
	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
	    (vp->v_iflag & VI_ONWORKLST) != 0) {
		vp->v_iflag &= ~VI_WRMAPDIRTY;
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
			vn_syncer_remove_from_worklist(vp);
	}
	splx(s);

#if !defined(DEBUG)
skip_scan:
#endif /* !defined(DEBUG) */
	if (!wasclean && !async) {
		s = splbio();
		/*
		 * XXX - we want simple_unlock(&global_v_numoutput_slock);
		 *	 but the slot in ltsleep() is taken!
		 * XXX - try to recover from missed wakeups with a timeout..
		 *	 must think of something better.
		 */
		while (vp->v_numoutput != 0) {
			vp->v_iflag |= VI_BWAIT;
			UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, false,
			    "genput2", hz);
			simple_lock(slock);
		}
		splx(s);
	}
	simple_unlock(slock);

	if (has_trans)
		fstrans_done(vp->v_mount);

	return (error);
}

int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
	    uvm_aio_biodone);

	return error;
}
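/*
 * GOP_WRITE() usually resolves to genfs_gop_write() through the
 * filesystem's genfs_ops; a filesystem that needs special write-back
 * handling (LFS, for example) supplies its own gop_write hook instead.
 */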
1196 1.1.14.2 matt
1197 1.1.14.2 matt /*
1198 1.1.14.2 matt * Backend routine for doing I/O to vnode pages. Pages are already locked
1199 1.1.14.2 matt * and mapped into kernel memory. Here we just look up the underlying
1200 1.1.14.2 matt * device block addresses and call the strategy routine.
1201 1.1.14.2 matt */
1202 1.1.14.2 matt
1203 1.1.14.2 matt static int
1204 1.1.14.2 matt genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
1205 1.1.14.2 matt enum uio_rw rw, void (*iodone)(struct buf *))
1206 1.1.14.2 matt {
1207 1.1.14.2 matt int s, error, run;
1208 1.1.14.2 matt int fs_bshift, dev_bshift;
1209 1.1.14.2 matt off_t eof, offset, startoffset;
1210 1.1.14.2 matt size_t bytes, iobytes, skipbytes;
1211 1.1.14.2 matt daddr_t lbn, blkno;
1212 1.1.14.2 matt struct buf *mbp, *bp;
1213 1.1.14.2 matt struct vnode *devvp;
1214 1.1.14.2 matt bool async = (flags & PGO_SYNCIO) == 0;
1215 1.1.14.2 matt bool write = rw == UIO_WRITE;
1216 1.1.14.2 matt int brw = write ? B_WRITE : B_READ;
1217 1.1.14.2 matt UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
1218 1.1.14.2 matt
1219 1.1.14.2 matt UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
1220 1.1.14.2 matt vp, kva, len, flags);
1221 1.1.14.2 matt
1222 1.1.14.2 matt KASSERT(vp->v_size <= vp->v_writesize);
1223 1.1.14.2 matt GOP_SIZE(vp, vp->v_writesize, &eof, 0);
1224 1.1.14.2 matt if (vp->v_type != VBLK) {
1225 1.1.14.2 matt fs_bshift = vp->v_mount->mnt_fs_bshift;
1226 1.1.14.2 matt dev_bshift = vp->v_mount->mnt_dev_bshift;
1227 1.1.14.2 matt } else {
1228 1.1.14.2 matt fs_bshift = DEV_BSHIFT;
1229 1.1.14.2 matt dev_bshift = DEV_BSHIFT;
1230 1.1.14.2 matt }
1231 1.1.14.2 matt error = 0;
1232 1.1.14.2 matt startoffset = off;
1233 1.1.14.2 matt bytes = MIN(len, eof - startoffset);
1234 1.1.14.2 matt skipbytes = 0;
1235 1.1.14.2 matt KASSERT(bytes != 0);
1236 1.1.14.2 matt
1237 1.1.14.2 matt if (write) {
1238 1.1.14.2 matt s = splbio();
1239 1.1.14.2 matt simple_lock(&global_v_numoutput_slock);
1240 1.1.14.2 matt vp->v_numoutput += 2;
1241 1.1.14.2 matt simple_unlock(&global_v_numoutput_slock);
1242 1.1.14.2 matt splx(s);
1243 1.1.14.2 matt }
1244 1.1.14.2 matt mbp = getiobuf();
1245 1.1.14.2 matt UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
1246 1.1.14.2 matt vp, mbp, vp->v_numoutput, bytes);
1247 1.1.14.2 matt mbp->b_bufsize = len;
1248 1.1.14.2 matt mbp->b_data = (void *)kva;
1249 1.1.14.2 matt mbp->b_resid = mbp->b_bcount = bytes;
1250 1.1.14.2 matt mbp->b_flags = B_BUSY | brw | B_AGE | (async ? (B_CALL | B_ASYNC) : 0);
1251 1.1.14.2 matt mbp->b_iodone = iodone;
1252 1.1.14.2 matt mbp->b_vp = vp;
1253 1.1.14.2 matt if (curlwp == uvm.pagedaemon_lwp)
1254 1.1.14.2 matt BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
1255 1.1.14.2 matt else if (async)
1256 1.1.14.2 matt BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
1257 1.1.14.2 matt else
1258 1.1.14.2 matt BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
1259 1.1.14.2 matt
1260 1.1.14.2 matt bp = NULL;
1261 1.1.14.2 matt for (offset = startoffset;
1262 1.1.14.2 matt bytes > 0;
1263 1.1.14.2 matt offset += iobytes, bytes -= iobytes) {
1264 1.1.14.2 matt lbn = offset >> fs_bshift;
1265 1.1.14.2 matt error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
1266 1.1.14.2 matt if (error) {
1267 1.1.14.2 matt UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
1268 1.1.14.2 matt skipbytes += bytes;
1269 1.1.14.2 matt bytes = 0;
1270 1.1.14.2 matt break;
1271 1.1.14.2 matt }
1272 1.1.14.2 matt
1273 1.1.14.2 matt iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
1274 1.1.14.2 matt bytes);
1275 1.1.14.2 matt if (blkno == (daddr_t)-1) {
1276 1.1.14.2 matt if (!write) {
1277 1.1.14.2 matt memset((char *)kva + (offset - startoffset), 0,
1278 1.1.14.2 matt iobytes);
1279 1.1.14.2 matt }
1280 1.1.14.2 matt skipbytes += iobytes;
1281 1.1.14.2 matt continue;
1282 1.1.14.2 matt }
1283 1.1.14.2 matt
1284 1.1.14.2 matt /* if it's really one i/o, don't make a second buf */
1285 1.1.14.2 matt if (offset == startoffset && iobytes == bytes) {
1286 1.1.14.2 matt bp = mbp;
1287 1.1.14.2 matt } else {
1288 1.1.14.2 matt UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
1289 1.1.14.2 matt vp, bp, vp->v_numoutput, 0);
1290 1.1.14.2 matt bp = getiobuf();
1291 1.1.14.2 matt nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
1292 1.1.14.2 matt }
1293 1.1.14.2 matt bp->b_lblkno = 0;
1294 1.1.14.2 matt
1295 1.1.14.2 matt /* adjust physical blkno for partial blocks */
1296 1.1.14.2 matt bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
1297 1.1.14.2 matt dev_bshift);
1298 1.1.14.2 matt UVMHIST_LOG(ubchist,
1299 1.1.14.2 matt "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
1300 1.1.14.2 matt vp, offset, bp->b_bcount, bp->b_blkno);
1301 1.1.14.2 matt
1302 1.1.14.2 matt VOP_STRATEGY(devvp, bp);
1303 1.1.14.2 matt }
1304 1.1.14.2 matt if (skipbytes) {
1305 1.1.14.2 matt UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
1306 1.1.14.2 matt }
1307 1.1.14.2 matt nestiobuf_done(mbp, skipbytes, error);
1308 1.1.14.2 matt if (async) {
1309 1.1.14.2 matt UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
1310 1.1.14.2 matt return (0);
1311 1.1.14.2 matt }
	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
	error = biowait(mbp);
	s = splbio();
	(*iodone)(mbp);
	splx(s);
	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
	return (error);
}

/*
 * VOP_PUTPAGES() for vnodes which never have pages.
 */

int
genfs_null_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(vp->v_uobj.uo_npages == 0);
	simple_unlock(&vp->v_interlock);
	return (0);
}

int
genfs_compat_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t origoffset;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs;
	vaddr_t kva;
	int i, error, orignpages, npages;
	struct iovec iov;
	struct uio uio;
	kauth_cred_t cred = curlwp->l_cred;
	bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	pgs = ap->a_m;

	if (write && (vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if (ap->a_flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT | UFP_NOALLOC | (write ? UFP_NORDONLY : 0));

		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
	}
	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
		simple_unlock(&uobj->vmobjlock);
		return (EINVAL);
	}
	if ((ap->a_flags & PGO_SYNCIO) == 0) {
		simple_unlock(&uobj->vmobjlock);
		return 0;
	}
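	/*
	 * Find (or allocate) all the pages in the range, then fill each
	 * missing (PG_FAKE) page through a temporary pager mapping with
	 * an ordinary VOP_READ, one page at a time.
	 */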
	npages = orignpages;
	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
	simple_unlock(&uobj->vmobjlock);
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if ((pg->flags & PG_FAKE) == 0) {
			continue;
		}
		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
		iov.iov_len = PAGE_SIZE;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
		uio.uio_rw = UIO_READ;
		uio.uio_resid = PAGE_SIZE;
		UIO_SETUP_SYSSPACE(&uio);
		/* XXX vn_lock */
		error = VOP_READ(vp, &uio, 0, cred);
		if (error) {
			break;
		}
		if (uio.uio_resid) {
			memset(iov.iov_base, 0, uio.uio_resid);
		}
	}
	uvm_pagermapout(kva, npages);
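	/*
	 * Fix up the page state: on error, pages still missing their
	 * data are released; all others are marked unmodified and put
	 * on the active queue.
	 */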
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (error && (pg->flags & PG_FAKE) != 0) {
			pg->flags |= PG_RELEASED;
		} else {
			pmap_clear_modify(pg);
			uvm_pageactivate(pg);
		}
	}
	if (error) {
		uvm_page_unbusy(pgs, npages);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	return (error);
}

int
genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t offset;
	struct iovec iov;
	struct uio uio;
	kauth_cred_t cred = curlwp->l_cred;
	struct buf *bp;
	vaddr_t kva;
	int s, error;

	offset = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

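	/*
	 * Push the pages out with a single VOP_WRITE through the pager
	 * mapping instead of issuing device I/O directly.
	 */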
	iov.iov_base = (void *)kva;
	iov.iov_len = npages << PAGE_SHIFT;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_rw = UIO_WRITE;
	uio.uio_resid = npages << PAGE_SHIFT;
	UIO_SETUP_SYSSPACE(&uio);
	/* XXX vn_lock */
	error = VOP_WRITE(vp, &uio, 0, cred);

	s = splbio();
	V_INCR_NUMOUTPUT(vp);
	splx(s);
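	/*
	 * The data has already been written above; construct a buf
	 * describing the finished transfer so that uvm_aio_aiodone()
	 * can unmap the pages and complete the usual accounting.
	 */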

	bp = getiobuf();
	bp->b_flags = B_BUSY | B_WRITE | B_AGE;
	bp->b_vp = vp;
	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
	bp->b_data = (char *)kva;
	bp->b_bcount = npages << PAGE_SHIFT;
	bp->b_bufsize = npages << PAGE_SHIFT;
	bp->b_resid = 0;
	bp->b_error = error;
	uvm_aio_aiodone(bp);
	return (error);
}

/*
 * Process a uio using direct I/O.  If we reach a part of the request
 * which cannot be processed in this fashion for some reason, just return.
 * The caller must handle some additional part of the request using
 * buffered I/O before trying direct I/O again.
 */

void
genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct vmspace *vs;
	struct iovec *iov;
	vaddr_t va;
	size_t len;
	const int mask = DEV_BSIZE - 1;
	int error;

	/*
	 * We only support direct I/O to user space for now.
	 */

	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
		return;
	}

	/*
	 * If the vnode is mapped, we would need to get the getpages lock
	 * to stabilize the bmap, but then we would get into trouble while
	 * locking the pages if the pages belong to this same vnode (or a
	 * multi-vnode cascade to the same effect).  Just fall back to
	 * buffered I/O if the vnode is mapped to avoid this mess.
	 */

	if (vp->v_vflag & VV_MAPPED) {
		return;
	}

	/*
	 * Do as much of the uio as possible with direct I/O.
	 */

	vs = uio->uio_vmspace;
	while (uio->uio_resid) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
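		/*
		 * Clip the chunk to genfs_maxdio and trim it down to a
		 * multiple of the device block size.
		 */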
		va = (vaddr_t)iov->iov_base;
		len = MIN(iov->iov_len, genfs_maxdio);
		len &= ~mask;

		/*
		 * If the next chunk is smaller than DEV_BSIZE or extends past
		 * the current EOF, then fall back to buffered I/O.
		 */

		if (len == 0 || uio->uio_offset + len > vp->v_size) {
			return;
		}

		/*
		 * Check alignment.  The file offset must be at least
		 * sector-aligned.  The exact constraint on memory alignment
		 * is very hardware-dependent, but requiring sector-aligned
		 * addresses there too is safe.
		 */

		if (uio->uio_offset & mask || va & mask) {
			return;
		}
		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
		    uio->uio_rw);
		if (error) {
			break;
		}
		iov->iov_base = (char *)iov->iov_base + len;
		iov->iov_len -= len;
		uio->uio_offset += len;
		uio->uio_resid -= len;
	}
}

/*
 * Iodone routine for direct I/O.  We don't do much here since the request is
 * always synchronous, so the caller will do most of the work after biowait().
 */

static void
genfs_dio_iodone(struct buf *bp)
{
	int s;

	KASSERT((bp->b_flags & B_ASYNC) == 0);
	s = splbio();
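	/*
	 * A write buf was counted against the vnode's output count when
	 * it was issued, so credit the completion here via vwakeup().
	 */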
	if ((bp->b_flags & (B_READ | B_AGE)) == B_AGE) {
		vwakeup(bp);
	}
	putiobuf(bp);
	splx(s);
}

/*
 * Process one chunk of a direct I/O request.
 */

static int
genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
    off_t off, enum uio_rw rw)
{
	struct vm_map *map;
	struct pmap *upm, *kpm;
	size_t klen = round_page(uva + len) - trunc_page(uva);
	off_t spoff, epoff;
	vaddr_t kva, puva;
	paddr_t pa;
	vm_prot_t prot;
	int error, rv, poff, koff;
	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO |
	    (rw == UIO_WRITE ? PGO_FREE : 0);

	/*
	 * For writes, verify that this range of the file already has fully
	 * allocated backing store.  If there are any holes, just punt and
	 * make the caller take the buffered write path.
	 */

	if (rw == UIO_WRITE) {
		daddr_t lbn, elbn, blkno;
		int bsize, bshift, run;

		bshift = vp->v_mount->mnt_fs_bshift;
		bsize = 1 << bshift;
		lbn = off >> bshift;
		elbn = (off + len + bsize - 1) >> bshift;
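		/* Step through the range one contiguous run at a time. */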
		while (lbn < elbn) {
			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
			if (error) {
				return error;
			}
			if (blkno == (daddr_t)-1) {
				return ENOSPC;
			}
			lbn += 1 + run;
		}
	}

	/*
	 * Flush any cached pages for parts of the file that we're about to
	 * access.  If we're writing, invalidate pages as well.
	 */

	spoff = trunc_page(off);
	epoff = round_page(off + len);
	simple_lock(&vp->v_interlock);
	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
	if (error) {
		return error;
	}

	/*
	 * Wire the user pages and remap them into kernel memory.
	 */

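	/*
	 * Note the inversion: reading the file stores into the user
	 * buffer, which requires write access to the buffer; writing
	 * the file only needs to read it.
	 */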
	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
	error = uvm_vslock(vs, (void *)uva, len, prot);
	if (error) {
		return error;
	}

	map = &vs->vm_map;
	upm = vm_map_pmap(map);
	kpm = vm_map_pmap(kernel_map);
	kva = uvm_km_alloc(kernel_map, klen, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	puva = trunc_page(uva);
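	/*
	 * The user pages are wired, so pmap_extract() cannot fail;
	 * enter each underlying physical page into the kernel pmap.
	 */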
	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
		rv = pmap_extract(upm, puva + poff, &pa);
		KASSERT(rv);
		pmap_enter(kpm, kva + poff, pa, prot, prot | PMAP_WIRED);
	}
	pmap_update(kpm);

	/*
	 * Do the I/O.
	 */

	koff = uva - trunc_page(uva);
	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
	    genfs_dio_iodone);

	/*
	 * Tear down the kernel mapping.
	 */

	pmap_remove(kpm, kva, kva + klen);
	pmap_update(kpm);
	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);

	/*
	 * Unwire the user pages.
	 */

	uvm_vsunlock(vs, (void *)uva, len);
	return error;
}