1 /* $NetBSD: genfs_io.c,v 1.53.2.5 2011/11/30 14:31:29 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.53.2.5 2011/11/30 14:31:29 yamt Exp $");
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/kernel.h>
40 #include <sys/mount.h>
41 #include <sys/vnode.h>
42 #include <sys/kmem.h>
43 #include <sys/kauth.h>
44 #include <sys/fstrans.h>
45 #include <sys/buf.h>
46 #include <sys/radixtree.h>
47
48 #include <miscfs/genfs/genfs.h>
49 #include <miscfs/genfs/genfs_node.h>
50 #include <miscfs/specfs/specdev.h>
51 #include <miscfs/syncfs/syncfs.h>
52
53 #include <uvm/uvm.h>
54 #include <uvm/uvm_pager.h>
55 #include <uvm/uvm_page_array.h>
56
57 static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
58 off_t, enum uio_rw);
59 static void genfs_dio_iodone(struct buf *);
60
61 static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
62 void (*)(struct buf *));
63 static void genfs_rel_pages(struct vm_page **, int);
64 static void genfs_markdirty(struct vnode *);
65
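/*
 * genfs_maxdio is the upper bound on the number of bytes that
 * genfs_directio() below will process as a single direct-i/o chunk.
 */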
66 int genfs_maxdio = MAXPHYS;
67
68 static void
69 genfs_rel_pages(struct vm_page **pgs, int npages)
70 {
71 int i;
72
73 for (i = 0; i < npages; i++) {
74 struct vm_page *pg = pgs[i];
75
76 if (pg == NULL || pg == PGO_DONTCARE)
77 continue;
78 if (pg->flags & PG_FAKE) {
79 pg->flags |= PG_RELEASED;
80 }
81 }
82 mutex_enter(&uvm_pageqlock);
83 uvm_page_unbusy(pgs, npages);
84 mutex_exit(&uvm_pageqlock);
85 }
86
87 static void
88 genfs_markdirty(struct vnode *vp)
89 {
90
91 KASSERT(mutex_owned(vp->v_interlock));
92 if ((vp->v_iflag & VI_ONWORKLST) == 0) {
93 vn_syncer_add_to_worklist(vp, filedelay);
94 }
95 if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
96 vp->v_iflag |= VI_WRMAPDIRTY;
97 }
98 }
99
100 /*
101 * generic VM getpages routine.
102 * Return PG_BUSY pages for the given range,
103 * reading from backing store if necessary.
104 */
105
106 int
107 genfs_getpages(void *v)
108 {
109 struct vop_getpages_args /* {
110 struct vnode *a_vp;
111 voff_t a_offset;
112 struct vm_page **a_m;
113 int *a_count;
114 int a_centeridx;
115 vm_prot_t a_access_type;
116 int a_advice;
117 int a_flags;
118 } */ * const ap = v;
119
120 off_t diskeof, memeof;
121 int i, error, npages;
122 const int flags = ap->a_flags;
123 struct vnode * const vp = ap->a_vp;
124 struct uvm_object * const uobj = &vp->v_uobj;
125 kauth_cred_t const cred = curlwp->l_cred; /* XXXUBC curlwp */
126 const bool async = (flags & PGO_SYNCIO) == 0;
127 const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
128 const bool overwrite = (flags & PGO_OVERWRITE) != 0;
129 const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
130 const bool glocked = (flags & PGO_GLOCKHELD) != 0;
131 const bool need_wapbl = blockalloc && vp->v_mount->mnt_wapbl;
132 bool has_trans_wapbl = false;
133 UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
134
135 UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
136 vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
137
138 KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
139 vp->v_type == VLNK || vp->v_type == VBLK);
140
141 startover:
142 error = 0;
143 const voff_t origvsize = vp->v_size;
144 const off_t origoffset = ap->a_offset;
145 const int orignpages = *ap->a_count;
146
147 GOP_SIZE(vp, origvsize, &diskeof, 0);
148 if (flags & PGO_PASTEOF) {
149 off_t newsize;
150 #if defined(DIAGNOSTIC)
151 off_t writeeof;
152 #endif /* defined(DIAGNOSTIC) */
153
154 newsize = MAX(origvsize,
155 origoffset + (orignpages << PAGE_SHIFT));
156 GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
157 #if defined(DIAGNOSTIC)
158 GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
159 if (newsize > round_page(writeeof)) {
160 panic("%s: past eof: %" PRId64 " vs. %" PRId64,
161 __func__, newsize, round_page(writeeof));
162 }
163 #endif /* defined(DIAGNOSTIC) */
164 } else {
165 GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
166 }
167 KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
168 KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
169 KASSERT(orignpages > 0);
170
171 /*
172 * Bounds-check the request.
173 */
174
175 if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
176 if ((flags & PGO_LOCKED) == 0) {
177 mutex_exit(uobj->vmobjlock);
178 }
179 UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
180 origoffset, *ap->a_count, memeof,0);
181 error = EINVAL;
182 goto out_err;
183 }
184
185 /* uobj is locked */
186
187 if ((flags & PGO_NOTIMESTAMP) == 0 &&
188 (vp->v_type != VBLK ||
189 (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
190 int updflags = 0;
191
192 if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
193 updflags = GOP_UPDATE_ACCESSED;
194 }
195 if (memwrite) {
196 updflags |= GOP_UPDATE_MODIFIED;
197 }
198 if (updflags != 0) {
199 GOP_MARKUPDATE(vp, updflags);
200 }
201 }
202
203 /*
204 * For PGO_LOCKED requests, just return whatever's in memory.
205 */
206
207 if (flags & PGO_LOCKED) {
208 int nfound;
209 struct vm_page *pg;
210
211 KASSERT(!glocked);
212 npages = *ap->a_count;
213 #if defined(DEBUG)
214 for (i = 0; i < npages; i++) {
215 pg = ap->a_m[i];
216 KASSERT(pg == NULL || pg == PGO_DONTCARE);
217 }
218 #endif /* defined(DEBUG) */
219 nfound = uvn_findpages(uobj, origoffset, &npages,
220 ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
221 KASSERT(npages == *ap->a_count);
222 if (nfound == 0) {
223 error = EBUSY;
224 goto out_err;
225 }
226 if (!genfs_node_rdtrylock(vp)) {
227 genfs_rel_pages(ap->a_m, npages);
228
229 /*
230 * restore the array.
231 */
232
233 for (i = 0; i < npages; i++) {
234 pg = ap->a_m[i];
235
236 if (pg != NULL && pg != PGO_DONTCARE) {
237 ap->a_m[i] = NULL;
238 }
239 KASSERT(ap->a_m[i] == NULL ||
240 ap->a_m[i] == PGO_DONTCARE);
241 }
242 } else {
243 genfs_node_unlock(vp);
244 }
245 error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
246 if (error == 0 && memwrite) {
247 for (i = 0; i < npages; i++) {
248 pg = ap->a_m[i];
249 if (pg == NULL || pg == PGO_DONTCARE) {
250 continue;
251 }
252 if (uvm_pagegetdirty(pg) ==
253 UVM_PAGE_STATUS_CLEAN) {
254 uvm_pagemarkdirty(pg,
255 UVM_PAGE_STATUS_UNKNOWN);
256 }
257 }
258 genfs_markdirty(vp);
259 }
260 goto out_err;
261 }
262 mutex_exit(uobj->vmobjlock);
263
264 /*
265 * find the requested pages and make some simple checks.
266 * leave space in the page array for a whole block.
267 */
268
269 const int fs_bshift = (vp->v_type != VBLK) ?
270 vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
271 const int dev_bshift = (vp->v_type != VBLK) ?
272 vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
273 const int fs_bsize = 1 << fs_bshift;
274 #define blk_mask (fs_bsize - 1)
275 #define trunc_blk(x) ((x) & ~blk_mask)
276 #define round_blk(x) (((x) + blk_mask) & ~blk_mask)
277
278 const int orignmempages = MIN(orignpages,
279 round_page(memeof - origoffset) >> PAGE_SHIFT);
280 npages = orignmempages;
281 const off_t startoffset = trunc_blk(origoffset);
282 const off_t endoffset = MIN(
283 round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
284 round_page(memeof));
285 const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
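/*
 * Worked example (illustrative numbers, not taken from the code): with
 * 8 KB file system blocks (fs_bshift == 13) and 4 KB pages, a one-page
 * request at origoffset 0x5000 gives startoffset 0x4000, endoffset
 * 0x8000 (assuming EOF lies beyond that) and ridx == 1, so the pgs
 * array below is sized for four pages even though only one page was
 * asked for.
 */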
286
287 const int pgs_size = sizeof(struct vm_page *) *
288 ((endoffset - startoffset) >> PAGE_SHIFT);
289 struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];
290
291 if (pgs_size > sizeof(pgs_onstack)) {
292 pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
293 if (pgs == NULL) {
294 pgs = pgs_onstack;
295 error = ENOMEM;
296 goto out_err;
297 }
298 } else {
299 pgs = pgs_onstack;
300 (void)memset(pgs, 0, pgs_size);
301 }
302
303 UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
304 ridx, npages, startoffset, endoffset);
305
306 if (!has_trans_wapbl) {
307 fstrans_start(vp->v_mount, FSTRANS_SHARED);
308 /*
309 * XXX: This assumes that we come here only via
310 * the mmio path
311 */
312 if (need_wapbl) {
313 error = WAPBL_BEGIN(vp->v_mount);
314 if (error) {
315 fstrans_done(vp->v_mount);
316 goto out_err_free;
317 }
318 }
319 has_trans_wapbl = true;
320 }
321
322 /*
323 * hold g_glock to prevent a race with truncate.
324 *
325 * check if our idea of v_size is still valid.
326 */
327
328 KASSERT(!glocked || genfs_node_wrlocked(vp));
329 if (!glocked) {
330 if (blockalloc) {
331 genfs_node_wrlock(vp);
332 } else {
333 genfs_node_rdlock(vp);
334 }
335 }
336 mutex_enter(uobj->vmobjlock);
337 if (vp->v_size < origvsize) {
338 if (!glocked) {
339 genfs_node_unlock(vp);
340 }
341 if (pgs != pgs_onstack)
342 kmem_free(pgs, pgs_size);
343 goto startover;
344 }
345
346 if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
347 async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
348 if (!glocked) {
349 genfs_node_unlock(vp);
350 }
351 KASSERT(async != 0);
352 genfs_rel_pages(&pgs[ridx], orignmempages);
353 mutex_exit(uobj->vmobjlock);
354 error = EBUSY;
355 goto out_err_free;
356 }
357
358 /*
359 * if PGO_OVERWRITE is set, don't bother reading the pages.
360 */
361
362 if (overwrite) {
363 if (!glocked) {
364 genfs_node_unlock(vp);
365 }
366 UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
367
368 for (i = 0; i < npages; i++) {
369 struct vm_page *pg = pgs[ridx + i];
370
371 /*
372 * we should not see PG_HOLE pages here as it's a
373 * caller's responsibility to allocate blocks
374 * beforehand for the overwrite case.
375 */
376 KASSERT((pg->flags & PG_HOLE) == 0);
377 pg->flags &= ~PG_RDONLY;
378 /*
379 * mark the page dirty.
380 * otherwise another thread can do putpages and pull
381 * our vnode from syncer's queue before our caller does
382 * ubc_release.
383 */
384 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
385 }
386 npages += ridx;
387 goto out;
388 }
389
390 /*
391 * if the pages are already resident, just return them.
392 */
393
394 for (i = 0; i < npages; i++) {
395 struct vm_page *pg = pgs[ridx + i];
396
397 if ((pg->flags & PG_FAKE) ||
398 (memwrite && (pg->flags & (PG_RDONLY|PG_HOLE)) != 0)) {
399 break;
400 }
401 }
402 if (i == npages) {
403 if (!glocked) {
404 genfs_node_unlock(vp);
405 }
406 UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
407 npages += ridx;
408 goto out;
409 }
410
411 /*
412 * the page wasn't resident and we're not overwriting,
413 * so we're going to have to do some i/o.
414 * find any additional pages needed to cover the expanded range.
415 */
416
417 npages = (endoffset - startoffset) >> PAGE_SHIFT;
418 if (startoffset != origoffset || npages != orignmempages) {
419 int npgs;
420
421 /*
422 * we need to avoid deadlocks caused by locking
423 * additional pages at lower offsets than pages we
424 * already have locked. unlock them all and start over.
425 */
426
427 genfs_rel_pages(&pgs[ridx], orignmempages);
428 memset(pgs, 0, pgs_size);
429
430 UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
431 startoffset, endoffset, 0,0);
432 npgs = npages;
433 if (uvn_findpages(uobj, startoffset, &npgs, pgs,
434 async ? UFP_NOWAIT : UFP_ALL) != npages) {
435 if (!glocked) {
436 genfs_node_unlock(vp);
437 }
438 KASSERT(async != 0);
439 genfs_rel_pages(pgs, npages);
440 mutex_exit(uobj->vmobjlock);
441 error = EBUSY;
442 goto out_err_free;
443 }
444 }
445
446 mutex_exit(uobj->vmobjlock);
447
448 {
449 size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
450 vaddr_t kva;
451 struct buf *bp, *mbp;
452 bool sawhole = false;
453
454 /*
455 * read the desired page(s).
456 */
457
458 totalbytes = npages << PAGE_SHIFT;
459 bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
460 tailbytes = totalbytes - bytes;
461 skipbytes = 0;
462
463 kva = uvm_pagermapin(pgs, npages,
464 UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
465
466 mbp = getiobuf(vp, true);
467 mbp->b_bufsize = totalbytes;
468 mbp->b_data = (void *)kva;
469 mbp->b_resid = mbp->b_bcount = bytes;
470 mbp->b_cflags = BC_BUSY;
471 if (async) {
472 mbp->b_flags = B_READ | B_ASYNC;
473 mbp->b_iodone = uvm_aio_biodone;
474 } else {
475 mbp->b_flags = B_READ;
476 mbp->b_iodone = NULL;
477 }
478 if (async)
479 BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
480 else
481 BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
482
483 /*
484 * if EOF is in the middle of the range, zero the part past EOF.
485 * skip over pages which are not PG_FAKE since in that case they have
486 * valid data that we need to preserve.
487 */
488
489 tailstart = bytes;
490 while (tailbytes > 0) {
491 const int len = PAGE_SIZE - (tailstart & PAGE_MASK);
492
493 KASSERT(len <= tailbytes);
494 if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
495 memset((void *)(kva + tailstart), 0, len);
496 UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
497 kva, tailstart, len, 0);
498 }
499 tailstart += len;
500 tailbytes -= len;
501 }
502
503 /*
504 * now loop over the pages, reading as needed.
505 */
506
507 bp = NULL;
508 off_t offset;
509 for (offset = startoffset;
510 bytes > 0;
511 offset += iobytes, bytes -= iobytes) {
512 int run;
513 daddr_t lbn, blkno;
514 int pidx;
515 struct vnode *devvp;
516
517 /*
518 * skip pages which don't need to be read.
519 */
520
521 pidx = (offset - startoffset) >> PAGE_SHIFT;
522 while ((pgs[pidx]->flags & PG_FAKE) == 0) {
523 size_t b;
524
525 KASSERT((offset & (PAGE_SIZE - 1)) == 0);
526 if ((pgs[pidx]->flags & PG_HOLE)) {
527 sawhole = true;
528 }
529 b = MIN(PAGE_SIZE, bytes);
530 offset += b;
531 bytes -= b;
532 skipbytes += b;
533 pidx++;
534 UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
535 offset, 0,0,0);
536 if (bytes == 0) {
537 goto loopdone;
538 }
539 }
540
541 /*
542 * bmap the file to find out the blkno to read from and
543 * how much we can read in one i/o. if bmap returns an error,
544 * skip the rest of the top-level i/o.
545 */
546
547 lbn = offset >> fs_bshift;
548 error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
549 if (error) {
550 UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
551 lbn,error,0,0);
552 skipbytes += bytes;
553 bytes = 0;
554 goto loopdone;
555 }
556
557 /*
558 * see how many pages can be read with this i/o.
559 * reduce the i/o size if necessary to avoid
560 * overwriting pages with valid data.
561 */
562
563 iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
564 bytes);
565 if (offset + iobytes > round_page(offset)) {
566 int pcount;
567
568 pcount = 1;
569 while (pidx + pcount < npages &&
570 pgs[pidx + pcount]->flags & PG_FAKE) {
571 pcount++;
572 }
573 iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
574 (offset - trunc_page(offset)));
575 }
576
577 /*
578 * if this block isn't allocated, zero it instead of
579 * reading it. unless we are going to allocate blocks,
580 * mark the pages we zeroed PG_HOLE.
581 */
582
583 if (blkno == (daddr_t)-1) {
584 int holepages = (round_page(offset + iobytes) -
585 trunc_page(offset)) >> PAGE_SHIFT;
586 UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
587
588 sawhole = true;
589 memset((char *)kva + (offset - startoffset), 0,
590 iobytes);
591 skipbytes += iobytes;
592
593 mutex_enter(uobj->vmobjlock);
594 for (i = 0; i < holepages; i++) {
595 #if 0
596 if (memwrite) {
597 uvm_pagemarkdirty(pgs[pidx + i],
598 UVM_PAGE_STATUS_DIRTY);
599 }
600 #endif
601 if (!blockalloc) {
602 pgs[pidx + i]->flags |= PG_HOLE;
603 }
604 }
605 mutex_exit(uobj->vmobjlock);
606 continue;
607 }
608
609 /*
610 * allocate a sub-buf for this piece of the i/o
611 * (or just use mbp if there's only 1 piece),
612 * and start it going.
613 */
614
615 if (offset == startoffset && iobytes == bytes) {
616 bp = mbp;
617 } else {
618 UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
619 vp, bp, vp->v_numoutput, 0);
620 bp = getiobuf(vp, true);
621 nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
622 }
623 bp->b_lblkno = 0;
624
625 /* adjust physical blkno for partial blocks */
626 bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
627 dev_bshift);
628
629 UVMHIST_LOG(ubchist,
630 "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
631 bp, offset, bp->b_bcount, bp->b_blkno);
632
633 VOP_STRATEGY(devvp, bp);
634 }
635
636 loopdone:
637 nestiobuf_done(mbp, skipbytes, error);
638 if (async) {
639 UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
640 if (!glocked) {
641 genfs_node_unlock(vp);
642 }
643 error = 0;
644 goto out_err_free;
645 }
646 if (bp != NULL) {
647 error = biowait(mbp);
648 }
649
650 /* Remove the mapping (make KVA available as soon as possible) */
651 uvm_pagermapout(kva, npages);
652
653 /*
654 * if we encountered a hole then we have to do a little more work.
655 * if blockalloc is false, we marked the page PG_HOLE so that future
656 * write accesses to the page will fault again.
657 * if blockalloc is true, we must make sure that the backing store for
658 * the page is completely allocated while the pages are locked.
659 */
660
661 if (!error && sawhole && blockalloc) {
662 error = GOP_ALLOC(vp, startoffset,
663 npages << PAGE_SHIFT, 0, cred);
664 UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
665 startoffset, npages << PAGE_SHIFT, error,0);
666 if (!error) {
667 mutex_enter(uobj->vmobjlock);
668 for (i = 0; i < npages; i++) {
669 struct vm_page *pg = pgs[i];
670
671 if (pg == NULL) {
672 continue;
673 }
674 pg->flags &= ~PG_HOLE;
675 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
676 UVMHIST_LOG(ubchist, "mark dirty pg %p",
677 pg,0,0,0);
678 }
679 mutex_exit(uobj->vmobjlock);
680 }
681 }
682 if (!glocked) {
683 genfs_node_unlock(vp);
684 }
685
686 putiobuf(mbp);
687 }
688
689 mutex_enter(uobj->vmobjlock);
690
691 /*
692 * we're almost done! release the pages...
693 * for errors, we free the pages.
694 * otherwise we activate them and mark them as valid and clean.
695 * also, unbusy pages that were not actually requested.
696 */
697
698 if (error) {
699 for (i = 0; i < npages; i++) {
700 struct vm_page *pg = pgs[i];
701
702 if (pg == NULL) {
703 continue;
704 }
705 UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
706 pg, pg->flags, 0,0);
707 if (pg->flags & PG_FAKE) {
708 pg->flags |= PG_RELEASED;
709 }
710 }
711 mutex_enter(&uvm_pageqlock);
712 uvm_page_unbusy(pgs, npages);
713 mutex_exit(&uvm_pageqlock);
714 mutex_exit(uobj->vmobjlock);
715 UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
716 goto out_err_free;
717 }
718
719 out:
720 UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
721 error = 0;
722 mutex_enter(&uvm_pageqlock);
723 for (i = 0; i < npages; i++) {
724 struct vm_page *pg = pgs[i];
725 if (pg == NULL) {
726 continue;
727 }
728 UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
729 pg, pg->flags, 0,0);
730 if (pg->flags & PG_FAKE && !overwrite) {
731 /*
732 * we've read the page's contents from the backing store.
733 *
734 * for a read fault, we keep them CLEAN.
735 */
736 KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
737 pg->flags &= ~PG_FAKE;
738 }
739 KASSERT(!blockalloc || (pg->flags & PG_HOLE) == 0);
740 if (i < ridx || i >= ridx + orignmempages || async) {
741 UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
742 pg, pg->offset,0,0);
743 KASSERT(!overwrite);
744 if (pg->flags & PG_WANTED) {
745 wakeup(pg);
746 }
747 if (pg->flags & PG_FAKE && overwrite) {
748 uvm_pagezero(pg);
749 }
750 if (pg->flags & PG_RELEASED) {
751 uvm_pagefree(pg);
752 continue;
753 }
754 uvm_pageenqueue(pg);
755 pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
756 UVM_PAGE_OWN(pg, NULL);
757 } else if (memwrite && !overwrite &&
758 uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
759 /*
760 * for a write fault, start dirtiness tracking of
761 * requested pages.
762 */
763 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
764 }
765 }
766 mutex_exit(&uvm_pageqlock);
767 if (memwrite) {
768 genfs_markdirty(vp);
769 }
770 mutex_exit(uobj->vmobjlock);
771 if (ap->a_m != NULL) {
772 memcpy(ap->a_m, &pgs[ridx],
773 orignmempages * sizeof(struct vm_page *));
774 }
775
776 out_err_free:
777 if (pgs != NULL && pgs != pgs_onstack)
778 kmem_free(pgs, pgs_size);
779 out_err:
780 if (has_trans_wapbl) {
781 if (need_wapbl)
782 WAPBL_END(vp->v_mount);
783 fstrans_done(vp->v_mount);
784 }
785 return error;
786 }
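
/*
 * Usage sketch (illustrative, not part of genfs): how a caller typically
 * asks for a single page with synchronous i/o.  The object must be locked
 * on entry; without PGO_LOCKED the lock has been dropped by the time
 * VOP_GETPAGES() returns, and the page comes back PG_BUSY, so the caller
 * must unbusy it when done.  The function name is made up for the example.
 */
#if 0 /* example only */
static int
example_get_one_page(struct vnode *vp, voff_t off, struct vm_page **pgp)
{
	struct vm_page *pgs[1] = { NULL };
	int npages = 1;
	int error;

	mutex_enter(vp->v_uobj.vmobjlock);
	error = VOP_GETPAGES(vp, trunc_page(off), pgs, &npages, 0,
	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
	if (error)
		return error;
	*pgp = pgs[0];
	return 0;
}
#endif /* example only */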
787
788 /*
789 * generic VM putpages routine.
790 * Write the given range of pages to backing store.
791 *
792 * => "offhi == 0" means flush all pages at or after "offlo".
793 * => object should be locked by caller. we return with the
794 * object unlocked.
795 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
796 * thus, a caller might want to unlock higher level resources
797 * (e.g. vm_map) before calling flush.
798 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
799 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
800 * => NOTE: we visit pages strictly in ascending offset order, using
801 * the object's radix tree, so we can make a complete pass over the
802 * requested range in one go. pages entered into the object at
803 * offsets behind the current position will not be revisited by
804 * this pass.
805 * => NOTE: we are allowed to lock the page queues, so the caller
806 * must not be holding the page queue lock.
807 *
808 * note on "cleaning" object and PG_BUSY pages:
809 * this routine is holding the lock on the object. the only time
810 * that it can run into a PG_BUSY page that it does not own is if
811 * some other process has started I/O on the page (e.g. either
812 * a pagein, or a pageout). if the PG_BUSY page is being paged
813 * in, then it can not be dirty (!UVM_PAGE_STATUS_CLEAN) because no
814 * one has had a chance to modify it yet. if the PG_BUSY page is
815 * being paged out then it means that someone else has already started
816 * cleaning the page for us (how nice!). in this case, if we
817 * have syncio specified, then after we make our pass through the
818 * object we need to wait for the other PG_BUSY pages to clear
819 * off (i.e. we need to do an iosync). also note that once a
820 * page is PG_BUSY it must stay in its object until it is un-busyed.
821 *
822 * note on page traversal:
823 * we traverse the pages by offset, using uvm_page_array to batch
824 * lookups in the object's radix tree. when only cleaning is
825 * requested (neither PGO_DEACTIVATE nor PGO_FREE is set), the
826 * lookups are restricted to pages tagged dirty in the tree.
827 * the array is refilled as the scan advances and must be
828 * invalidated whenever the object lock is dropped, since its
829 * cached entries can become stale.
834 */
835
836 int
837 genfs_putpages(void *v)
838 {
839 struct vop_putpages_args /* {
840 struct vnode *a_vp;
841 voff_t a_offlo;
842 voff_t a_offhi;
843 int a_flags;
844 } */ * const ap = v;
845
846 return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
847 ap->a_flags, NULL);
848 }
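
/*
 * Usage sketch (illustrative, not part of genfs): synchronously flushing
 * a byte range of a vnode.  VOP_PUTPAGES() is entered with the object
 * locked and returns with it unlocked; the offsets passed in must be
 * page-aligned.  The function name is made up for the example.
 */
#if 0 /* example only */
static int
example_flush_range(struct vnode *vp, off_t lo, off_t hi)
{

	mutex_enter(vp->v_uobj.vmobjlock);
	return VOP_PUTPAGES(vp, trunc_page(lo), round_page(hi),
	    PGO_CLEANIT | PGO_SYNCIO);
}
#endif /* example only */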
849
850 int
851 genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
852 int origflags, struct vm_page **busypg)
853 {
854 struct uvm_object * const uobj = &vp->v_uobj;
855 kmutex_t * const slock = uobj->vmobjlock;
856 off_t off;
857 /* Even for strange MAXPHYS, the shift rounds down to a page */
858 #define maxpages (MAXPHYS >> PAGE_SHIFT)
859 int i, error, npages, nback;
860 int freeflag;
861 struct vm_page *pgs[maxpages], *pg;
862 struct uvm_page_array a;
863 bool wasclean, needs_clean, yld;
864 bool async = (origflags & PGO_SYNCIO) == 0;
865 bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
866 struct lwp * const l = curlwp ? curlwp : &lwp0;
867 int flags;
868 bool modified; /* if we write out any pages */
869 bool need_wapbl;
870 bool has_trans;
871 bool tryclean; /* try to pull off from the syncer's list */
872 bool onworklst;
873 const bool dirtyonly = (origflags & (PGO_DEACTIVATE|PGO_FREE)) == 0;
874
875 UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
876
877 KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
878 KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
879 KASSERT(startoff < endoff || endoff == 0);
880
881 UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
882 vp, uobj->uo_npages, startoff, endoff - startoff);
883
884 has_trans = false;
885 need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
886 (origflags & PGO_JOURNALLOCKED) == 0);
887
888 retry:
889 modified = false;
890 flags = origflags;
891 KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
892 (vp->v_iflag & VI_WRMAPDIRTY) == 0);
893
894 /*
895 * shortcut if we have no pages to process.
896 */
897
898 if (uobj->uo_npages == 0 || (dirtyonly &&
899 radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
900 UVM_PAGE_DIRTY_TAG))) {
901 if (vp->v_iflag & VI_ONWORKLST) {
902 vp->v_iflag &= ~VI_WRMAPDIRTY;
903 if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
904 vn_syncer_remove_from_worklist(vp);
905 }
906 if (has_trans) {
907 if (need_wapbl)
908 WAPBL_END(vp->v_mount);
909 fstrans_done(vp->v_mount);
910 }
911 mutex_exit(slock);
912 return (0);
913 }
914
915 /*
916 * the vnode has pages, set up to process the request.
917 */
918
919 if (!has_trans && (flags & PGO_CLEANIT) != 0) {
920 mutex_exit(slock);
921 if (pagedaemon) {
922 error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
923 if (error)
924 return error;
925 } else
926 fstrans_start(vp->v_mount, FSTRANS_LAZY);
927 if (need_wapbl) {
928 error = WAPBL_BEGIN(vp->v_mount);
929 if (error) {
930 fstrans_done(vp->v_mount);
931 return error;
932 }
933 }
934 has_trans = true;
935 mutex_enter(slock);
936 goto retry;
937 }
938
939 error = 0;
940 wasclean = (vp->v_numoutput == 0);
941 off = startoff;
942 if (endoff == 0 || flags & PGO_ALLPAGES) {
943 endoff = trunc_page(LLONG_MAX);
944 }
945
946 /*
947 * if this vnode is known not to have dirty pages,
948 * don't bother to clean it out.
949 */
950
951 if ((vp->v_iflag & VI_ONWORKLST) == 0) {
952 #if !defined(DEBUG)
953 if (dirtyonly) {
954 goto skip_scan;
955 }
956 #endif /* !defined(DEBUG) */
957 flags &= ~PGO_CLEANIT;
958 }
959
960 /*
961 * start the loop.
962 */
963
964 freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
965 tryclean = true;
966 uvm_page_array_init(&a);
967 for (;;) {
968 bool protected;
969
970 pg = uvm_page_array_fill_and_peek(&a, uobj, off, 0,
971 dirtyonly ? UVM_PAGE_ARRAY_FILL_DIRTYONLY : 0);
972 if (pg == NULL) {
973 break;
974 }
975
976 /*
977 * if the current page is not interesting, move on to the next.
978 */
979
980 KASSERT(pg->uobject == uobj);
981 KASSERT((pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
982 (pg->flags & (PG_BUSY)) != 0);
983 KASSERT(pg->offset >= startoff);
984 KASSERT(pg->offset >= off);
985 KASSERT(!dirtyonly ||
986 uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN);
987 if (pg->offset >= endoff) {
988 break;
989 }
990 if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
991 KASSERT((pg->flags & PG_BUSY) != 0);
992 wasclean = false;
993 off = pg->offset + PAGE_SIZE;
994 uvm_page_array_advance(&a);
995 continue;
996 }
997
998 /*
999 * if the current page needs to be cleaned and it's busy,
1000 * wait for it to become unbusy.
1001 */
1002
1003 yld = (l->l_cpu->ci_schedstate.spc_flags &
1004 SPCF_SHOULDYIELD) && !pagedaemon;
1005 if (pg->flags & PG_BUSY || yld) {
1006 UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
1007 if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
1008 UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
1009 error = EDEADLK;
1010 if (busypg != NULL)
1011 *busypg = pg;
1012 break;
1013 }
1014 if (pagedaemon) {
1015 /*
1016 * someone has taken the page while we
1017 * dropped the lock for fstrans_start.
1018 */
1019 break;
1020 }
1021 off = pg->offset; /* visit this page again */
1022 if ((pg->flags & PG_BUSY) != 0) {
1023 pg->flags |= PG_WANTED;
1024 UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
1025 } else {
1026 KASSERT(yld);
1027 mutex_exit(slock);
1028 preempt();
1029 }
1030 /*
1031 * as we dropped the object lock, our cached pages can
1032 * be stale.
1033 */
1034 uvm_page_array_clear(&a);
1035 mutex_enter(slock);
1036 continue;
1037 }
1038
1039 off = pg->offset + PAGE_SIZE;
1040 uvm_page_array_advance(&a);
1041
1042 /*
1043 * if we're freeing, remove all mappings of the page now.
1044 * if we're cleaning, check if the page needs to be cleaned.
1045 */
1046
1047 protected = false;
1048 if (flags & PGO_FREE) {
1049 pmap_page_protect(pg, VM_PROT_NONE);
1050 protected = true;
1051 } else if (flags & PGO_CLEANIT) {
1052
1053 /*
1054 * if we still have some hope to pull this vnode off
1055 * from the syncer queue, write-protect the page.
1056 */
1057
1058 if (tryclean && wasclean) {
1059
1060 /*
1061 * uobj pages get wired only by uvm_fault
1062 * where uobj is locked.
1063 */
1064
1065 if (pg->wire_count == 0) {
1066 pmap_page_protect(pg,
1067 VM_PROT_READ|VM_PROT_EXECUTE);
1068 protected = true;
1069 } else {
1070 /*
1071 * give up.
1072 */
1073 tryclean = false;
1074 }
1075 }
1076 }
1077
1078 if (flags & PGO_CLEANIT) {
1079 needs_clean = uvm_pagecheckdirty(pg, protected);
1080 } else {
1081 needs_clean = false;
1082 }
1083
1084 /*
1085 * if we're cleaning, build a cluster.
1086 * the cluster will consist of pages which are currently dirty.
1087 * if not cleaning, just operate on the one page.
1088 */
1089
1090 if (needs_clean) {
1091 KDASSERT((vp->v_iflag & VI_ONWORKLST));
1092 wasclean = false;
1093 memset(pgs, 0, sizeof(pgs));
1094 pg->flags |= PG_BUSY;
1095 UVM_PAGE_OWN(pg, "genfs_putpages");
1096
1097 /*
1098 * first look backward.
1099 *
1100 * XXX implement PG_PAGER1 incompatibility check.
1101 * probably it's better to make PG_NEEDCOMMIT a first-class
1102 * citizen for uvm/genfs.
1103 */
1104
1105 npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
1106 nback = npages;
1107 uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
1108 UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
1109 if (nback) {
1110 memmove(&pgs[0], &pgs[npages - nback],
1111 nback * sizeof(pgs[0]));
1112 if (npages - nback < nback)
1113 memset(&pgs[nback], 0,
1114 (npages - nback) * sizeof(pgs[0]));
1115 else
1116 memset(&pgs[npages - nback], 0,
1117 nback * sizeof(pgs[0]));
1118 }
1119
1120 /*
1121 * then plug in our page of interest.
1122 */
1123
1124 pgs[nback] = pg;
1125
1126 /*
1127 * then look forward to fill in the remaining space in
1128 * the array of pages.
1129 */
1130
1131 for (npages = 1; npages < maxpages; npages++) {
1132 struct vm_page *nextpg;
1133
1134 /*
1135 * regardless of the value of dirtyonly,
1136 * we don't need to care about clean pages here
1137 * as we will drop the object lock to call
1138 * GOP_WRITE and thus need to clear the array
1139 * before the next iteration anyway.
1140 */
1141
1142 nextpg = uvm_page_array_fill_and_peek(&a, uobj,
1143 pgs[npages - 1]->offset + PAGE_SIZE,
1144 maxpages - npages,
1145 UVM_PAGE_ARRAY_FILL_DIRTYONLY |
1146 UVM_PAGE_ARRAY_FILL_DENSE);
1147 if (nextpg == NULL) {
1148 break;
1149 }
1150 KASSERT(nextpg->uobject == pg->uobject);
1151 KASSERT(nextpg->offset > pg->offset);
1152 KASSERT(nextpg->offset >
1153 pgs[npages - 1]->offset);
1154 if (pgs[npages - 1]->offset + PAGE_SIZE !=
1155 nextpg->offset) {
1156 break;
1157 }
1158 if ((nextpg->flags & PG_BUSY) != 0) {
1159 break;
1160 }
1161
1162 /*
1163 * don't bother to cluster incompatible pages
1164 * together.
1165 *
1166 * XXX hack for nfs
1167 */
1168
1169 if (((nextpg->flags ^ pgs[npages - 1]->flags) &
1170 PG_PAGER1) != 0) {
1171 break;
1172 }
1173 if (!uvm_pagecheckdirty(nextpg, false)) {
1174 break;
1175 }
1176 nextpg->flags |= PG_BUSY;
1177 UVM_PAGE_OWN(nextpg, "genfs_putpages2");
1178 pgs[npages] = nextpg;
1179 uvm_page_array_advance(&a);
1180 }
1181 } else {
1182 pgs[0] = pg;
1183 npages = 1;
1184 nback = 0;
1185 }
1186
1187 /*
1188 * apply FREE or DEACTIVATE options if requested.
1189 */
1190
1191 if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
1192 mutex_enter(&uvm_pageqlock);
1193 }
1194 for (i = 0; i < npages; i++) {
1195 struct vm_page *tpg = pgs[i];
1196
1197 KASSERT(tpg->uobject == uobj);
1198 if (tpg->offset < startoff || tpg->offset >= endoff)
1199 continue;
1200 if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
1201 uvm_pagedeactivate(tpg);
1202 } else if (flags & PGO_FREE) {
1203 pmap_page_protect(tpg, VM_PROT_NONE);
1204 if (tpg->flags & PG_BUSY) {
1205 tpg->flags |= freeflag;
1206 if (pagedaemon) {
1207 uvm_pageout_start(1);
1208 uvm_pagedequeue(tpg);
1209 }
1210 } else {
1211
1212 /*
1213 * ``page is not busy''
1214 * implies that npages is 1
1215 * and needs_clean is false.
1216 */
1217
1218 KASSERT(npages == 1);
1219 KASSERT(!needs_clean);
1220 KASSERT(pg == tpg);
1221 KASSERT(off == tpg->offset + PAGE_SIZE);
1222 uvm_pagefree(tpg);
1223 if (pagedaemon)
1224 uvmexp.pdfreed++;
1225 }
1226 }
1227 }
1228 if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
1229 mutex_exit(&uvm_pageqlock);
1230 }
1231 if (needs_clean) {
1232 KASSERT(off == pg->offset + PAGE_SIZE);
1233 off = pg->offset + ((npages - nback) << PAGE_SHIFT);
1234 KASSERT(pgs[nback] == pg);
1235 KASSERT(off == pgs[npages - 1]->offset + PAGE_SIZE);
1236 mutex_exit(slock);
1237
1238 /*
1239 * start the i/o.
1240 *
1241 * as we dropped the object lock, our cached pages can
1242 * be stale.
1243 */
1244 modified = true;
1245 uvm_page_array_clear(&a);
1246 error = GOP_WRITE(vp, pgs, npages, flags);
1247 mutex_enter(slock);
1248 if (error) {
1249 break;
1250 }
1251 }
1252 }
1253 uvm_page_array_fini(&a);
1254
1255 /*
1256 * update ctime/mtime if the modification we started writing out might
1257 * be from mmap'ed write.
1258 *
1259 * this is necessary when an application keeps a file mmaped and
1260 * repeatedly modifies it via the window. note that, because we
1261 * don't always write-protect pages when cleaning, such modifications
1262 * might not involve any page faults.
1263 */
1264
1265 if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
1266 (vp->v_type != VBLK ||
1267 (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
1268 GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
1269 }
1270
1271 /*
1272 * if we no longer have any possibly dirty pages, take us off the
1273 * syncer list.
1274 */
1275
1276 if ((vp->v_iflag & VI_ONWORKLST) != 0 &&
1277 radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
1278 UVM_PAGE_DIRTY_TAG)) {
1279 vp->v_iflag &= ~VI_WRMAPDIRTY;
1280 if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
1281 vn_syncer_remove_from_worklist(vp);
1282 }
1283
1284 #if !defined(DEBUG)
1285 skip_scan:
1286 #endif /* !defined(DEBUG) */
1287
1288 /*
1289 * if we started any i/o and we're doing sync i/o, wait for all writes
1290 * to finish.
1291 */
1292
1293 if (!wasclean && !async) {
1294 while (vp->v_numoutput != 0)
1295 cv_wait(&vp->v_cv, slock);
1296 }
1297 onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
1298 mutex_exit(slock);
1299
1300 if ((flags & PGO_RECLAIM) != 0 && onworklst) {
1301 /*
1302 * in the case of PGO_RECLAIM, ensure to make the vnode clean.
1303 * retrying is not a big deal because, in many cases,
1304 * uobj->uo_npages is already 0 here.
1305 */
1306 mutex_enter(slock);
1307 goto retry;
1308 }
1309
1310 if (has_trans) {
1311 if (need_wapbl)
1312 WAPBL_END(vp->v_mount);
1313 fstrans_done(vp->v_mount);
1314 }
1315
1316 return (error);
1317 }
1318
1319 int
1320 genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
1321 {
1322 off_t off;
1323 vaddr_t kva;
1324 size_t len;
1325 int error;
1326 UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
1327
1328 UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
1329 vp, pgs, npages, flags);
1330
1331 off = pgs[0]->offset;
1332 kva = uvm_pagermapin(pgs, npages,
1333 UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
1334 len = npages << PAGE_SHIFT;
1335
1336 error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
1337 uvm_aio_biodone);
1338
1339 return error;
1340 }
1341
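/*
 * genfs_gop_write_rwmap() is identical to genfs_gop_write() above except
 * that it maps the pages with UVMPAGER_MAPIN_READ rather than
 * UVMPAGER_MAPIN_WRITE, i.e. with a kernel mapping that can also be
 * written through; presumably for file systems whose write path needs
 * to modify the page contents while the i/o is being set up.
 */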
1342 int
1343 genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
1344 {
1345 off_t off;
1346 vaddr_t kva;
1347 size_t len;
1348 int error;
1349 UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
1350
1351 UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
1352 vp, pgs, npages, flags);
1353
1354 off = pgs[0]->offset;
1355 kva = uvm_pagermapin(pgs, npages,
1356 UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
1357 len = npages << PAGE_SHIFT;
1358
1359 error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
1360 uvm_aio_biodone);
1361
1362 return error;
1363 }
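
/*
 * Wiring sketch (illustrative, not part of genfs): a file system selects
 * one of the gop_write implementations above through its genfs_ops table,
 * and the GOP_WRITE() call in genfs_do_putpages() dispatches to it.  The
 * "example_" names are placeholders for fs-specific hooks.
 */
#if 0 /* example only */
static const struct genfs_ops example_genfsops = {
	.gop_size = example_gop_size,
	.gop_alloc = example_gop_alloc,
	.gop_write = genfs_gop_write,
	.gop_markupdate = example_gop_markupdate,
};
#endif /* example only */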
1364
1365 /*
1366 * Backend routine for doing I/O to vnode pages. Pages are already locked
1367 * and mapped into kernel memory. Here we just look up the underlying
1368 * device block addresses and call the strategy routine.
1369 */
1370
1371 static int
1372 genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
1373 enum uio_rw rw, void (*iodone)(struct buf *))
1374 {
1375 int s, error;
1376 int fs_bshift, dev_bshift;
1377 off_t eof, offset, startoffset;
1378 size_t bytes, iobytes, skipbytes;
1379 struct buf *mbp, *bp;
1380 const bool async = (flags & PGO_SYNCIO) == 0;
1381 const bool iowrite = rw == UIO_WRITE;
1382 const int brw = iowrite ? B_WRITE : B_READ;
1383 UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
1384
1385 UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
1386 vp, kva, len, flags);
1387
1388 KASSERT(vp->v_size <= vp->v_writesize);
1389 GOP_SIZE(vp, vp->v_writesize, &eof, 0);
1390 if (vp->v_type != VBLK) {
1391 fs_bshift = vp->v_mount->mnt_fs_bshift;
1392 dev_bshift = vp->v_mount->mnt_dev_bshift;
1393 } else {
1394 fs_bshift = DEV_BSHIFT;
1395 dev_bshift = DEV_BSHIFT;
1396 }
1397 error = 0;
1398 startoffset = off;
1399 bytes = MIN(len, eof - startoffset);
1400 skipbytes = 0;
1401 KASSERT(bytes != 0);
1402
1403 if (iowrite) {
1404 mutex_enter(vp->v_interlock);
1405 vp->v_numoutput += 2;
1406 mutex_exit(vp->v_interlock);
1407 }
1408 mbp = getiobuf(vp, true);
1409 UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
1410 vp, mbp, vp->v_numoutput, bytes);
1411 mbp->b_bufsize = len;
1412 mbp->b_data = (void *)kva;
1413 mbp->b_resid = mbp->b_bcount = bytes;
1414 mbp->b_cflags = BC_BUSY | BC_AGE;
1415 if (async) {
1416 mbp->b_flags = brw | B_ASYNC;
1417 mbp->b_iodone = iodone;
1418 } else {
1419 mbp->b_flags = brw;
1420 mbp->b_iodone = NULL;
1421 }
1422 if (curlwp == uvm.pagedaemon_lwp)
1423 BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
1424 else if (async)
1425 BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
1426 else
1427 BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
1428
1429 bp = NULL;
1430 for (offset = startoffset;
1431 bytes > 0;
1432 offset += iobytes, bytes -= iobytes) {
1433 int run;
1434 daddr_t lbn, blkno;
1435 struct vnode *devvp;
1436
1437 /*
1438 * bmap the file to find out the blkno to read from and
1439 * how much we can read in one i/o. if bmap returns an error,
1440 * skip the rest of the top-level i/o.
1441 */
1442
1443 lbn = offset >> fs_bshift;
1444 error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
1445 if (error) {
1446 UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
1447 lbn,error,0,0);
1448 skipbytes += bytes;
1449 bytes = 0;
1450 goto loopdone;
1451 }
1452
1453 /*
1454 * see how much can be transferred in this i/o,
1455 * based on the contiguous block run returned by bmap
1456 * and the number of bytes remaining in the request.
1457 */
1458
1459 iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
1460 bytes);
1461
1462 /*
1463 * if this block isn't allocated, skip it; for reads, zero
1464 * the corresponding part of the buffer instead, since there
1465 * is no backing store to read from.
1466 */
1467
1468 if (blkno == (daddr_t)-1) {
1469 if (!iowrite) {
1470 memset((char *)kva + (offset - startoffset), 0,
1471 iobytes);
1472 }
1473 skipbytes += iobytes;
1474 continue;
1475 }
1476
1477 /*
1478 * allocate a sub-buf for this piece of the i/o
1479 * (or just use mbp if there's only 1 piece),
1480 * and start it going.
1481 */
1482
1483 if (offset == startoffset && iobytes == bytes) {
1484 bp = mbp;
1485 } else {
1486 UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
1487 vp, bp, vp->v_numoutput, 0);
1488 bp = getiobuf(vp, true);
1489 nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
1490 }
1491 bp->b_lblkno = 0;
1492
1493 /* adjust physical blkno for partial blocks */
1494 bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
1495 dev_bshift);
1496
1497 UVMHIST_LOG(ubchist,
1498 "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
1499 bp, offset, bp->b_bcount, bp->b_blkno);
1500
1501 VOP_STRATEGY(devvp, bp);
1502 }
1503
1504 loopdone:
1505 if (skipbytes) {
1506 UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
1507 }
1508 nestiobuf_done(mbp, skipbytes, error);
1509 if (async) {
1510 UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
1511 return (0);
1512 }
1513 UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
1514 error = biowait(mbp);
1515 s = splbio();
1516 (*iodone)(mbp);
1517 splx(s);
1518 UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
1519 return (error);
1520 }
1521
1522 int
1523 genfs_compat_getpages(void *v)
1524 {
1525 struct vop_getpages_args /* {
1526 struct vnode *a_vp;
1527 voff_t a_offset;
1528 struct vm_page **a_m;
1529 int *a_count;
1530 int a_centeridx;
1531 vm_prot_t a_access_type;
1532 int a_advice;
1533 int a_flags;
1534 } */ *ap = v;
1535
1536 off_t origoffset;
1537 struct vnode *vp = ap->a_vp;
1538 struct uvm_object *uobj = &vp->v_uobj;
1539 struct vm_page *pg, **pgs;
1540 vaddr_t kva;
1541 int i, error, orignpages, npages;
1542 struct iovec iov;
1543 struct uio uio;
1544 kauth_cred_t cred = curlwp->l_cred;
1545 const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
1546
1547 error = 0;
1548 origoffset = ap->a_offset;
1549 orignpages = *ap->a_count;
1550 pgs = ap->a_m;
1551
1552 if (ap->a_flags & PGO_LOCKED) {
1553 uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
1554 UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0));
1555
1556 error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
1557 if (error == 0 && memwrite) {
1558 genfs_markdirty(vp);
1559 }
1560 return error;
1561 }
1562 if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
1563 mutex_exit(uobj->vmobjlock);
1564 return EINVAL;
1565 }
1566 if ((ap->a_flags & PGO_SYNCIO) == 0) {
1567 mutex_exit(uobj->vmobjlock);
1568 return 0;
1569 }
1570 npages = orignpages;
1571 uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
1572 mutex_exit(uobj->vmobjlock);
1573 kva = uvm_pagermapin(pgs, npages,
1574 UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
1575 for (i = 0; i < npages; i++) {
1576 pg = pgs[i];
1577 if ((pg->flags & PG_FAKE) == 0) {
1578 continue;
1579 }
1580 iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
1581 iov.iov_len = PAGE_SIZE;
1582 uio.uio_iov = &iov;
1583 uio.uio_iovcnt = 1;
1584 uio.uio_offset = origoffset + (i << PAGE_SHIFT);
1585 uio.uio_rw = UIO_READ;
1586 uio.uio_resid = PAGE_SIZE;
1587 UIO_SETUP_SYSSPACE(&uio);
1588 /* XXX vn_lock */
1589 error = VOP_READ(vp, &uio, 0, cred);
1590 if (error) {
1591 break;
1592 }
1593 if (uio.uio_resid) {
1594 memset(iov.iov_base, 0, uio.uio_resid);
1595 }
1596 }
1597 uvm_pagermapout(kva, npages);
1598 mutex_enter(uobj->vmobjlock);
1599 mutex_enter(&uvm_pageqlock);
1600 for (i = 0; i < npages; i++) {
1601 pg = pgs[i];
1602 if (error && (pg->flags & PG_FAKE) != 0) {
1603 pg->flags |= PG_RELEASED;
1604 } else {
1605 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
1606 uvm_pageactivate(pg);
1607 }
1608 }
1609 if (error) {
1610 uvm_page_unbusy(pgs, npages);
1611 }
1612 mutex_exit(&uvm_pageqlock);
1613 if (error == 0 && memwrite) {
1614 genfs_markdirty(vp);
1615 }
1616 mutex_exit(uobj->vmobjlock);
1617 return error;
1618 }
1619
1620 int
1621 genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
1622 int flags)
1623 {
1624 off_t offset;
1625 struct iovec iov;
1626 struct uio uio;
1627 kauth_cred_t cred = curlwp->l_cred;
1628 struct buf *bp;
1629 vaddr_t kva;
1630 int error;
1631
1632 offset = pgs[0]->offset;
1633 kva = uvm_pagermapin(pgs, npages,
1634 UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
1635
1636 iov.iov_base = (void *)kva;
1637 iov.iov_len = npages << PAGE_SHIFT;
1638 uio.uio_iov = &iov;
1639 uio.uio_iovcnt = 1;
1640 uio.uio_offset = offset;
1641 uio.uio_rw = UIO_WRITE;
1642 uio.uio_resid = npages << PAGE_SHIFT;
1643 UIO_SETUP_SYSSPACE(&uio);
1644 /* XXX vn_lock */
1645 error = VOP_WRITE(vp, &uio, 0, cred);
1646
1647 mutex_enter(vp->v_interlock);
1648 vp->v_numoutput++;
1649 mutex_exit(vp->v_interlock);
1650
1651 bp = getiobuf(vp, true);
1652 bp->b_cflags = BC_BUSY | BC_AGE;
1653 bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
1654 bp->b_data = (char *)kva;
1655 bp->b_bcount = npages << PAGE_SHIFT;
1656 bp->b_bufsize = npages << PAGE_SHIFT;
1657 bp->b_resid = 0;
1658 bp->b_error = error;
1659 uvm_aio_aiodone(bp);
1660 return (error);
1661 }
1662
1663 /*
1664 * Process a uio using direct I/O. If we reach a part of the request
1665 * which cannot be processed in this fashion for some reason, just return.
1666 * The caller must handle some additional part of the request using
1667 * buffered I/O before trying direct I/O again.
1668 */
1669
1670 void
1671 genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
1672 {
1673 struct vmspace *vs;
1674 struct iovec *iov;
1675 vaddr_t va;
1676 size_t len;
1677 const int mask = DEV_BSIZE - 1;
1678 int error;
1679 bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
1680 (ioflag & IO_JOURNALLOCKED) == 0);
1681
1682 /*
1683 * We only support direct I/O to user space for now.
1684 */
1685
1686 if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
1687 return;
1688 }
1689
1690 /*
1691 * If the vnode is mapped, we would need to get the getpages lock
1692 * to stabilize the bmap, but then we would get into trouble while
1693 * locking the pages if the pages belong to this same vnode (or a
1694 * multi-vnode cascade to the same effect). Just fall back to
1695 * buffered I/O if the vnode is mapped to avoid this mess.
1696 */
1697
1698 if (vp->v_vflag & VV_MAPPED) {
1699 return;
1700 }
1701
1702 if (need_wapbl) {
1703 error = WAPBL_BEGIN(vp->v_mount);
1704 if (error)
1705 return;
1706 }
1707
1708 /*
1709 * Do as much of the uio as possible with direct I/O.
1710 */
1711
1712 vs = uio->uio_vmspace;
1713 while (uio->uio_resid) {
1714 iov = uio->uio_iov;
1715 if (iov->iov_len == 0) {
1716 uio->uio_iov++;
1717 uio->uio_iovcnt--;
1718 continue;
1719 }
1720 va = (vaddr_t)iov->iov_base;
1721 len = MIN(iov->iov_len, genfs_maxdio);
1722 len &= ~mask;
1723
1724 /*
1725 * If the next chunk is smaller than DEV_BSIZE or extends past
1726 * the current EOF, then fall back to buffered I/O.
1727 */
1728
1729 if (len == 0 || uio->uio_offset + len > vp->v_size) {
1730 break;
1731 }
1732
1733 /*
1734 * Check alignment. The file offset must be at least
1735 * sector-aligned. The exact constraint on memory alignment
1736 * is very hardware-dependent, but requiring sector-aligned
1737 * addresses there too is safe.
1738 */
1739
1740 if (uio->uio_offset & mask || va & mask) {
1741 break;
1742 }
1743 error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
1744 uio->uio_rw);
1745 if (error) {
1746 break;
1747 }
1748 iov->iov_base = (char *)iov->iov_base + len;
1749 iov->iov_len -= len;
1750 uio->uio_offset += len;
1751 uio->uio_resid -= len;
1752 }
1753
1754 if (need_wapbl)
1755 WAPBL_END(vp->v_mount);
1756 }
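
/*
 * Usage sketch (illustrative, not part of genfs): a file system's read
 * (or write) vnode operation can hand the uio to genfs_directio() first
 * and fall back to its normal buffered path for whatever part of the
 * request remains.  example_buffered_read() is a made-up placeholder.
 */
#if 0 /* example only */
static int
example_read(struct vnode *vp, struct uio *uio, int ioflag, kauth_cred_t cred)
{

	if ((ioflag & IO_DIRECT) != 0)
		genfs_directio(vp, uio, ioflag);
	if (uio->uio_resid == 0)
		return 0;
	return example_buffered_read(vp, uio, ioflag, cred);
}
#endif /* example only */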
1757
1758 /*
1759 * Iodone routine for direct I/O. We don't do much here since the request is
1760 * always synchronous, so the caller will do most of the work after biowait().
1761 */
1762
1763 static void
1764 genfs_dio_iodone(struct buf *bp)
1765 {
1766
1767 KASSERT((bp->b_flags & B_ASYNC) == 0);
1768 if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
1769 mutex_enter(bp->b_objlock);
1770 vwakeup(bp);
1771 mutex_exit(bp->b_objlock);
1772 }
1773 putiobuf(bp);
1774 }
1775
1776 /*
1777 * Process one chunk of a direct I/O request.
1778 */
1779
1780 static int
1781 genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
1782 off_t off, enum uio_rw rw)
1783 {
1784 struct vm_map *map;
1785 struct pmap *upm, *kpm;
1786 size_t klen = round_page(uva + len) - trunc_page(uva);
1787 off_t spoff, epoff;
1788 vaddr_t kva, puva;
1789 paddr_t pa;
1790 vm_prot_t prot;
1791 int error, rv, poff, koff;
1792 const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
1793 (rw == UIO_WRITE ? PGO_FREE : 0);
1794
1795 /*
1796 * For writes, verify that this range of the file already has fully
1797 * allocated backing store. If there are any holes, just punt and
1798 * make the caller take the buffered write path.
1799 */
1800
1801 if (rw == UIO_WRITE) {
1802 daddr_t lbn, elbn, blkno;
1803 int bsize, bshift, run;
1804
1805 bshift = vp->v_mount->mnt_fs_bshift;
1806 bsize = 1 << bshift;
1807 lbn = off >> bshift;
1808 elbn = (off + len + bsize - 1) >> bshift;
1809 while (lbn < elbn) {
1810 error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
1811 if (error) {
1812 return error;
1813 }
1814 if (blkno == (daddr_t)-1) {
1815 return ENOSPC;
1816 }
1817 lbn += 1 + run;
1818 }
1819 }
1820
1821 /*
1822 * Flush any cached pages for parts of the file that we're about to
1823 * access. If we're writing, invalidate pages as well.
1824 */
1825
1826 spoff = trunc_page(off);
1827 epoff = round_page(off + len);
1828 mutex_enter(vp->v_interlock);
1829 error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
1830 if (error) {
1831 return error;
1832 }
1833
1834 /*
1835 * Wire the user pages and remap them into kernel memory.
1836 */
1837
1838 prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
1839 error = uvm_vslock(vs, (void *)uva, len, prot);
1840 if (error) {
1841 return error;
1842 }
1843
1844 map = &vs->vm_map;
1845 upm = vm_map_pmap(map);
1846 kpm = vm_map_pmap(kernel_map);
1847 puva = trunc_page(uva);
1848 kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
1849 UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
1850 for (poff = 0; poff < klen; poff += PAGE_SIZE) {
1851 rv = pmap_extract(upm, puva + poff, &pa);
1852 KASSERT(rv);
1853 pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
1854 }
1855 pmap_update(kpm);
1856
1857 /*
1858 * Do the I/O.
1859 */
1860
1861 koff = uva - trunc_page(uva);
1862 error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
1863 genfs_dio_iodone);
1864
1865 /*
1866 * Tear down the kernel mapping.
1867 */
1868
1869 pmap_kremove(kva, klen);
1870 pmap_update(kpm);
1871 uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
1872
1873 /*
1874 * Unwire the user pages.
1875 */
1876
1877 uvm_vsunlock(vs, (void *)uva, len);
1878 return error;
1879 }
1880