1 /*	$NetBSD: genfs_io.c,v 1.55.2.1 2012/09/12 06:15:35 tls Exp $	*/
2
3 /*
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.55.2.1 2012/09/12 06:15:35 tls Exp $");
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/kernel.h>
40 #include <sys/mount.h>
41 #include <sys/vnode.h>
42 #include <sys/kmem.h>
43 #include <sys/kauth.h>
44 #include <sys/fstrans.h>
45 #include <sys/buf.h>
46
47 #include <miscfs/genfs/genfs.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <miscfs/specfs/specdev.h>
50 #include <miscfs/syncfs/syncfs.h>
51
52 #include <uvm/uvm.h>
53 #include <uvm/uvm_pager.h>
54
55 static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
56 off_t, enum uio_rw);
57 static void genfs_dio_iodone(struct buf *);
58
59 static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
60 void (*)(struct buf *));
61 static void genfs_rel_pages(struct vm_page **, unsigned int);
62 static void genfs_markdirty(struct vnode *);
63
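/* upper bound on a single direct i/o chunk; see genfs_directio() below. */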
64 int genfs_maxdio = MAXPHYS;
65
66 static void
67 genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
68 {
69 unsigned int i;
70
71 for (i = 0; i < npages; i++) {
72 struct vm_page *pg = pgs[i];
73
74 if (pg == NULL || pg == PGO_DONTCARE)
75 continue;
76 KASSERT(uvm_page_locked_p(pg));
77 if (pg->flags & PG_FAKE) {
78 pg->flags |= PG_RELEASED;
79 }
80 }
81 mutex_enter(&uvm_pageqlock);
82 uvm_page_unbusy(pgs, npages);
83 mutex_exit(&uvm_pageqlock);
84 }
85
86 static void
87 genfs_markdirty(struct vnode *vp)
88 {
89 struct genfs_node * const gp = VTOG(vp);
90
91 KASSERT(mutex_owned(vp->v_interlock));
92 gp->g_dirtygen++;
93 if ((vp->v_iflag & VI_ONWORKLST) == 0) {
94 vn_syncer_add_to_worklist(vp, filedelay);
95 }
96 if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
97 vp->v_iflag |= VI_WRMAPDIRTY;
98 }
99 }
100
101 /*
102 * generic VM getpages routine.
103 * Return PG_BUSY pages for the given range,
104 * reading from backing store if necessary.
105 */
106
107 int
108 genfs_getpages(void *v)
109 {
110 struct vop_getpages_args /* {
111 struct vnode *a_vp;
112 voff_t a_offset;
113 struct vm_page **a_m;
114 int *a_count;
115 int a_centeridx;
116 vm_prot_t a_access_type;
117 int a_advice;
118 int a_flags;
119 } */ * const ap = v;
120
121 off_t diskeof, memeof;
122 int i, error, npages;
123 const int flags = ap->a_flags;
124 struct vnode * const vp = ap->a_vp;
125 struct uvm_object * const uobj = &vp->v_uobj;
126 kauth_cred_t const cred = curlwp->l_cred; /* XXXUBC curlwp */
127 const bool async = (flags & PGO_SYNCIO) == 0;
128 const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
129 const bool overwrite = (flags & PGO_OVERWRITE) != 0;
130 const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
131 const bool glocked = (flags & PGO_GLOCKHELD) != 0;
132 const bool need_wapbl = blockalloc && vp->v_mount->mnt_wapbl;
133 bool has_trans_wapbl = false;
134 UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
135
136 UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
137 vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
138
139 KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
140 vp->v_type == VLNK || vp->v_type == VBLK);
141
142 startover:
143 error = 0;
144 const voff_t origvsize = vp->v_size;
145 const off_t origoffset = ap->a_offset;
146 const int orignpages = *ap->a_count;
147
148 GOP_SIZE(vp, origvsize, &diskeof, 0);
149 if (flags & PGO_PASTEOF) {
150 off_t newsize;
151 #if defined(DIAGNOSTIC)
152 off_t writeeof;
153 #endif /* defined(DIAGNOSTIC) */
154
155 newsize = MAX(origvsize,
156 origoffset + (orignpages << PAGE_SHIFT));
157 GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
158 #if defined(DIAGNOSTIC)
159 GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
160 if (newsize > round_page(writeeof)) {
161 panic("%s: past eof: %" PRId64 " vs. %" PRId64,
162 __func__, newsize, round_page(writeeof));
163 }
164 #endif /* defined(DIAGNOSTIC) */
165 } else {
166 GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
167 }
168 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
169 KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
170 KASSERT(orignpages > 0);
171
172 /*
173 * Bounds-check the request.
174 */
175
176 if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
177 if ((flags & PGO_LOCKED) == 0) {
178 mutex_exit(uobj->vmobjlock);
179 }
180 UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
181 origoffset, *ap->a_count, memeof,0);
182 error = EINVAL;
183 goto out_err;
184 }
185
186 /* uobj is locked */
187
188 if ((flags & PGO_NOTIMESTAMP) == 0 &&
189 (vp->v_type != VBLK ||
190 (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
191 int updflags = 0;
192
193 if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
194 updflags = GOP_UPDATE_ACCESSED;
195 }
196 if (memwrite) {
197 updflags |= GOP_UPDATE_MODIFIED;
198 }
199 if (updflags != 0) {
200 GOP_MARKUPDATE(vp, updflags);
201 }
202 }
203
204 /*
205 * For PGO_LOCKED requests, just return whatever's in memory.
206 */
207
208 if (flags & PGO_LOCKED) {
209 int nfound;
210 struct vm_page *pg;
211
212 KASSERT(!glocked);
213 npages = *ap->a_count;
214 #if defined(DEBUG)
215 for (i = 0; i < npages; i++) {
216 pg = ap->a_m[i];
217 KASSERT(pg == NULL || pg == PGO_DONTCARE);
218 }
219 #endif /* defined(DEBUG) */
220 nfound = uvn_findpages(uobj, origoffset, &npages,
221 ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
222 KASSERT(npages == *ap->a_count);
223 if (nfound == 0) {
224 error = EBUSY;
225 goto out_err;
226 }
227 if (!genfs_node_rdtrylock(vp)) {
228 genfs_rel_pages(ap->a_m, npages);
229
230 /*
231 * restore the array.
232 */
233
234 for (i = 0; i < npages; i++) {
235 pg = ap->a_m[i];
236
237 if (pg != NULL && pg != PGO_DONTCARE) {
238 ap->a_m[i] = NULL;
239 }
240 KASSERT(ap->a_m[i] == NULL ||
241 ap->a_m[i] == PGO_DONTCARE);
242 }
243 } else {
244 genfs_node_unlock(vp);
245 }
246 error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
247 if (error == 0 && memwrite) {
248 genfs_markdirty(vp);
249 }
250 goto out_err;
251 }
252 mutex_exit(uobj->vmobjlock);
253
254 /*
255 * find the requested pages and make some simple checks.
256 * leave space in the page array for a whole block.
257 */
258
259 const int fs_bshift = (vp->v_type != VBLK) ?
260 vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
261 const int dev_bshift = (vp->v_type != VBLK) ?
262 vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
263 const int fs_bsize = 1 << fs_bshift;
264 #define blk_mask (fs_bsize - 1)
265 #define trunc_blk(x) ((x) & ~blk_mask)
266 #define round_blk(x) (((x) + blk_mask) & ~blk_mask)
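	/*
	 * e.g. assuming an 8 KB file system block (fs_bshift == 13,
	 * blk_mask == 0x1fff) for illustration: trunc_blk(0x3000) == 0x2000
	 * and round_blk(0x3000) == 0x4000, so a request starting at offset
	 * 0x3000 is widened to cover the whole block.
	 */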
267
268 const int orignmempages = MIN(orignpages,
269 round_page(memeof - origoffset) >> PAGE_SHIFT);
270 npages = orignmempages;
271 const off_t startoffset = trunc_blk(origoffset);
272 const off_t endoffset = MIN(
273 round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
274 round_page(memeof));
275 const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
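	/*
	 * startoffset/endoffset now bound the block-aligned, page-rounded
	 * range we may do i/o on, and ridx is the index of the first
	 * originally requested page within that widened range.
	 */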
276
277 const int pgs_size = sizeof(struct vm_page *) *
278 ((endoffset - startoffset) >> PAGE_SHIFT);
279 struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];
280
281 if (pgs_size > sizeof(pgs_onstack)) {
282 pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
283 if (pgs == NULL) {
284 pgs = pgs_onstack;
285 error = ENOMEM;
286 goto out_err;
287 }
288 } else {
289 pgs = pgs_onstack;
290 (void)memset(pgs, 0, pgs_size);
291 }
292
293 UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
294 ridx, npages, startoffset, endoffset);
295
296 if (!has_trans_wapbl) {
297 fstrans_start(vp->v_mount, FSTRANS_SHARED);
298 /*
299 * XXX: This assumes that we come here only via
300 * the mmio path
301 */
302 if (need_wapbl) {
303 error = WAPBL_BEGIN(vp->v_mount);
304 if (error) {
305 fstrans_done(vp->v_mount);
306 goto out_err_free;
307 }
308 }
309 has_trans_wapbl = true;
310 }
311
312 /*
313 * hold g_glock to prevent a race with truncate.
314 *
315 * check if our idea of v_size is still valid.
316 */
317
318 KASSERT(!glocked || genfs_node_wrlocked(vp));
319 if (!glocked) {
320 if (blockalloc) {
321 genfs_node_wrlock(vp);
322 } else {
323 genfs_node_rdlock(vp);
324 }
325 }
326 mutex_enter(uobj->vmobjlock);
327 if (vp->v_size < origvsize) {
328 if (!glocked) {
329 genfs_node_unlock(vp);
330 }
331 if (pgs != pgs_onstack)
332 kmem_free(pgs, pgs_size);
333 goto startover;
334 }
335
336 if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
337 async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
338 if (!glocked) {
339 genfs_node_unlock(vp);
340 }
341 KASSERT(async != 0);
342 genfs_rel_pages(&pgs[ridx], orignmempages);
343 mutex_exit(uobj->vmobjlock);
344 error = EBUSY;
345 goto out_err_free;
346 }
347
348 /*
349 * if the pages are already resident, just return them.
350 */
351
352 for (i = 0; i < npages; i++) {
353 struct vm_page *pg = pgs[ridx + i];
354
355 if ((pg->flags & PG_FAKE) ||
356 (blockalloc && (pg->flags & PG_RDONLY))) {
357 break;
358 }
359 }
360 if (i == npages) {
361 if (!glocked) {
362 genfs_node_unlock(vp);
363 }
364 UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
365 npages += ridx;
366 goto out;
367 }
368
369 /*
370 * if PGO_OVERWRITE is set, don't bother reading the pages.
371 */
372
373 if (overwrite) {
374 if (!glocked) {
375 genfs_node_unlock(vp);
376 }
377 UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
378
379 for (i = 0; i < npages; i++) {
380 struct vm_page *pg = pgs[ridx + i];
381
382 pg->flags &= ~(PG_RDONLY|PG_CLEAN);
383 }
384 npages += ridx;
385 goto out;
386 }
387
388 /*
389 * the page wasn't resident and we're not overwriting,
390 * so we're going to have to do some i/o.
391 * find any additional pages needed to cover the expanded range.
392 */
393
394 npages = (endoffset - startoffset) >> PAGE_SHIFT;
395 if (startoffset != origoffset || npages != orignmempages) {
396 int npgs;
397
398 /*
399 * we need to avoid deadlocks caused by locking
400 * additional pages at lower offsets than pages we
401 * already have locked. unlock them all and start over.
402 */
403
404 genfs_rel_pages(&pgs[ridx], orignmempages);
405 memset(pgs, 0, pgs_size);
406
407 UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
408 startoffset, endoffset, 0,0);
409 npgs = npages;
410 if (uvn_findpages(uobj, startoffset, &npgs, pgs,
411 async ? UFP_NOWAIT : UFP_ALL) != npages) {
412 if (!glocked) {
413 genfs_node_unlock(vp);
414 }
415 KASSERT(async != 0);
416 genfs_rel_pages(pgs, npages);
417 mutex_exit(uobj->vmobjlock);
418 error = EBUSY;
419 goto out_err_free;
420 }
421 }
422
423 mutex_exit(uobj->vmobjlock);
424
425 {
426 size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
427 vaddr_t kva;
428 struct buf *bp, *mbp;
429 bool sawhole = false;
430
431 /*
432 * read the desired page(s).
433 */
434
435 totalbytes = npages << PAGE_SHIFT;
436 bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
437 tailbytes = totalbytes - bytes;
438 skipbytes = 0;
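	/*
	 * bytes is how much of the mapped range is backed by the file on
	 * disk (clipped at diskeof); tailbytes is the remainder past disk
	 * EOF, which is zero-filled below instead of being read.
	 */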
439
440 kva = uvm_pagermapin(pgs, npages,
441 UVMPAGER_MAPIN_READ | (async ? 0 : UVMPAGER_MAPIN_WAITOK));
442 if (kva == 0) {
443 error = EBUSY;
444 goto mapin_fail;
445 }
446
447 mbp = getiobuf(vp, true);
448 mbp->b_bufsize = totalbytes;
449 mbp->b_data = (void *)kva;
450 mbp->b_resid = mbp->b_bcount = bytes;
451 mbp->b_cflags = BC_BUSY;
452 if (async) {
453 mbp->b_flags = B_READ | B_ASYNC;
454 mbp->b_iodone = uvm_aio_biodone;
455 } else {
456 mbp->b_flags = B_READ;
457 mbp->b_iodone = NULL;
458 }
459 if (async)
460 BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
461 else
462 BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
463
464 /*
465 * if EOF is in the middle of the range, zero the part past EOF.
466 * skip over pages which are not PG_FAKE since in that case they have
467 * valid data that we need to preserve.
468 */
469
470 tailstart = bytes;
471 while (tailbytes > 0) {
472 const int len = PAGE_SIZE - (tailstart & PAGE_MASK);
473
474 KASSERT(len <= tailbytes);
475 if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
476 memset((void *)(kva + tailstart), 0, len);
477 UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
478 kva, tailstart, len, 0);
479 }
480 tailstart += len;
481 tailbytes -= len;
482 }
483
484 /*
485 * now loop over the pages, reading as needed.
486 */
487
488 bp = NULL;
489 off_t offset;
490 for (offset = startoffset;
491 bytes > 0;
492 offset += iobytes, bytes -= iobytes) {
493 int run;
494 daddr_t lbn, blkno;
495 int pidx;
496 struct vnode *devvp;
497
498 /*
499 * skip pages which don't need to be read.
500 */
501
502 pidx = (offset - startoffset) >> PAGE_SHIFT;
503 while ((pgs[pidx]->flags & PG_FAKE) == 0) {
504 size_t b;
505
506 KASSERT((offset & (PAGE_SIZE - 1)) == 0);
507 if ((pgs[pidx]->flags & PG_RDONLY)) {
508 sawhole = true;
509 }
510 b = MIN(PAGE_SIZE, bytes);
511 offset += b;
512 bytes -= b;
513 skipbytes += b;
514 pidx++;
515 UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
516 offset, 0,0,0);
517 if (bytes == 0) {
518 goto loopdone;
519 }
520 }
521
522 /*
523 * bmap the file to find out the blkno to read from and
524 * how much we can read in one i/o. if bmap returns an error,
525 * skip the rest of the top-level i/o.
526 */
527
528 lbn = offset >> fs_bshift;
529 error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
530 if (error) {
531 UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
532 lbn,error,0,0);
533 skipbytes += bytes;
534 bytes = 0;
535 goto loopdone;
536 }
537
538 /*
539 * see how many pages can be read with this i/o.
540 * reduce the i/o size if necessary to avoid
541 * overwriting pages with valid data.
542 */
543
544 iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
545 bytes);
546 if (offset + iobytes > round_page(offset)) {
547 int pcount;
548
549 pcount = 1;
550 while (pidx + pcount < npages &&
551 pgs[pidx + pcount]->flags & PG_FAKE) {
552 pcount++;
553 }
554 iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
555 (offset - trunc_page(offset)));
556 }
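		/*
		 * iobytes now never extends into a page that is already
		 * resident (not PG_FAKE), so the read can't clobber valid
		 * cached data.
		 */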
557
558 /*
559 * if this block isn't allocated, zero it instead of
560 * reading it. unless we are going to allocate blocks,
561 * mark the pages we zeroed PG_RDONLY.
562 */
563
564 if (blkno == (daddr_t)-1) {
565 int holepages = (round_page(offset + iobytes) -
566 trunc_page(offset)) >> PAGE_SHIFT;
567 UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
568
569 sawhole = true;
570 memset((char *)kva + (offset - startoffset), 0,
571 iobytes);
572 skipbytes += iobytes;
573
574 mutex_enter(uobj->vmobjlock);
575 for (i = 0; i < holepages; i++) {
576 if (memwrite) {
577 pgs[pidx + i]->flags &= ~PG_CLEAN;
578 }
579 if (!blockalloc) {
580 pgs[pidx + i]->flags |= PG_RDONLY;
581 }
582 }
583 mutex_exit(uobj->vmobjlock);
584 continue;
585 }
586
587 /*
588 * allocate a sub-buf for this piece of the i/o
589 * (or just use mbp if there's only 1 piece),
590 * and start it going.
591 */
592
593 if (offset == startoffset && iobytes == bytes) {
594 bp = mbp;
595 } else {
596 UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
597 vp, bp, vp->v_numoutput, 0);
598 bp = getiobuf(vp, true);
599 nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
600 }
601 bp->b_lblkno = 0;
602
603 /* adjust physical blkno for partial blocks */
604 bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
605 dev_bshift);
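		/*
		 * example (assuming fs_bshift == 13 and dev_bshift == 9 for
		 * illustration): an offset 0x1000 bytes into the file system
		 * block adds 0x1000 >> 9 == 8 device blocks to blkno.
		 */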
606
607 UVMHIST_LOG(ubchist,
608 "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
609 bp, offset, bp->b_bcount, bp->b_blkno);
610
611 VOP_STRATEGY(devvp, bp);
612 }
613
614 loopdone:
615 nestiobuf_done(mbp, skipbytes, error);
616 if (async) {
617 UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
618 if (!glocked) {
619 genfs_node_unlock(vp);
620 }
621 error = 0;
622 goto out_err_free;
623 }
624 if (bp != NULL) {
625 error = biowait(mbp);
626 }
627
628 /* Remove the mapping (make KVA available as soon as possible) */
629 uvm_pagermapout(kva, npages);
630
631 /*
632 	 * if we encountered a hole then we have to do a little more work.
633 * for read faults, we marked the page PG_RDONLY so that future
634 * write accesses to the page will fault again.
635 * for write faults, we must make sure that the backing store for
636 * the page is completely allocated while the pages are locked.
637 */
638
639 if (!error && sawhole && blockalloc) {
640 error = GOP_ALLOC(vp, startoffset,
641 npages << PAGE_SHIFT, 0, cred);
642 UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
643 startoffset, npages << PAGE_SHIFT, error,0);
644 if (!error) {
645 mutex_enter(uobj->vmobjlock);
646 for (i = 0; i < npages; i++) {
647 struct vm_page *pg = pgs[i];
648
649 if (pg == NULL) {
650 continue;
651 }
652 pg->flags &= ~(PG_CLEAN|PG_RDONLY);
653 UVMHIST_LOG(ubchist, "mark dirty pg %p",
654 pg,0,0,0);
655 }
656 mutex_exit(uobj->vmobjlock);
657 }
658 }
659
660 putiobuf(mbp);
661 }
662
663 mapin_fail:
664 if (!glocked) {
665 genfs_node_unlock(vp);
666 }
667 mutex_enter(uobj->vmobjlock);
668
669 /*
670 * we're almost done! release the pages...
671 * for errors, we free the pages.
672 * otherwise we activate them and mark them as valid and clean.
673 * also, unbusy pages that were not actually requested.
674 */
675
676 if (error) {
677 genfs_rel_pages(pgs, npages);
678 mutex_exit(uobj->vmobjlock);
679 UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
680 goto out_err_free;
681 }
682
683 out:
684 UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
685 error = 0;
686 mutex_enter(&uvm_pageqlock);
687 for (i = 0; i < npages; i++) {
688 struct vm_page *pg = pgs[i];
689 if (pg == NULL) {
690 continue;
691 }
692 UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
693 pg, pg->flags, 0,0);
694 if (pg->flags & PG_FAKE && !overwrite) {
695 pg->flags &= ~(PG_FAKE);
696 pmap_clear_modify(pgs[i]);
697 }
698 KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
699 if (i < ridx || i >= ridx + orignmempages || async) {
700 UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
701 pg, pg->offset,0,0);
702 if (pg->flags & PG_WANTED) {
703 wakeup(pg);
704 }
705 if (pg->flags & PG_FAKE) {
706 KASSERT(overwrite);
707 uvm_pagezero(pg);
708 }
709 if (pg->flags & PG_RELEASED) {
710 uvm_pagefree(pg);
711 continue;
712 }
713 uvm_pageenqueue(pg);
714 pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
715 UVM_PAGE_OWN(pg, NULL);
716 }
717 }
718 mutex_exit(&uvm_pageqlock);
719 if (memwrite) {
720 genfs_markdirty(vp);
721 }
722 mutex_exit(uobj->vmobjlock);
723 if (ap->a_m != NULL) {
724 memcpy(ap->a_m, &pgs[ridx],
725 orignmempages * sizeof(struct vm_page *));
726 }
727
728 out_err_free:
729 if (pgs != NULL && pgs != pgs_onstack)
730 kmem_free(pgs, pgs_size);
731 out_err:
732 if (has_trans_wapbl) {
733 if (need_wapbl)
734 WAPBL_END(vp->v_mount);
735 fstrans_done(vp->v_mount);
736 }
737 return error;
738 }
739
740 /*
741 * generic VM putpages routine.
742 * Write the given range of pages to backing store.
743 *
744 * => "offhi == 0" means flush all pages at or after "offlo".
745 * => object should be locked by caller. we return with the
746 * object unlocked.
747 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
748 * thus, a caller might want to unlock higher level resources
749 * (e.g. vm_map) before calling flush.
750 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
751 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
752 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
753 * that new pages are inserted on the tail end of the list. thus,
754 * we can make a complete pass through the object in one go by starting
755 * at the head and working towards the tail (new pages are put in
756 * front of us).
757 * => NOTE: we are allowed to lock the page queues, so the caller
758 * must not be holding the page queue lock.
759 *
760 * note on "cleaning" object and PG_BUSY pages:
761 * this routine is holding the lock on the object. the only time
762 * that it can run into a PG_BUSY page that it does not own is if
763 * some other process has started I/O on the page (e.g. either
764 * a pagein, or a pageout). if the PG_BUSY page is being paged
765 * in, then it can not be dirty (!PG_CLEAN) because no one has
766 * had a chance to modify it yet. if the PG_BUSY page is being
767 * paged out then it means that someone else has already started
768 * cleaning the page for us (how nice!). in this case, if we
769 * have syncio specified, then after we make our pass through the
770 * object we need to wait for the other PG_BUSY pages to clear
771 * off (i.e. we need to do an iosync). also note that once a
772 * page is PG_BUSY it must stay in its object until it is un-busyed.
773 *
774 * note on page traversal:
775 * we can traverse the pages in an object either by going down the
776 * linked list in "uobj->memq", or we can go over the address range
777 * by page doing hash table lookups for each address. depending
778 * on how many pages are in the object it may be cheaper to do one
779 * or the other. we set "by_list" to true if we are using memq.
780 * if the cost of a hash lookup was equal to the cost of the list
781 * traversal we could compare the number of pages in the start->stop
782 * range to the total number of pages in the object. however, it
783 * seems that a hash table lookup is more expensive than the linked
784 * list traversal, so we multiply the number of pages in the
785 * range by an estimate of the relatively higher cost of the hash lookup.
786 */
787
788 int
789 genfs_putpages(void *v)
790 {
791 struct vop_putpages_args /* {
792 struct vnode *a_vp;
793 voff_t a_offlo;
794 voff_t a_offhi;
795 int a_flags;
796 } */ * const ap = v;
797
798 return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
799 ap->a_flags, NULL);
800 }
801
802 int
803 genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
804 int origflags, struct vm_page **busypg)
805 {
806 struct uvm_object * const uobj = &vp->v_uobj;
807 kmutex_t * const slock = uobj->vmobjlock;
808 off_t off;
809 int i, error, npages, nback;
810 int freeflag;
811 #if 1
812 unsigned int maxpages;
813 struct vm_page *pgs[MACHINE_MAXPHYS >> PAGE_SHIFT];
814 #else
815 unsigned int maxpages = 64;
816 struct vm_page *pgs[64];
817 #endif
818 struct vm_page *pg, *nextpg, *tpg, curmp, endmp;
819 bool wasclean, by_list, needs_clean, yld;
820 bool async = (origflags & PGO_SYNCIO) == 0;
821 bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
822 struct lwp * const l = curlwp ? curlwp : &lwp0;
823 struct genfs_node * const gp = VTOG(vp);
824 int flags;
825 int dirtygen;
826 bool modified;
827 bool need_wapbl;
828 bool has_trans;
829 bool cleanall;
830 bool onworklst;
831 static int printed;
832
833 if (vp && vp->v_mount && vp->v_mount->mnt_maxphys) {
834 maxpages = vp->v_mount->mnt_maxphys >> PAGE_SHIFT;
835 } else {
836 maxpages = MAXPHYS >> PAGE_SHIFT;
837 }
838
839 	if (!printed || maxpages > printed) {
840 printf("putpages: maxpages %d\n", maxpages);
841 printed = maxpages;
842 }
843
844 UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
845
846 KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
847 KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
848 KASSERT(startoff < endoff || endoff == 0);
849
850 UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
851 vp, uobj->uo_npages, startoff, endoff - startoff);
852
853 has_trans = false;
854 need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
855 (origflags & PGO_JOURNALLOCKED) == 0);
856
857 retry:
858 modified = false;
859 flags = origflags;
860 KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
861 (vp->v_iflag & VI_WRMAPDIRTY) == 0);
862 if (uobj->uo_npages == 0) {
863 if (vp->v_iflag & VI_ONWORKLST) {
864 vp->v_iflag &= ~VI_WRMAPDIRTY;
865 if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
866 vn_syncer_remove_from_worklist(vp);
867 }
868 if (has_trans) {
869 if (need_wapbl)
870 WAPBL_END(vp->v_mount);
871 fstrans_done(vp->v_mount);
872 }
873 mutex_exit(slock);
874 return (0);
875 }
876
877 /*
878 * the vnode has pages, set up to process the request.
879 */
880
881 if (!has_trans && (flags & PGO_CLEANIT) != 0) {
882 mutex_exit(slock);
883 if (pagedaemon) {
884 error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
885 if (error)
886 return error;
887 } else
888 fstrans_start(vp->v_mount, FSTRANS_LAZY);
889 if (need_wapbl) {
890 error = WAPBL_BEGIN(vp->v_mount);
891 if (error) {
892 fstrans_done(vp->v_mount);
893 return error;
894 }
895 }
896 has_trans = true;
897 mutex_enter(slock);
898 goto retry;
899 }
900
901 error = 0;
902 wasclean = (vp->v_numoutput == 0);
903 off = startoff;
904 if (endoff == 0 || flags & PGO_ALLPAGES) {
905 endoff = trunc_page(LLONG_MAX);
906 }
907 by_list = (uobj->uo_npages <=
908 ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);
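	/*
	 * i.e. walk the object's page list only when it holds no more than
	 * UVM_PAGE_TREE_PENALTY times as many pages as the requested range
	 * covers; the penalty factor is an estimate of how much more a
	 * per-offset page lookup costs than a single list step.
	 */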
909
910 /*
911 * if this vnode is known not to have dirty pages,
912 * don't bother to clean it out.
913 */
914
915 if ((vp->v_iflag & VI_ONWORKLST) == 0) {
916 #if !defined(DEBUG)
917 if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
918 goto skip_scan;
919 }
920 #endif /* !defined(DEBUG) */
921 flags &= ~PGO_CLEANIT;
922 }
923
924 /*
925 * start the loop. when scanning by list, hold the last page
926 * in the list before we start. pages allocated after we start
927 * will be added to the end of the list, so we can stop at the
928 * current last page.
929 */
930
931 cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
932 startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
933 (vp->v_iflag & VI_ONWORKLST) != 0;
934 dirtygen = gp->g_dirtygen;
935 freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
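	/*
	 * pages freed on behalf of the pagedaemon are tagged PG_PAGEOUT
	 * rather than PG_RELEASED so the i/o completion path can account
	 * for them with uvm_pageout_done(); see the matching
	 * uvm_pageout_start() call in the PGO_FREE case below.
	 */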
936 if (by_list) {
937 curmp.flags = PG_MARKER;
938 endmp.flags = PG_MARKER;
939 pg = TAILQ_FIRST(&uobj->memq);
940 TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
941 } else {
942 pg = uvm_pagelookup(uobj, off);
943 }
944 nextpg = NULL;
945 while (by_list || off < endoff) {
946
947 /*
948 * if the current page is not interesting, move on to the next.
949 */
950
951 KASSERT(pg == NULL || pg->uobject == uobj ||
952 (pg->flags & PG_MARKER) != 0);
953 KASSERT(pg == NULL ||
954 (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
955 (pg->flags & (PG_BUSY|PG_MARKER)) != 0);
956 if (by_list) {
957 if (pg == &endmp) {
958 break;
959 }
960 if (pg->flags & PG_MARKER) {
961 pg = TAILQ_NEXT(pg, listq.queue);
962 continue;
963 }
964 if (pg->offset < startoff || pg->offset >= endoff ||
965 pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
966 if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
967 wasclean = false;
968 }
969 pg = TAILQ_NEXT(pg, listq.queue);
970 continue;
971 }
972 off = pg->offset;
973 } else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
974 if (pg != NULL) {
975 wasclean = false;
976 }
977 off += PAGE_SIZE;
978 if (off < endoff) {
979 pg = uvm_pagelookup(uobj, off);
980 }
981 continue;
982 }
983
984 /*
985 * if the current page needs to be cleaned and it's busy,
986 * wait for it to become unbusy.
987 */
988
989 yld = (l->l_cpu->ci_schedstate.spc_flags &
990 SPCF_SHOULDYIELD) && !pagedaemon;
991 if (pg->flags & PG_BUSY || yld) {
992 UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
993 if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
994 UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
995 error = EDEADLK;
996 if (busypg != NULL)
997 *busypg = pg;
998 break;
999 }
1000 if (pagedaemon) {
1001 /*
1002 * someone has taken the page while we
1003 * dropped the lock for fstrans_start.
1004 */
1005 break;
1006 }
1007 if (by_list) {
1008 TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
1009 UVMHIST_LOG(ubchist, "curmp next %p",
1010 TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
1011 }
1012 if (yld) {
1013 mutex_exit(slock);
1014 preempt();
1015 mutex_enter(slock);
1016 } else {
1017 pg->flags |= PG_WANTED;
1018 UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
1019 mutex_enter(slock);
1020 }
1021 if (by_list) {
1022 UVMHIST_LOG(ubchist, "after next %p",
1023 TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
1024 pg = TAILQ_NEXT(&curmp, listq.queue);
1025 TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
1026 } else {
1027 pg = uvm_pagelookup(uobj, off);
1028 }
1029 continue;
1030 }
1031
1032 /*
1033 * if we're freeing, remove all mappings of the page now.
1034 	 * if we're cleaning, check if the page needs to be cleaned.
1035 */
1036
1037 if (flags & PGO_FREE) {
1038 pmap_page_protect(pg, VM_PROT_NONE);
1039 } else if (flags & PGO_CLEANIT) {
1040
1041 /*
1042 * if we still have some hope to pull this vnode off
1043 * from the syncer queue, write-protect the page.
1044 */
1045
1046 if (cleanall && wasclean &&
1047 gp->g_dirtygen == dirtygen) {
1048
1049 /*
1050 * uobj pages get wired only by uvm_fault
1051 * where uobj is locked.
1052 */
1053
1054 if (pg->wire_count == 0) {
1055 pmap_page_protect(pg,
1056 VM_PROT_READ|VM_PROT_EXECUTE);
1057 } else {
1058 cleanall = false;
1059 }
1060 }
1061 }
1062
1063 if (flags & PGO_CLEANIT) {
1064 needs_clean = pmap_clear_modify(pg) ||
1065 (pg->flags & PG_CLEAN) == 0;
1066 pg->flags |= PG_CLEAN;
1067 } else {
1068 needs_clean = false;
1069 }
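		/*
		 * note that PG_CLEAN is set before the page is actually
		 * written; if the page is modified again through a mapping
		 * while the write is in flight, pmap_clear_modify() will
		 * report it dirty on a later pass and it will be written
		 * again.
		 */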
1070
1071 /*
1072 * if we're cleaning, build a cluster.
1073 * the cluster will consist of pages which are currently dirty,
1074 * but they will be returned to us marked clean.
1075 * if not cleaning, just operate on the one page.
1076 */
1077
1078 if (needs_clean) {
1079 KDASSERT((vp->v_iflag & VI_ONWORKLST));
1080 wasclean = false;
1081 memset(pgs, 0, sizeof(pgs));
1082 pg->flags |= PG_BUSY;
1083 UVM_PAGE_OWN(pg, "genfs_putpages");
1084
1085 /*
1086 * first look backward.
1087 */
1088
1089 npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
1090 nback = npages;
1091 uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
1092 UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
1093 if (nback) {
1094 memmove(&pgs[0], &pgs[npages - nback],
1095 nback * sizeof(pgs[0]));
1096 if (npages - nback < nback)
1097 memset(&pgs[nback], 0,
1098 (npages - nback) * sizeof(pgs[0]));
1099 else
1100 memset(&pgs[npages - nback], 0,
1101 nback * sizeof(pgs[0]));
1102 }
1103
1104 /*
1105 * then plug in our page of interest.
1106 */
1107
1108 pgs[nback] = pg;
1109
1110 /*
1111 * then look forward to fill in the remaining space in
1112 * the array of pages.
1113 */
1114
1115 npages = maxpages - nback - 1;
1116 uvn_findpages(uobj, off + PAGE_SIZE, &npages,
1117 &pgs[nback + 1],
1118 UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
1119 npages += nback + 1;
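			/*
			 * the cluster array now holds, in ascending offset
			 * order: the dirty pages found behind "off"
			 * (pgs[0 .. nback-1]), the page of interest
			 * (pgs[nback]), and the dirty pages found ahead of
			 * it (pgs[nback+1 .. npages-1]), all returned busy
			 * by uvn_findpages().
			 */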
1120 } else {
1121 pgs[0] = pg;
1122 npages = 1;
1123 nback = 0;
1124 }
1125
1126 /*
1127 * apply FREE or DEACTIVATE options if requested.
1128 */
1129
1130 if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
1131 mutex_enter(&uvm_pageqlock);
1132 }
1133 for (i = 0; i < npages; i++) {
1134 tpg = pgs[i];
1135 KASSERT(tpg->uobject == uobj);
1136 if (by_list && tpg == TAILQ_NEXT(pg, listq.queue))
1137 pg = tpg;
1138 if (tpg->offset < startoff || tpg->offset >= endoff)
1139 continue;
1140 if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
1141 uvm_pagedeactivate(tpg);
1142 } else if (flags & PGO_FREE) {
1143 pmap_page_protect(tpg, VM_PROT_NONE);
1144 if (tpg->flags & PG_BUSY) {
1145 tpg->flags |= freeflag;
1146 if (pagedaemon) {
1147 uvm_pageout_start(1);
1148 uvm_pagedequeue(tpg);
1149 }
1150 } else {
1151
1152 /*
1153 * ``page is not busy''
1154 * implies that npages is 1
1155 * and needs_clean is false.
1156 */
1157
1158 nextpg = TAILQ_NEXT(tpg, listq.queue);
1159 uvm_pagefree(tpg);
1160 if (pagedaemon)
1161 uvmexp.pdfreed++;
1162 }
1163 }
1164 }
1165 if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
1166 mutex_exit(&uvm_pageqlock);
1167 }
1168 if (needs_clean) {
1169 modified = true;
1170
1171 /*
1172 * start the i/o. if we're traversing by list,
1173 * keep our place in the list with a marker page.
1174 */
1175
1176 if (by_list) {
1177 TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
1178 listq.queue);
1179 }
1180 mutex_exit(slock);
1181 error = GOP_WRITE(vp, pgs, npages, flags);
1182 mutex_enter(slock);
1183 if (by_list) {
1184 pg = TAILQ_NEXT(&curmp, listq.queue);
1185 TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
1186 }
1187 if (error) {
1188 break;
1189 }
1190 if (by_list) {
1191 continue;
1192 }
1193 }
1194
1195 /*
1196 * find the next page and continue if there was no error.
1197 */
1198
1199 if (by_list) {
1200 if (nextpg) {
1201 pg = nextpg;
1202 nextpg = NULL;
1203 } else {
1204 pg = TAILQ_NEXT(pg, listq.queue);
1205 }
1206 } else {
1207 off += (npages - nback) << PAGE_SHIFT;
1208 if (off < endoff) {
1209 pg = uvm_pagelookup(uobj, off);
1210 }
1211 }
1212 }
1213 if (by_list) {
1214 TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
1215 }
1216
1217 if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
1218 (vp->v_type != VBLK ||
1219 (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
1220 GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
1221 }
1222
1223 /*
1224 * if we're cleaning and there was nothing to clean,
1225 * take us off the syncer list. if we started any i/o
1226 * and we're doing sync i/o, wait for all writes to finish.
1227 */
1228
1229 if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
1230 (vp->v_iflag & VI_ONWORKLST) != 0) {
1231 #if defined(DEBUG)
1232 TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
1233 if ((pg->flags & (PG_FAKE | PG_MARKER)) != 0) {
1234 continue;
1235 }
1236 if ((pg->flags & PG_CLEAN) == 0) {
1237 printf("%s: %p: !CLEAN\n", __func__, pg);
1238 }
1239 if (pmap_is_modified(pg)) {
1240 printf("%s: %p: modified\n", __func__, pg);
1241 }
1242 }
1243 #endif /* defined(DEBUG) */
1244 vp->v_iflag &= ~VI_WRMAPDIRTY;
1245 if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
1246 vn_syncer_remove_from_worklist(vp);
1247 }
1248
1249 #if !defined(DEBUG)
1250 skip_scan:
1251 #endif /* !defined(DEBUG) */
1252
1253 /* Wait for output to complete. */
1254 if (!wasclean && !async && vp->v_numoutput != 0) {
1255 while (vp->v_numoutput != 0)
1256 cv_wait(&vp->v_cv, slock);
1257 }
1258 onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
1259 mutex_exit(slock);
1260
1261 if ((flags & PGO_RECLAIM) != 0 && onworklst) {
1262 /*
1263 * in the case of PGO_RECLAIM, ensure to make the vnode clean.
1264 * retrying is not a big deal because, in many cases,
1265 * uobj->uo_npages is already 0 here.
1266 */
1267 mutex_enter(slock);
1268 goto retry;
1269 }
1270
1271 if (has_trans) {
1272 if (need_wapbl)
1273 WAPBL_END(vp->v_mount);
1274 fstrans_done(vp->v_mount);
1275 }
1276
1277 return (error);
1278 }
1279
1280 int
1281 genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
1282 {
1283 off_t off;
1284 vaddr_t kva;
1285 size_t len;
1286 int error;
1287 UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
1288
1289 UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
1290 vp, pgs, npages, flags);
1291
1292 off = pgs[0]->offset;
1293 kva = uvm_pagermapin(pgs, npages,
1294 UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
1295 len = npages << PAGE_SHIFT;
1296
1297 error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
1298 uvm_aio_biodone);
1299
1300 return error;
1301 }
1302
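/*
 * genfs_gop_write_rwmap: like genfs_gop_write, except that the pages are
 * mapped with UVMPAGER_MAPIN_READ, i.e. with a writable kernel mapping,
 * presumably so that the file system can modify the buffer contents while
 * the pages are being written out.
 */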
1303 int
1304 genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
1305 {
1306 off_t off;
1307 vaddr_t kva;
1308 size_t len;
1309 int error;
1310 UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
1311
1312 UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
1313 vp, pgs, npages, flags);
1314
1315 off = pgs[0]->offset;
1316 kva = uvm_pagermapin(pgs, npages,
1317 UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
1318 len = npages << PAGE_SHIFT;
1319
1320 error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
1321 uvm_aio_biodone);
1322
1323 return error;
1324 }
1325
1326 /*
1327 * Backend routine for doing I/O to vnode pages. Pages are already locked
1328 * and mapped into kernel memory. Here we just look up the underlying
1329 * device block addresses and call the strategy routine.
1330 */
1331
1332 static int
1333 genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
1334 enum uio_rw rw, void (*iodone)(struct buf *))
1335 {
1336 int s, error;
1337 int fs_bshift, dev_bshift;
1338 off_t eof, offset, startoffset;
1339 size_t bytes, iobytes, skipbytes;
1340 struct buf *mbp, *bp;
1341 const bool async = (flags & PGO_SYNCIO) == 0;
1342 	const bool lazy = (flags & PGO_LAZY) != 0;
1343 const bool iowrite = rw == UIO_WRITE;
1344 const int brw = iowrite ? B_WRITE : B_READ;
1345 UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
1346
1347 UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
1348 vp, kva, len, flags);
1349
1350 KASSERT(vp->v_size <= vp->v_writesize);
1351 GOP_SIZE(vp, vp->v_writesize, &eof, 0);
1352 if (vp->v_type != VBLK) {
1353 fs_bshift = vp->v_mount->mnt_fs_bshift;
1354 dev_bshift = vp->v_mount->mnt_dev_bshift;
1355 } else {
1356 fs_bshift = DEV_BSHIFT;
1357 dev_bshift = DEV_BSHIFT;
1358 }
1359 error = 0;
1360 startoffset = off;
1361 bytes = MIN(len, eof - startoffset);
1362 skipbytes = 0;
1363 KASSERT(bytes != 0);
1364
1365 if (iowrite) {
1366 mutex_enter(vp->v_interlock);
1367 vp->v_numoutput += 2;
1368 mutex_exit(vp->v_interlock);
1369 }
1370 mbp = getiobuf(vp, true);
1371 UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
1372 vp, mbp, vp->v_numoutput, bytes);
1373 mbp->b_bufsize = len;
1374 mbp->b_data = (void *)kva;
1375 mbp->b_resid = mbp->b_bcount = bytes;
1376 mbp->b_cflags = BC_BUSY | BC_AGE;
1377 if (async) {
1378 mbp->b_flags = brw | B_ASYNC;
1379 mbp->b_iodone = iodone;
1380 } else {
1381 mbp->b_flags = brw;
1382 mbp->b_iodone = NULL;
1383 }
1384 if (curlwp == uvm.pagedaemon_lwp)
1385 BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
1386 else if (async || lazy)
1387 BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
1388 else
1389 BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
1390
1391 bp = NULL;
1392 for (offset = startoffset;
1393 bytes > 0;
1394 offset += iobytes, bytes -= iobytes) {
1395 int run;
1396 daddr_t lbn, blkno;
1397 struct vnode *devvp;
1398
1399 /*
1400 * bmap the file to find out the blkno to read from and
1401 * how much we can read in one i/o. if bmap returns an error,
1402 * skip the rest of the top-level i/o.
1403 */
1404
1405 lbn = offset >> fs_bshift;
1406 error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
1407 if (error) {
1408 UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
1409 lbn,error,0,0);
1410 skipbytes += bytes;
1411 bytes = 0;
1412 goto loopdone;
1413 }
1414
1415 /*
1416 * see how many pages can be read with this i/o.
1417 * reduce the i/o size if necessary to avoid
1418 * overwriting pages with valid data.
1419 */
1420
1421 iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
1422 bytes);
1423
1424 		/*
1425 		 * if this block isn't allocated, we can't do any i/o to it.
1426 		 * for reads, zero the corresponding part of the buffer
1427 		 * instead; for writes, just skip it.
1428 		 */
1429
1430 if (blkno == (daddr_t)-1) {
1431 if (!iowrite) {
1432 memset((char *)kva + (offset - startoffset), 0,
1433 iobytes);
1434 }
1435 skipbytes += iobytes;
1436 continue;
1437 }
1438
1439 /*
1440 * allocate a sub-buf for this piece of the i/o
1441 * (or just use mbp if there's only 1 piece),
1442 * and start it going.
1443 */
1444
1445 if (offset == startoffset && iobytes == bytes) {
1446 bp = mbp;
1447 } else {
1448 UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
1449 vp, bp, vp->v_numoutput, 0);
1450 bp = getiobuf(vp, true);
1451 nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
1452 }
1453 bp->b_lblkno = 0;
1454
1455 /* adjust physical blkno for partial blocks */
1456 bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
1457 dev_bshift);
1458
1459 UVMHIST_LOG(ubchist,
1460 "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
1461 bp, offset, bp->b_bcount, bp->b_blkno);
1462
1463 VOP_STRATEGY(devvp, bp);
1464 }
1465
1466 loopdone:
1467 if (skipbytes) {
1468 UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
1469 }
1470 nestiobuf_done(mbp, skipbytes, error);
1471 if (async) {
1472 UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
1473 return (0);
1474 }
1475 UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
1476 error = biowait(mbp);
1477 s = splbio();
1478 (*iodone)(mbp);
1479 splx(s);
1480 UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
1481 return (error);
1482 }
1483
1484 int
1485 genfs_compat_getpages(void *v)
1486 {
1487 struct vop_getpages_args /* {
1488 struct vnode *a_vp;
1489 voff_t a_offset;
1490 struct vm_page **a_m;
1491 int *a_count;
1492 int a_centeridx;
1493 vm_prot_t a_access_type;
1494 int a_advice;
1495 int a_flags;
1496 } */ *ap = v;
1497
1498 off_t origoffset;
1499 struct vnode *vp = ap->a_vp;
1500 struct uvm_object *uobj = &vp->v_uobj;
1501 struct vm_page *pg, **pgs;
1502 vaddr_t kva;
1503 int i, error, orignpages, npages;
1504 struct iovec iov;
1505 struct uio uio;
1506 kauth_cred_t cred = curlwp->l_cred;
1507 const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
1508
1509 error = 0;
1510 origoffset = ap->a_offset;
1511 orignpages = *ap->a_count;
1512 pgs = ap->a_m;
1513
1514 if (ap->a_flags & PGO_LOCKED) {
1515 uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
1516 		    UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
1517
1518 error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
1519 if (error == 0 && memwrite) {
1520 genfs_markdirty(vp);
1521 }
1522 return error;
1523 }
1524 if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
1525 mutex_exit(uobj->vmobjlock);
1526 return EINVAL;
1527 }
1528 if ((ap->a_flags & PGO_SYNCIO) == 0) {
1529 mutex_exit(uobj->vmobjlock);
1530 return 0;
1531 }
1532 npages = orignpages;
1533 uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
1534 mutex_exit(uobj->vmobjlock);
1535 kva = uvm_pagermapin(pgs, npages,
1536 UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
1537 for (i = 0; i < npages; i++) {
1538 pg = pgs[i];
1539 if ((pg->flags & PG_FAKE) == 0) {
1540 continue;
1541 }
1542 iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
1543 iov.iov_len = PAGE_SIZE;
1544 uio.uio_iov = &iov;
1545 uio.uio_iovcnt = 1;
1546 uio.uio_offset = origoffset + (i << PAGE_SHIFT);
1547 uio.uio_rw = UIO_READ;
1548 uio.uio_resid = PAGE_SIZE;
1549 UIO_SETUP_SYSSPACE(&uio);
1550 /* XXX vn_lock */
1551 error = VOP_READ(vp, &uio, 0, cred);
1552 if (error) {
1553 break;
1554 }
1555 if (uio.uio_resid) {
1556 memset(iov.iov_base, 0, uio.uio_resid);
1557 }
1558 }
1559 uvm_pagermapout(kva, npages);
1560 mutex_enter(uobj->vmobjlock);
1561 mutex_enter(&uvm_pageqlock);
1562 for (i = 0; i < npages; i++) {
1563 pg = pgs[i];
1564 if (error && (pg->flags & PG_FAKE) != 0) {
1565 pg->flags |= PG_RELEASED;
1566 } else {
1567 pmap_clear_modify(pg);
1568 uvm_pageactivate(pg);
1569 }
1570 }
1571 if (error) {
1572 uvm_page_unbusy(pgs, npages);
1573 }
1574 mutex_exit(&uvm_pageqlock);
1575 if (error == 0 && memwrite) {
1576 genfs_markdirty(vp);
1577 }
1578 mutex_exit(uobj->vmobjlock);
1579 return error;
1580 }
1581
1582 int
1583 genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
1584 int flags)
1585 {
1586 off_t offset;
1587 struct iovec iov;
1588 struct uio uio;
1589 kauth_cred_t cred = curlwp->l_cred;
1590 struct buf *bp;
1591 vaddr_t kva;
1592 int error;
1593
1594 offset = pgs[0]->offset;
1595 kva = uvm_pagermapin(pgs, npages,
1596 UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
1597
1598 iov.iov_base = (void *)kva;
1599 iov.iov_len = npages << PAGE_SHIFT;
1600 uio.uio_iov = &iov;
1601 uio.uio_iovcnt = 1;
1602 uio.uio_offset = offset;
1603 uio.uio_rw = UIO_WRITE;
1604 uio.uio_resid = npages << PAGE_SHIFT;
1605 UIO_SETUP_SYSSPACE(&uio);
1606 /* XXX vn_lock */
1607 error = VOP_WRITE(vp, &uio, 0, cred);
1608
1609 mutex_enter(vp->v_interlock);
1610 vp->v_numoutput++;
1611 mutex_exit(vp->v_interlock);
1612
1613 bp = getiobuf(vp, true);
1614 bp->b_cflags = BC_BUSY | BC_AGE;
1615 bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
1616 bp->b_data = (char *)kva;
1617 bp->b_bcount = npages << PAGE_SHIFT;
1618 bp->b_bufsize = npages << PAGE_SHIFT;
1619 bp->b_resid = 0;
1620 bp->b_error = error;
1621 uvm_aio_aiodone(bp);
1622 return (error);
1623 }
1624
1625 /*
1626 * Process a uio using direct I/O. If we reach a part of the request
1627 * which cannot be processed in this fashion for some reason, just return.
1628 * The caller must handle some additional part of the request using
1629 * buffered I/O before trying direct I/O again.
1630 */
1631
1632 void
1633 genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
1634 {
1635 struct vmspace *vs;
1636 struct iovec *iov;
1637 vaddr_t va;
1638 size_t len;
1639 const int mask = DEV_BSIZE - 1;
1640 int error;
1641 bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
1642 (ioflag & IO_JOURNALLOCKED) == 0);
1643
1644 /*
1645 * We only support direct I/O to user space for now.
1646 */
1647
1648 if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
1649 return;
1650 }
1651
1652 /*
1653 * If the vnode is mapped, we would need to get the getpages lock
1654 * to stabilize the bmap, but then we would get into trouble while
1655 * locking the pages if the pages belong to this same vnode (or a
1656 * multi-vnode cascade to the same effect). Just fall back to
1657 * buffered I/O if the vnode is mapped to avoid this mess.
1658 */
1659
1660 if (vp->v_vflag & VV_MAPPED) {
1661 return;
1662 }
1663
1664 if (need_wapbl) {
1665 error = WAPBL_BEGIN(vp->v_mount);
1666 if (error)
1667 return;
1668 }
1669
1670 /*
1671 * Do as much of the uio as possible with direct I/O.
1672 */
1673
1674 vs = uio->uio_vmspace;
1675 while (uio->uio_resid) {
1676 iov = uio->uio_iov;
1677 if (iov->iov_len == 0) {
1678 uio->uio_iov++;
1679 uio->uio_iovcnt--;
1680 continue;
1681 }
1682 va = (vaddr_t)iov->iov_base;
1683 len = MIN(iov->iov_len, genfs_maxdio);
1684 len &= ~mask;
1685
1686 /*
1687 * If the next chunk is smaller than DEV_BSIZE or extends past
1688 * the current EOF, then fall back to buffered I/O.
1689 */
1690
1691 if (len == 0 || uio->uio_offset + len > vp->v_size) {
1692 break;
1693 }
1694
1695 /*
1696 * Check alignment. The file offset must be at least
1697 * sector-aligned. The exact constraint on memory alignment
1698 * is very hardware-dependent, but requiring sector-aligned
1699 * addresses there too is safe.
1700 */
1701
1702 if (uio->uio_offset & mask || va & mask) {
1703 break;
1704 }
1705 error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
1706 uio->uio_rw);
1707 if (error) {
1708 break;
1709 }
1710 iov->iov_base = (char *)iov->iov_base + len;
1711 iov->iov_len -= len;
1712 uio->uio_offset += len;
1713 uio->uio_resid -= len;
1714 }
1715
1716 if (need_wapbl)
1717 WAPBL_END(vp->v_mount);
1718 }
1719
1720 /*
1721 * Iodone routine for direct I/O. We don't do much here since the request is
1722 * always synchronous, so the caller will do most of the work after biowait().
1723 */
1724
1725 static void
1726 genfs_dio_iodone(struct buf *bp)
1727 {
1728
1729 KASSERT((bp->b_flags & B_ASYNC) == 0);
1730 if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
1731 mutex_enter(bp->b_objlock);
1732 vwakeup(bp);
1733 mutex_exit(bp->b_objlock);
1734 }
1735 putiobuf(bp);
1736 }
1737
1738 /*
1739 * Process one chunk of a direct I/O request.
1740 */
1741
1742 static int
1743 genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
1744 off_t off, enum uio_rw rw)
1745 {
1746 struct vm_map *map;
1747 struct pmap *upm, *kpm;
1748 size_t klen = round_page(uva + len) - trunc_page(uva);
1749 off_t spoff, epoff;
1750 vaddr_t kva, puva;
1751 paddr_t pa;
1752 vm_prot_t prot;
1753 int error, rv, poff, koff;
1754 const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
1755 (rw == UIO_WRITE ? PGO_FREE : 0);
1756
1757 /*
1758 * For writes, verify that this range of the file already has fully
1759 * allocated backing store. If there are any holes, just punt and
1760 * make the caller take the buffered write path.
1761 */
1762
1763 if (rw == UIO_WRITE) {
1764 daddr_t lbn, elbn, blkno;
1765 int bsize, bshift, run;
1766
1767 bshift = vp->v_mount->mnt_fs_bshift;
1768 bsize = 1 << bshift;
1769 lbn = off >> bshift;
1770 elbn = (off + len + bsize - 1) >> bshift;
1771 while (lbn < elbn) {
1772 error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
1773 if (error) {
1774 return error;
1775 }
1776 if (blkno == (daddr_t)-1) {
1777 return ENOSPC;
1778 }
1779 lbn += 1 + run;
1780 }
1781 }
1782
1783 /*
1784 * Flush any cached pages for parts of the file that we're about to
1785 * access. If we're writing, invalidate pages as well.
1786 */
1787
1788 spoff = trunc_page(off);
1789 epoff = round_page(off + len);
1790 mutex_enter(vp->v_interlock);
1791 error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
1792 if (error) {
1793 return error;
1794 }
1795
1796 /*
1797 * Wire the user pages and remap them into kernel memory.
1798 */
1799
1800 prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
1801 error = uvm_vslock(vs, (void *)uva, len, prot);
1802 if (error) {
1803 return error;
1804 }
1805
1806 map = &vs->vm_map;
1807 upm = vm_map_pmap(map);
1808 kpm = vm_map_pmap(kernel_map);
1809 puva = trunc_page(uva);
1810 kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
1811 UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
1812 for (poff = 0; poff < klen; poff += PAGE_SIZE) {
1813 rv = pmap_extract(upm, puva + poff, &pa);
1814 KASSERT(rv);
1815 pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
1816 }
1817 pmap_update(kpm);
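	/*
	 * the user pages are now double-mapped: at puva in the user pmap
	 * and at kva in the kernel pmap, with the kernel VA chosen to match
	 * the user VA's cache color (UVM_KMF_COLORMATCH) so the alias is
	 * safe on virtually indexed caches.
	 */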
1818
1819 /*
1820 * Do the I/O.
1821 */
1822
1823 koff = uva - trunc_page(uva);
1824 error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
1825 genfs_dio_iodone);
1826
1827 /*
1828 * Tear down the kernel mapping.
1829 */
1830
1831 pmap_kremove(kva, klen);
1832 pmap_update(kpm);
1833 uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
1834
1835 /*
1836 * Unwire the user pages.
1837 */
1838
1839 uvm_vsunlock(vs, (void *)uva, len);
1840 return error;
1841 }
1842