/*	$NetBSD: genfs_vnops.c,v 1.101 2005/07/17 12:27:47 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.101 2005/07/17 12:27:47 yamt Exp $");

#if defined(_KERNEL_OPT)
#include "opt_nfsserver.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/mman.h>
#include <sys/file.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

#ifdef NFSSERVER
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>
#endif

static __inline void genfs_rel_pages(struct vm_page **, int);
static void filt_genfsdetach(struct knote *);
static int filt_genfsread(struct knote *, long);
static int filt_genfsvnode(struct knote *, long);


#define MAX_READ_AHEAD	16	/* XXXUBC 16 */
int genfs_rapages = MAX_READ_AHEAD; /* # of pages in each chunk of readahead */
int genfs_racount = 2;		/* # of page chunks to readahead */
int genfs_raskip = 2;		/* # of busy page chunks allowed to skip */
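
/*
 * With the defaults above (and assuming 4k pages), read-ahead issues up
 * to genfs_racount chunks of genfs_rapages pages each, i.e. 2 x 64k,
 * matching the "next 128k on 64k boundaries" note in genfs_getpages().
 */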

int
genfs_poll(void *v)
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct proc *a_p;
	} */ *ap = v;

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
genfs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_flags;
		off_t offlo;
		off_t offhi;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp, *dvp;
	int wait;
	int error;

	wait = (ap->a_flags & FSYNC_WAIT) != 0;
	vflushbuf(vp, wait);
	if ((ap->a_flags & FSYNC_DATAONLY) != 0)
		error = 0;
	else
		error = VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0);

	if (error == 0 && ap->a_flags & FSYNC_CACHE) {
		int l = 0;
		if (VOP_BMAP(vp, 0, &dvp, NULL, NULL))
			error = ENXIO;
		else
			error = VOP_IOCTL(dvp, DIOCCACHESYNC, &l, FWRITE,
			    ap->a_p->p_ucred, ap->a_p);
	}

	return (error);
}

int
genfs_seek(void *v)
{
	struct vop_seek_args /* {
		struct vnode *a_vp;
		off_t a_oldoff;
		off_t a_newoff;
		struct ucred *a_ucred;
	} */ *ap = v;

	if (ap->a_newoff < 0)
		return (EINVAL);

	return (0);
}

int
genfs_abortop(void *v)
{
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;

	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
		PNBUF_PUT(ap->a_cnp->cn_pnbuf);
	return (0);
}

int
genfs_fcntl(void *v)
{
	struct vop_fcntl_args /* {
		struct vnode *a_vp;
		u_int a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;

	if (ap->a_command == F_SETFL)
		return (0);
	else
		return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_badop(void *v)
{

	panic("genfs: bad op");
}

/*ARGSUSED*/
int
genfs_nullop(void *v)
{

	return (0);
}

/*ARGSUSED*/
int
genfs_einval(void *v)
{

	return (EINVAL);
}

/*
 * Called when an fs doesn't support a particular vop.
 * This takes care to vrele, vput, or vunlock passed in vnodes.
 */
int
genfs_eopnotsupp(void *v)
{
	struct vop_generic_args /*
		struct vnodeop_desc *a_desc;
		/ * other random data follows, presumably * /
	} */ *ap = v;
	struct vnodeop_desc *desc = ap->a_desc;
	struct vnode *vp, *vp_last = NULL;
	int flags, i, j, offset;

	flags = desc->vdesc_flags;
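	/*
	 * The per-vnode WILL* bits in vdesc_flags are allocated one bit
	 * position apart, so shifting right by one per iteration brings
	 * vp[i]'s bits down into the VP0 position tested below.
	 */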
	for (i = 0; i < VDESC_MAX_VPS; flags >>= 1, i++) {
		if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
			break;	/* stop at end of list */
		if ((j = flags & VDESC_VP0_WILLPUT)) {
			vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);

			/* Skip if NULL */
			if (!vp)
				continue;

			switch (j) {
			case VDESC_VP0_WILLPUT:
				/* Check for dvp == vp cases */
				if (vp == vp_last)
					vrele(vp);
				else {
					vput(vp);
					vp_last = vp;
				}
				break;
			case VDESC_VP0_WILLUNLOCK:
				VOP_UNLOCK(vp, 0);
				break;
			case VDESC_VP0_WILLRELE:
				vrele(vp);
				break;
			}
		}
	}

	return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_ebadf(void *v)
{

	return (EBADF);
}

/* ARGSUSED */
int
genfs_enoioctl(void *v)
{

	return (EPASSTHROUGH);
}


/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
genfs_revoke(void *v)
{
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp, *vq;
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if ((ap->a_flags & REVOKEALL) == 0)
		panic("genfs_revoke: not revokeall");
#endif

	vp = ap->a_vp;
	simple_lock(&vp->v_interlock);

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			ltsleep(vp, PINOD|PNORELOCK, "vop_revokeall", 0,
			    &vp->v_interlock);
			return (0);
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		simple_unlock(&vp->v_interlock);
		while (vp->v_flag & VALIASED) {
			simple_lock(&spechash_slock);
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				simple_unlock(&spechash_slock);
				vgone(vq);
				break;
			}
			if (vq == NULLVP)
				simple_unlock(&spechash_slock);
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		simple_lock(&vp->v_interlock);
		vp->v_flag &= ~VXLOCK;
	}
	vgonel(vp, p);
	return (0);
}

/*
 * Lock the node.
 */
int
genfs_lock(void *v)
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags, &vp->v_interlock));
}

/*
 * Unlock the node.
 */
int
genfs_unlock(void *v)
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
	    &vp->v_interlock));
}

/*
 * Return whether or not the node is locked.
 */
int
genfs_islocked(void *v)
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockstatus(vp->v_vnlock));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 */
int
genfs_nolock(void *v)
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
}

int
genfs_nounlock(void *v)
{

	return (0);
}

int
genfs_noislocked(void *v)
{

	return (0);
}

/*
 * Local lease check for NFS servers.  Just set up args and let
 * nqsrv_getlease() do the rest.  If NFSSERVER is not in the kernel,
 * this is a null operation.
 */
int
genfs_lease_check(void *v)
{
#ifdef NFSSERVER
	struct vop_lease_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
		struct ucred *a_cred;
		int a_flag;
	} */ *ap = v;
	u_int32_t duration = 0;
	int cache;
	u_quad_t frev;

	(void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
	    NQLOCALSLP, ap->a_p, (struct mbuf *)0, &cache, &frev, ap->a_cred);
	return (0);
#else
	return (0);
#endif /* NFSSERVER */
}

int
genfs_mmap(void *v)
{

	return (0);
}

static __inline void
genfs_rel_pages(struct vm_page **pgs, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL)
			continue;
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	uvm_lock_pageq();
	uvm_page_unbusy(pgs, npages);
	uvm_unlock_pageq();
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */
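/*
 * Calling convention, as implemented below: the caller holds
 * uobj->vmobjlock; on success the requested pages are returned PG_BUSY
 * in ap->a_m, and the object lock is released on return except for
 * PGO_LOCKED requests, which keep it held.
 */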

int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t newsize, diskeof, memeof;
	off_t offset, origoffset, startoffset, endoffset, raoffset;
	daddr_t lbn, blkno;
	int s, i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
	int fs_bshift, fs_bsize, dev_bshift;
	int flags = ap->a_flags;
	size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	struct vnode *vp = ap->a_vp;
	struct vnode *devvp;
	struct genfs_node *gp = VTOG(vp);
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs, *pgs_onstack[MAX_READ_AHEAD];
	int pgs_size;
	struct ucred *cred = curproc->p_ucred;		/* XXXUBC curlwp */
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	boolean_t sawhole = FALSE;
	boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
	boolean_t blockalloc = write && (flags & PGO_NOBLOCKALLOC) == 0;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	/* XXXUBC temp limit */
	if (*ap->a_count > MAX_READ_AHEAD) {
		panic("genfs_getpages: too many pages");
	}

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	GOP_SIZE(vp, vp->v_size, &diskeof, GOP_SIZE_READ);
	if (flags & PGO_PASTEOF) {
		newsize = MAX(vp->v_size,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_READ|GOP_SIZE_MEM);
	} else {
		GOP_SIZE(vp, vp->v_size, &memeof, GOP_SIZE_READ|GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			simple_unlock(&uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
		    origoffset, *ap->a_count, memeof,0);
		return (EINVAL);
	}

	/* uobj is locked */

	if (write) {
		gp->g_dirtygen++;
		if ((vp->v_flag & VONWORKLST) == 0) {
			vn_syncer_add_to_worklist(vp, filedelay);
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */
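	/*
	 * (PGO_LOCKED means the fault code is holding locks and we may
	 * not sleep, hence UFP_NOWAIT|UFP_NOALLOC below.)
	 */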

	if (flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC| (write ? UFP_NORDONLY : 0));

		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
	}

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	if (vp->v_type == VREG) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	fs_bsize = 1 << fs_bshift;

	orignpages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignpages;
	startoffset = origoffset & ~(fs_bsize - 1);
	endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
	    fs_bsize - 1) & ~(fs_bsize - 1));
	endoffset = MIN(endoffset, round_page(memeof));
	ridx = (origoffset - startoffset) >> PAGE_SHIFT;
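
	/*
	 * For example, with 4k pages and 8k fs blocks, a one-page request
	 * at offset 0x1000 expands to startoffset 0 and endoffset 0x2000,
	 * so ridx is 1 and the eventual npages is 2.
	 */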

	pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = malloc(pgs_size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (pgs == NULL) {
			simple_unlock(&uobj->vmobjlock);
			return (ENOMEM);
		}
	} else {
		pgs = pgs_onstack;
		memset(pgs, 0, pgs_size);
	}
	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
	    ridx, npages, startoffset, endoffset);
	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
	    async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignpages);
		simple_unlock(&uobj->vmobjlock);
		if (pgs != pgs_onstack)
			free(pgs, M_DEVBUF);
		return (EBUSY);
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg1 = pgs[ridx + i];

		if ((pg1->flags & PG_FAKE) ||
		    (blockalloc && (pg1->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		raoffset = origoffset + (orignpages << PAGE_SHIFT);
		npages += ridx;
		goto raout;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (flags & PGO_OVERWRITE) {
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg1 = pgs[ridx + i];

			pg1->flags &= ~(PG_RDONLY|PG_CLEAN);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignpages) {

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignpages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			simple_unlock(&uobj->vmobjlock);
			if (pgs != pgs_onstack)
				free(pgs, M_DEVBUF);
			return (EBUSY);
		}
	}
	simple_unlock(&uobj->vmobjlock);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);

	s = splbio();
	mbp = pool_get(&bufpool, PR_WAITOK);
	splx(s);
	BUF_INIT(mbp);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_READ| (async ? B_CALL|B_ASYNC : 0);
	mbp->b_iodone = (async ? uvm_aio_biodone : 0);
	mbp->b_vp = vp;

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * if the page including EOF is not PG_FAKE, skip over it since
	 * in that case it has valid data that we need to preserve.
	 */

	if (tailbytes > 0) {
		size_t tailstart = bytes;

		if ((pgs[bytes >> PAGE_SHIFT]->flags & PG_FAKE) == 0) {
			tailstart = round_page(tailstart);
			tailbytes -= tailstart - bytes;
		}
		UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
		    kva, tailstart, tailbytes,0);
		memset((void *)(kva + tailstart), 0, tailbytes);
	}
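
	/*
	 * For example, with 4k pages and EOF at 0x1800 in a two-page read,
	 * bytes is 0x1800 and tailbytes is 0x800; if the page containing
	 * EOF is not PG_FAKE, tailstart rounds up to 0x2000 and the
	 * memset above is a no-op.
	 */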

	/*
	 * now loop over the pages, reading as needed.
	 */

	if (blockalloc) {
		lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
	} else {
		lockmgr(&gp->g_glock, LK_SHARED, NULL);
	}

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = TRUE;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn, error,0,0);
			skipbytes += bytes;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno < 0) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);

			sawhole = TRUE;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			for (i = 0; i < holepages; i++) {
				if (write) {
					pgs[pidx + i]->flags &= ~PG_CLEAN;
				}
				if (!blockalloc) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			s = splbio();
			bp = pool_get(&bufpool, PR_WAITOK);
			splx(s);
			BUF_INIT(bp);
			bp->b_data = (char *)kva + offset - startoffset;
			bp->b_resid = bp->b_bcount = iobytes;
			bp->b_flags = B_BUSY|B_READ|B_CALL|B_ASYNC;
			bp->b_iodone = uvm_aio_biodone1;
			bp->b_vp = vp;
			bp->b_proc = NULL;
		}
		bp->b_lblkno = 0;
		bp->b_private = mbp;
		if (devvp->v_type == VBLK) {
			bp->b_dev = devvp->v_rdev;
		}

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
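		/*
		 * e.g. with 8k blocks (fs_bshift 13) and 512-byte device
		 * blocks (dev_bshift 9), an offset 4k into the file block
		 * adds 0x1000 >> 9 == 8 device blocks to blkno.
		 */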

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, iobytes, bp->b_blkno);

		if (async)
			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
		else
			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
		VOP_STRATEGY(bp->b_vp, bp);
	}

loopdone:
	if (skipbytes) {
		s = splbio();
		if (error) {
			mbp->b_flags |= B_ERROR;
			mbp->b_error = error;
		}
		mbp->b_resid -= skipbytes;
		if (mbp->b_resid == 0) {
			biodone(mbp);
		}
		splx(s);
	}

	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
		if (pgs != pgs_onstack)
			free(pgs, M_DEVBUF);
		return (0);
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}
	s = splbio();
	pool_put(&bufpool, mbp);
	splx(s);
	uvm_pagermapout(kva, npages);
	raoffset = startoffset + totalbytes;

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
		    cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
		    startoffset, npages << PAGE_SHIFT, error,0);
		if (!error) {
			for (i = 0; i < npages; i++) {
				if (pgs[i] == NULL) {
					continue;
				}
				pgs[i]->flags &= ~(PG_CLEAN|PG_RDONLY);
				UVMHIST_LOG(ubchist, "mark dirty pg %p",
				    pgs[i],0,0,0);
			}
		}
	}
	lockmgr(&gp->g_glock, LK_RELEASE, NULL);
	simple_lock(&uobj->vmobjlock);

	/*
	 * see if we want to start any readahead.
	 * XXXUBC for now, just read the next 128k on 64k boundaries.
	 * this is pretty nonsensical, but it is 50% faster than reading
	 * just the next 64k.
	 */

raout:
	if (!error && !async && !write && ((int)raoffset & 0xffff) == 0 &&
	    PAGE_SHIFT <= 16) {
		off_t rasize;
		int rapages, err, j, skipped;

		/* XXXUBC temp limit, from above */
		rapages = MIN(MIN(1 << (16 - PAGE_SHIFT), MAX_READ_AHEAD),
		    genfs_rapages);
		rasize = rapages << PAGE_SHIFT;
		for (j = skipped = 0; j < genfs_racount; j++) {

			if (raoffset >= memeof)
				break;

			err = VOP_GETPAGES(vp, raoffset, NULL, &rapages, 0,
			    VM_PROT_READ, 0, 0);
			simple_lock(&uobj->vmobjlock);
			if (err) {
				if (err != EBUSY ||
				    skipped++ == genfs_raskip)
					break;
			}
			raoffset += rasize;
			rapages = rasize >> PAGE_SHIFT;
		}
	}

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		for (i = 0; i < npages; i++) {
			if (pgs[i] == NULL) {
				continue;
			}
			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
			    pgs[i], pgs[i]->flags, 0,0);
			if (pgs[i]->flags & PG_FAKE) {
				pgs[i]->flags |= PG_RELEASED;
			}
		}
		uvm_lock_pageq();
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
		if (pgs != pgs_onstack)
			free(pgs, M_DEVBUF);
		return (error);
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
		    pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			pg->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
		}
		KASSERT(!write || !blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignpages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
			    pg, pg->offset,0,0);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageactivate(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignpages * sizeof(struct vm_page *));
	}
	if (pgs != pgs_onstack)
		free(pgs, M_DEVBUF);
	return (0);
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT), or
 *	if PGO_SYNCIO is set and there are pages busy.
 *	we return with the object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, then we will neither
 *	unlock the object nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.   the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).    if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.    if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).    in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).   also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	range by an estimate of the relatively higher cost of the hash lookup.
 */
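
/*
 * For reference, a typical whole-file flush looks like this sketch
 * (the caller takes the interlock, which doubles as the object lock,
 * and it is released in here):
 *
 *	simple_lock(&vp->v_interlock);
 *	error = VOP_PUTPAGES(vp, 0, 0,
 *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
 */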

int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct simplelock *slock = &uobj->vmobjlock;
	off_t startoff = ap->a_offlo;
	off_t endoff = ap->a_offhi;
	off_t off;
	int flags = ap->a_flags;
	/* Even for strange MAXPHYS, the shift rounds down to a page */
	const int maxpages = MAXPHYS >> PAGE_SHIFT;
	int i, s, error, npages, nback;
	int freeflag;
	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
	boolean_t wasclean, by_list, needs_clean, yld;
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	boolean_t pagedaemon = curproc == uvm.pagedaemon_proc;
	struct lwp *l = curlwp ? curlwp : &lwp0;
	struct genfs_node *gp = VTOG(vp);
	int dirtygen;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
	    vp, uobj->uo_npages, startoff, endoff - startoff);
	if (uobj->uo_npages == 0) {
		s = splbio();
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
		    (vp->v_flag & VONWORKLST)) {
			vp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(vp, v_synclist);
		}
		splx(s);
		simple_unlock(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	error = 0;
	s = splbio();
	simple_lock(&global_v_numoutput_slock);
	wasclean = (vp->v_numoutput == 0);
	simple_unlock(&global_v_numoutput_slock);
	splx(s);
	off = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
	by_list = (uobj->uo_npages <=
	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);

	/*
	 * start the loop.  when scanning by list, hold the last page
	 * in the list before we start.  pages allocated after we start
	 * will be added to the end of the list, so we can stop at the
	 * current last page.
	 */

	dirtygen = gp->g_dirtygen;
	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	curmp.uobject = uobj;
	curmp.offset = (voff_t)-1;
	curmp.flags = PG_BUSY;
	endmp.uobject = uobj;
	endmp.offset = (voff_t)-1;
	endmp.flags = PG_BUSY;
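
	/*
	 * curmp and endmp are fake on-stack pages marked PG_BUSY; they are
	 * only placeholders that hold our place in memq across sleeps and
	 * lock drops (endmp marks where the list ended when we started).
	 */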
	if (by_list) {
		pg = TAILQ_FIRST(&uobj->memq);
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
		PHOLD(l);
	} else {
		pg = uvm_pagelookup(uobj, off);
	}
	nextpg = NULL;
	while (by_list || off < endoff) {

		/*
		 * if the current page is not interesting, move on to the next.
		 */

		KASSERT(pg == NULL || pg->uobject == uobj);
		KASSERT(pg == NULL ||
		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & PG_BUSY) != 0);
		if (by_list) {
			if (pg == &endmp) {
				break;
			}
			if (pg->offset < startoff || pg->offset >= endoff ||
			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
					wasclean = FALSE;
				}
				pg = TAILQ_NEXT(pg, listq);
				continue;
			}
			off = pg->offset;
		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
			if (pg != NULL) {
				wasclean = FALSE;
			}
			off += PAGE_SIZE;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if the current page needs to be cleaned and it's busy,
		 * wait for it to become unbusy.
		 */

		yld = (l->l_cpu->ci_schedstate.spc_flags &
		    SPCF_SHOULDYIELD) && !pagedaemon;
		if (pg->flags & PG_BUSY || yld) {
			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
				error = EDEADLK;
				break;
			}
			KASSERT(!pagedaemon);
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
				UVMHIST_LOG(ubchist, "curmp next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
			}
			if (yld) {
				simple_unlock(slock);
				preempt(1);
				simple_lock(slock);
			} else {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
				simple_lock(slock);
			}
			if (by_list) {
				UVMHIST_LOG(ubchist, "after next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			} else {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
		 */

		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
		} else if (flags & PGO_CLEANIT) {

			/*
			 * if we still have some hope to pull this vnode off
			 * from the syncer queue, write-protect the page.
			 */

			if (wasclean && gp->g_dirtygen == dirtygen &&
			    startoff == 0 && endoff == trunc_page(LLONG_MAX)) {
				pmap_page_protect(pg,
				    VM_PROT_READ|VM_PROT_EXECUTE);
			}
		}

		if (flags & PGO_CLEANIT) {
			needs_clean = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
		} else {
			needs_clean = FALSE;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty,
		 * but they will be returned to us marked clean.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			KDASSERT((vp->v_flag & VONWORKLST));
			wasclean = FALSE;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			/*
			 * first look backward.
			 */

			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
			nback = npages;
			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
			if (nback) {
				memmove(&pgs[0], &pgs[npages - nback],
				    nback * sizeof(pgs[0]));
				if (npages - nback < nback)
					memset(&pgs[nback], 0,
					    (npages - nback) * sizeof(pgs[0]));
				else
					memset(&pgs[npages - nback], 0,
					    nback * sizeof(pgs[0]));
			}

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 */

			npages = maxpages - nback - 1;
			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
			    &pgs[nback + 1],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
			npages += nback + 1;
		} else {
			pgs[0] = pg;
			npages = 1;
			nback = 0;
		}

		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_lock_pageq();
		}
		for (i = 0; i < npages; i++) {
			tpg = pgs[i];
			KASSERT(tpg->uobject == uobj);
			if (by_list && tpg == TAILQ_NEXT(pg, listq))
				pg = tpg;
			if (tpg->offset < startoff || tpg->offset >= endoff)
				continue;
			if (flags & PGO_DEACTIVATE &&
			    (tpg->pqflags & PQ_INACTIVE) == 0 &&
			    tpg->wire_count == 0) {
				(void) pmap_clear_reference(tpg);
				uvm_pagedeactivate(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (pagedaemon) {
						uvmexp.paging++;
						uvm_pagedequeue(tpg);
					}
				} else {

					/*
					 * ``page is not busy''
					 * implies that npages is 1
					 * and needs_clean is false.
					 */

					nextpg = TAILQ_NEXT(tpg, listq);
					uvm_pagefree(tpg);
					if (pagedaemon)
						uvmexp.pdfreed++;
				}
			}
		}
		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_unlock_pageq();
		}
		if (needs_clean) {

			/*
			 * start the i/o.  if we're traversing by list,
			 * keep our place in the list with a marker page.
			 */

			if (by_list) {
				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
				    listq);
			}
			simple_unlock(slock);
			error = GOP_WRITE(vp, pgs, npages, flags);
			simple_lock(slock);
			if (by_list) {
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			}
			if (error) {
				break;
			}
			if (by_list) {
				continue;
			}
		}

		/*
		 * find the next page and continue if there was no error.
		 */

		if (by_list) {
			if (nextpg) {
				pg = nextpg;
				nextpg = NULL;
			} else {
				pg = TAILQ_NEXT(pg, listq);
			}
		} else {
			off += (npages - nback) << PAGE_SHIFT;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
		PRELE(l);
	}

	/*
	 * if we're cleaning and there was nothing to clean,
	 * take us off the syncer list.  if we started any i/o
	 * and we're doing sync i/o, wait for all writes to finish.
	 */

	s = splbio();
	if ((flags & PGO_CLEANIT) && wasclean && gp->g_dirtygen == dirtygen &&
	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
	    (vp->v_flag & VONWORKLST)) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	splx(s);
	if (!wasclean && !async) {
		s = splbio();
		/*
		 * XXX - we want simple_unlock(&global_v_numoutput_slock);
		 *	 but the slot in ltsleep() is taken!
		 * XXX - try to recover from missed wakeups with a timeout..
		 *	 must think of something better.
		 */
		while (vp->v_numoutput != 0) {
			vp->v_flag |= VBWAIT;
			UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, FALSE,
			    "genput2", hz);
			simple_lock(slock);
		}
		splx(s);
	}
	simple_unlock(&uobj->vmobjlock);
	return (error);
}

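/*
 * genfs_gop_write: the default GOP_WRITE.  called from genfs_putpages()
 * with a cluster of busy pages: map them, carve the range into contiguous
 * runs with VOP_BMAP() and push each run with VOP_STRATEGY(), skipping
 * over unallocated (hole) blocks.
 */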
int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	int s, error, run;
	int fs_bshift, dev_bshift;
	vaddr_t kva;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	daddr_t lbn, blkno;
	struct vm_page *pg;
	struct buf *mbp, *bp;
	struct vnode *devvp;
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	UVMHIST_FUNC("genfs_gop_write"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	GOP_SIZE(vp, vp->v_size, &eof, GOP_SIZE_WRITE);
	if (vp->v_type == VREG) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	error = 0;
	pg = pgs[0];
	startoffset = pg->offset;
	bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
	skipbytes = 0;
	KASSERT(bytes != 0);

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	s = splbio();
	simple_lock(&global_v_numoutput_slock);
	vp->v_numoutput += 2;
	simple_unlock(&global_v_numoutput_slock);
	mbp = pool_get(&bufpool, PR_WAITOK);
	BUF_INIT(mbp);
	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
	    vp, mbp, vp->v_numoutput, bytes);
	splx(s);
	mbp->b_bufsize = npages << PAGE_SHIFT;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_WRITE|B_AGE| (async ? (B_CALL|B_ASYNC) : 0);
	mbp->b_iodone = uvm_aio_biodone;
	mbp->b_vp = vp;

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
			skipbytes += bytes;
			bytes = 0;
			break;
		}

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (blkno == (daddr_t)-1) {
			skipbytes += iobytes;
			continue;
		}

		/* if it's really one i/o, don't make a second buf */
		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			s = splbio();
			V_INCR_NUMOUTPUT(vp);
			bp = pool_get(&bufpool, PR_WAITOK);
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			splx(s);
			BUF_INIT(bp);
			bp->b_data = (char *)kva +
			    (vaddr_t)(offset - pg->offset);
			bp->b_resid = bp->b_bcount = iobytes;
			bp->b_flags = B_BUSY|B_WRITE|B_CALL|B_ASYNC;
			bp->b_iodone = uvm_aio_biodone1;
			bp->b_vp = vp;
		}
		bp->b_lblkno = 0;
		bp->b_private = mbp;
		if (devvp->v_type == VBLK) {
			bp->b_dev = devvp->v_rdev;
		}

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
		UVMHIST_LOG(ubchist,
		    "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    vp, offset, bp->b_bcount, bp->b_blkno);
		if (curproc == uvm.pagedaemon_proc)
			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
		else if (async)
			BIO_SETPRIO(bp, BPRIO_TIMENONCRITICAL);
		else
			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
		VOP_STRATEGY(bp->b_vp, bp);
	}
	if (skipbytes) {
		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
		s = splbio();
		if (error) {
			mbp->b_flags |= B_ERROR;
			mbp->b_error = error;
		}
		mbp->b_resid -= skipbytes;
		if (mbp->b_resid == 0) {
			biodone(mbp);
		}
		splx(s);
	}
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
		return (0);
	}
	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
	error = biowait(mbp);
	uvm_aio_aiodone(mbp);
	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
	return (error);
}

/*
 * VOP_PUTPAGES() for vnodes which never have pages.
 */

int
genfs_null_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(vp->v_uobj.uo_npages == 0);
	simple_unlock(&vp->v_interlock);
	return (0);
}

void
genfs_node_init(struct vnode *vp, const struct genfs_ops *ops)
{
	struct genfs_node *gp = VTOG(vp);

	lockinit(&gp->g_glock, PINOD, "glock", 0, 0);
	gp->g_op = ops;
}

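/*
 * genfs_size: the default GOP_SIZE, rounding "size" up to a filesystem
 * block boundary; e.g. with 8k blocks, size 10000 yields *eobp == 16384.
 */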
void
genfs_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
{
	int bsize;

	bsize = 1 << vp->v_mount->mnt_fs_bshift;
	*eobp = (size + bsize - 1) & ~(bsize - 1);
}

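/*
 * Fallback getpages for filesystems without a native implementation:
 * any PG_FAKE pages found are filled one page at a time with VOP_READ()
 * instead of going through VOP_BMAP()/VOP_STRATEGY().
 */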
int
genfs_compat_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t origoffset;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs;
	vaddr_t kva;
	int i, error, orignpages, npages;
	struct iovec iov;
	struct uio uio;
	struct ucred *cred = curproc->p_ucred;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	pgs = ap->a_m;

	if (write && (vp->v_flag & VONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if (ap->a_flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC| (write ? UFP_NORDONLY : 0));

		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
	}
	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
		simple_unlock(&uobj->vmobjlock);
		return (EINVAL);
	}
	npages = orignpages;
	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
	simple_unlock(&uobj->vmobjlock);
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if ((pg->flags & PG_FAKE) == 0) {
			continue;
		}
		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
		iov.iov_len = PAGE_SIZE;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_resid = PAGE_SIZE;
		uio.uio_procp = NULL;
		/* XXX vn_lock */
		error = VOP_READ(vp, &uio, 0, cred);
		if (error) {
			break;
		}
		if (uio.uio_resid) {
			memset(iov.iov_base, 0, uio.uio_resid);
		}
	}
	uvm_pagermapout(kva, npages);
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (error && (pg->flags & PG_FAKE) != 0) {
			pg->flags |= PG_RELEASED;
		} else {
			pmap_clear_modify(pg);
			uvm_pageactivate(pg);
		}
	}
	if (error) {
		uvm_page_unbusy(pgs, npages);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	return (error);
}

int
genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t offset;
	struct iovec iov;
	struct uio uio;
	struct ucred *cred = curproc->p_ucred;
	struct buf *bp;
	vaddr_t kva;
	int s, error;

	offset = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	iov.iov_base = (void *)kva;
	iov.iov_len = npages << PAGE_SHIFT;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_resid = npages << PAGE_SHIFT;
	uio.uio_procp = NULL;
	/* XXX vn_lock */
	error = VOP_WRITE(vp, &uio, 0, cred);

	s = splbio();
	V_INCR_NUMOUTPUT(vp);
	bp = pool_get(&bufpool, PR_WAITOK);
	splx(s);

	BUF_INIT(bp);
	bp->b_flags = B_BUSY | B_WRITE | B_AGE;
	bp->b_vp = vp;
	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
	bp->b_data = (char *)kva;
	bp->b_bcount = npages << PAGE_SHIFT;
	bp->b_bufsize = npages << PAGE_SHIFT;
	bp->b_resid = 0;
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	uvm_aio_aiodone(bp);
	return (error);
}

static void
filt_genfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	/* XXXLUKEM lock the struct? */
	SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);
}

static int
filt_genfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/* XXXLUKEM lock the struct? */
	kn->kn_data = vp->v_size - kn->kn_fp->f_offset;
	return (kn->kn_data != 0);
}

static int
filt_genfsvnode(struct knote *kn, long hint)
{

	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_fflags != 0);
}

static const struct filterops genfsread_filtops =
	{ 1, NULL, filt_genfsdetach, filt_genfsread };
static const struct filterops genfsvnode_filtops =
	{ 1, NULL, filt_genfsdetach, filt_genfsvnode };

int
genfs_kqfilter(void *v)
{
	struct vop_kqfilter_args /* {
		struct vnode *a_vp;
		struct knote *a_kn;
	} */ *ap = v;
	struct vnode *vp;
	struct knote *kn;

	vp = ap->a_vp;
	kn = ap->a_kn;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &genfsread_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &genfsvnode_filtops;
		break;
	default:
		return (1);
	}

	kn->kn_hook = vp;

	/* XXXLUKEM lock the struct? */
	SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);

	return (0);
}