1 /* $NetBSD: genfs_vnops.c,v 1.141 2006/12/15 13:51:30 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.141 2006/12/15 13:51:30 yamt Exp $");
35
36 #if defined(_KERNEL_OPT)
37 #include "opt_nfsserver.h"
38 #endif
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/proc.h>
43 #include <sys/kernel.h>
44 #include <sys/mount.h>
45 #include <sys/namei.h>
46 #include <sys/vnode.h>
47 #include <sys/fcntl.h>
48 #include <sys/kmem.h>
49 #include <sys/poll.h>
50 #include <sys/mman.h>
51 #include <sys/file.h>
52 #include <sys/kauth.h>
53
54 #include <miscfs/genfs/genfs.h>
55 #include <miscfs/genfs/genfs_node.h>
56 #include <miscfs/specfs/specdev.h>
57
58 #include <uvm/uvm.h>
59 #include <uvm/uvm_pager.h>
60
61 #ifdef NFSSERVER
62 #include <nfs/rpcv2.h>
63 #include <nfs/nfsproto.h>
64 #include <nfs/nfs.h>
65 #include <nfs/nqnfs.h>
66 #include <nfs/nfs_var.h>
67 #endif
68
69 static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
70 off_t, enum uio_rw);
71 static void genfs_dio_iodone(struct buf *);
72
73 static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
74 void (*)(struct buf *));
75 static inline void genfs_rel_pages(struct vm_page **, int);
76 static void filt_genfsdetach(struct knote *);
77 static int filt_genfsread(struct knote *, long);
78 static int filt_genfsvnode(struct knote *, long);
79
80 #define MAX_READ_PAGES 16 /* XXXUBC 16 */
81
82 int genfs_maxdio = MAXPHYS;
83
84 int
85 genfs_poll(void *v)
86 {
87 struct vop_poll_args /* {
88 struct vnode *a_vp;
89 int a_events;
90 struct lwp *a_l;
91 } */ *ap = v;
92
93 return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
94 }
95
96 int
97 genfs_seek(void *v)
98 {
99 struct vop_seek_args /* {
100 struct vnode *a_vp;
101 off_t a_oldoff;
102 off_t a_newoff;
103 kauth_cred_t cred;
104 } */ *ap = v;
105
106 if (ap->a_newoff < 0)
107 return (EINVAL);
108
109 return (0);
110 }
111
112 int
113 genfs_abortop(void *v)
114 {
115 struct vop_abortop_args /* {
116 struct vnode *a_dvp;
117 struct componentname *a_cnp;
118 } */ *ap = v;
119
120 if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
121 PNBUF_PUT(ap->a_cnp->cn_pnbuf);
122 return (0);
123 }
124
125 int
126 genfs_fcntl(void *v)
127 {
128 struct vop_fcntl_args /* {
129 struct vnode *a_vp;
130 u_int a_command;
131 caddr_t a_data;
132 int a_fflag;
133 kauth_cred_t a_cred;
134 struct lwp *a_l;
135 } */ *ap = v;
136
137 if (ap->a_command == F_SETFL)
138 return (0);
139 else
140 return (EOPNOTSUPP);
141 }
142
143 /*ARGSUSED*/
144 int
145 genfs_badop(void *v)
146 {
147
148 panic("genfs: bad op");
149 }
150
151 /*ARGSUSED*/
152 int
153 genfs_nullop(void *v)
154 {
155
156 return (0);
157 }
158
159 /*ARGSUSED*/
160 int
161 genfs_einval(void *v)
162 {
163
164 return (EINVAL);
165 }
166
167 /*
168 * Called when an fs doesn't support a particular vop.
169 * This takes care to vrele, vput, or vunlock passed in vnodes.
170 */
171 int
172 genfs_eopnotsupp(void *v)
173 {
174 struct vop_generic_args /*
175 struct vnodeop_desc *a_desc;
176 / * other random data follows, presumably * /
177 } */ *ap = v;
178 struct vnodeop_desc *desc = ap->a_desc;
179 struct vnode *vp, *vp_last = NULL;
180 int flags, i, j, offset;
181
182 flags = desc->vdesc_flags;
183 for (i = 0; i < VDESC_MAX_VPS; flags >>=1, i++) {
184 if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
185 break; /* stop at end of list */
186 if ((j = flags & VDESC_VP0_WILLPUT)) {
187 vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);
188
189 /* Skip if NULL */
190 if (!vp)
191 continue;
192
193 switch (j) {
194 case VDESC_VP0_WILLPUT:
195 /* Check for dvp == vp cases */
196 if (vp == vp_last)
197 vrele(vp);
198 else {
199 vput(vp);
200 vp_last = vp;
201 }
202 break;
203 case VDESC_VP0_WILLUNLOCK:
204 VOP_UNLOCK(vp, 0);
205 break;
206 case VDESC_VP0_WILLRELE:
207 vrele(vp);
208 break;
209 }
210 }
211 }
212
213 return (EOPNOTSUPP);
214 }
215
216 /*ARGSUSED*/
217 int
218 genfs_ebadf(void *v)
219 {
220
221 return (EBADF);
222 }
223
224 /* ARGSUSED */
225 int
226 genfs_enoioctl(void *v)
227 {
228
229 return (EPASSTHROUGH);
230 }
231
232
233 /*
234 * Eliminate all activity associated with the requested vnode
235 * and with all vnodes aliased to the requested vnode.
236 */
237 int
238 genfs_revoke(void *v)
239 {
240 struct vop_revoke_args /* {
241 struct vnode *a_vp;
242 int a_flags;
243 } */ *ap = v;
244 struct vnode *vp, *vq;
245 struct lwp *l = curlwp; /* XXX */
246
247 #ifdef DIAGNOSTIC
248 if ((ap->a_flags & REVOKEALL) == 0)
249 panic("genfs_revoke: not revokeall");
250 #endif
251
252 vp = ap->a_vp;
253 simple_lock(&vp->v_interlock);
254
255 if (vp->v_flag & VALIASED) {
256 /*
257 * If a vgone (or vclean) is already in progress,
258 * wait until it is done and return.
259 */
260 if (vp->v_flag & VXLOCK) {
261 vp->v_flag |= VXWANT;
262 ltsleep(vp, PINOD|PNORELOCK, "vop_revokeall", 0,
263 &vp->v_interlock);
264 return (0);
265 }
266 /*
267 * Ensure that vp will not be vgone'd while we
268 * are eliminating its aliases.
269 */
270 vp->v_flag |= VXLOCK;
271 simple_unlock(&vp->v_interlock);
272 while (vp->v_flag & VALIASED) {
273 simple_lock(&spechash_slock);
274 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
275 if (vq->v_rdev != vp->v_rdev ||
276 vq->v_type != vp->v_type || vp == vq)
277 continue;
278 simple_unlock(&spechash_slock);
279 vgone(vq);
280 break;
281 }
282 if (vq == NULLVP)
283 simple_unlock(&spechash_slock);
284 }
285 /*
286 * Remove the lock so that vgone below will
287 * really eliminate the vnode after which time
288 * vgone will awaken any sleepers.
289 */
290 simple_lock(&vp->v_interlock);
291 vp->v_flag &= ~VXLOCK;
292 }
293 vgonel(vp, l);
294 return (0);
295 }
296
297 /*
298 * Lock the node.
299 */
300 int
301 genfs_lock(void *v)
302 {
303 struct vop_lock_args /* {
304 struct vnode *a_vp;
305 int a_flags;
306 } */ *ap = v;
307 struct vnode *vp = ap->a_vp;
308
309 return (lockmgr(vp->v_vnlock, ap->a_flags, &vp->v_interlock));
310 }
311
312 /*
313 * Unlock the node.
314 */
315 int
316 genfs_unlock(void *v)
317 {
318 struct vop_unlock_args /* {
319 struct vnode *a_vp;
320 int a_flags;
321 } */ *ap = v;
322 struct vnode *vp = ap->a_vp;
323
324 return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
325 &vp->v_interlock));
326 }
327
328 /*
329 * Return whether or not the node is locked.
330 */
331 int
332 genfs_islocked(void *v)
333 {
334 struct vop_islocked_args /* {
335 struct vnode *a_vp;
336 } */ *ap = v;
337 struct vnode *vp = ap->a_vp;
338
339 return (lockstatus(vp->v_vnlock));
340 }
341
342 /*
343 * Stubs to use when there is no locking to be done on the underlying object.
344 */
345 int
346 genfs_nolock(void *v)
347 {
348 struct vop_lock_args /* {
349 struct vnode *a_vp;
350 int a_flags;
351 struct lwp *a_l;
352 } */ *ap = v;
353
354 /*
355 * Since we are not using the lock manager, we must clear
356 * the interlock here.
357 */
358 if (ap->a_flags & LK_INTERLOCK)
359 simple_unlock(&ap->a_vp->v_interlock);
360 return (0);
361 }
362
363 int
364 genfs_nounlock(void *v)
365 {
366
367 return (0);
368 }
369
370 int
371 genfs_noislocked(void *v)
372 {
373
374 return (0);
375 }
376
377 /*
378 * Local lease check for NFS servers. Just set up args and let
379 * nqsrv_getlease() do the rest. If NFSSERVER is not in the kernel,
380 * this is a null operation.
381 */
382 int
383 genfs_lease_check(void *v)
384 {
385 #ifdef NFSSERVER
386 struct vop_lease_args /* {
387 struct vnode *a_vp;
388 struct lwp *a_l;
389 kauth_cred_t a_cred;
390 int a_flag;
391 } */ *ap = v;
392 u_int32_t duration = 0;
393 int cache;
394 u_quad_t frev;
395
396 (void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
397 NQLOCALSLP, ap->a_l, (struct mbuf *)0, &cache, &frev, ap->a_cred);
398 return (0);
399 #else
400 (void) v;
401 return (0);
402 #endif /* NFSSERVER */
403 }
404
405 int
406 genfs_mmap(void *v)
407 {
408
409 return (0);
410 }
411
412 static inline void
413 genfs_rel_pages(struct vm_page **pgs, int npages)
414 {
415 int i;
416
417 for (i = 0; i < npages; i++) {
418 struct vm_page *pg = pgs[i];
419
420 if (pg == NULL || pg == PGO_DONTCARE)
421 continue;
422 if (pg->flags & PG_FAKE) {
423 pg->flags |= PG_RELEASED;
424 }
425 }
426 uvm_lock_pageq();
427 uvm_page_unbusy(pgs, npages);
428 uvm_unlock_pageq();
429 }
430
431 /*
432 * generic VM getpages routine.
433 * Return PG_BUSY pages for the given range,
434 * reading from backing store if necessary.
435 */
436
437 int
438 genfs_getpages(void *v)
439 {
440 struct vop_getpages_args /* {
441 struct vnode *a_vp;
442 voff_t a_offset;
443 struct vm_page **a_m;
444 int *a_count;
445 int a_centeridx;
446 vm_prot_t a_access_type;
447 int a_advice;
448 int a_flags;
449 } */ *ap = v;
450
451 off_t newsize, diskeof, memeof;
452 off_t offset, origoffset, startoffset, endoffset;
453 daddr_t lbn, blkno;
454 int i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
455 int fs_bshift, fs_bsize, dev_bshift;
456 int flags = ap->a_flags;
457 size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
458 vaddr_t kva;
459 struct buf *bp, *mbp;
460 struct vnode *vp = ap->a_vp;
461 struct vnode *devvp;
462 struct genfs_node *gp = VTOG(vp);
463 struct uvm_object *uobj = &vp->v_uobj;
464 struct vm_page *pg, **pgs, *pgs_onstack[MAX_READ_PAGES];
465 int pgs_size;
466 kauth_cred_t cred = curlwp->l_cred; /* XXXUBC curlwp */
467 boolean_t async = (flags & PGO_SYNCIO) == 0;
468 boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
469 boolean_t sawhole = FALSE;
470 boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
471 boolean_t blockalloc = write && (flags & PGO_NOBLOCKALLOC) == 0;
472 voff_t origvsize;
473 UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
474
475 UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
476 vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
477
478 KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
479 vp->v_type == VLNK || vp->v_type == VBLK);
480
481 /* XXXUBC temp limit */
482 if (*ap->a_count > MAX_READ_PAGES) {
483 panic("genfs_getpages: too many pages");
484 }
485
486 startover:
487 error = 0;
488 origvsize = vp->v_size;
489 origoffset = ap->a_offset;
490 orignpages = *ap->a_count;
491 GOP_SIZE(vp, vp->v_size, &diskeof, 0);
492 if (flags & PGO_PASTEOF) {
493 newsize = MAX(vp->v_size,
494 origoffset + (orignpages << PAGE_SHIFT));
495 GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
496 } else {
497 GOP_SIZE(vp, vp->v_size, &memeof, GOP_SIZE_MEM);
498 }
 499 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
500 KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
501 KASSERT(orignpages > 0);
502
503 /*
504 * Bounds-check the request.
505 */
506
507 if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
508 if ((flags & PGO_LOCKED) == 0) {
509 simple_unlock(&uobj->vmobjlock);
510 }
511 UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
512 origoffset, *ap->a_count, memeof,0);
513 return (EINVAL);
514 }
515
516 /* uobj is locked */
517
518 if ((flags & PGO_NOTIMESTAMP) == 0 &&
519 (vp->v_type != VBLK ||
520 (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
521 int updflags = 0;
522
523 if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
524 updflags = GOP_UPDATE_ACCESSED;
525 }
526 if (write) {
527 updflags |= GOP_UPDATE_MODIFIED;
528 }
529 if (updflags != 0) {
530 GOP_MARKUPDATE(vp, updflags);
531 }
532 }
533
534 if (write) {
535 gp->g_dirtygen++;
536 if ((vp->v_flag & VONWORKLST) == 0) {
537 vn_syncer_add_to_worklist(vp, filedelay);
538 }
539 if ((vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP) {
540 vp->v_flag |= VWRITEMAPDIRTY;
541 }
542 }
543
544 /*
545 * For PGO_LOCKED requests, just return whatever's in memory.
546 */
547
548 if (flags & PGO_LOCKED) {
549 int nfound;
550
551 npages = *ap->a_count;
552 #if defined(DEBUG)
553 for (i = 0; i < npages; i++) {
554 pg = ap->a_m[i];
555 KASSERT(pg == NULL || pg == PGO_DONTCARE);
556 }
557 #endif /* defined(DEBUG) */
558 nfound = uvn_findpages(uobj, origoffset, &npages,
559 ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(write ? UFP_NORDONLY : 0));
560 KASSERT(npages == *ap->a_count);
561 if (nfound == 0) {
562 return EBUSY;
563 }
564 if (lockmgr(&gp->g_glock, LK_SHARED | LK_NOWAIT, NULL)) {
565 genfs_rel_pages(ap->a_m, npages);
566
567 /*
568 * restore the array.
569 */
570
571 for (i = 0; i < npages; i++) {
572 pg = ap->a_m[i];
573
 574 				if (pg != NULL && pg != PGO_DONTCARE) {
575 ap->a_m[i] = NULL;
576 }
577 }
578 } else {
579 lockmgr(&gp->g_glock, LK_RELEASE, NULL);
580 }
581 return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
582 }
583 simple_unlock(&uobj->vmobjlock);
584
585 /*
586 * find the requested pages and make some simple checks.
587 * leave space in the page array for a whole block.
588 */
589
590 if (vp->v_type != VBLK) {
591 fs_bshift = vp->v_mount->mnt_fs_bshift;
592 dev_bshift = vp->v_mount->mnt_dev_bshift;
593 } else {
594 fs_bshift = DEV_BSHIFT;
595 dev_bshift = DEV_BSHIFT;
596 }
597 fs_bsize = 1 << fs_bshift;
598
599 orignpages = MIN(orignpages,
600 round_page(memeof - origoffset) >> PAGE_SHIFT);
601 npages = orignpages;
602 startoffset = origoffset & ~(fs_bsize - 1);
603 endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
604 fs_bsize - 1) & ~(fs_bsize - 1));
605 endoffset = MIN(endoffset, round_page(memeof));
606 ridx = (origoffset - startoffset) >> PAGE_SHIFT;
607
608 pgs_size = sizeof(struct vm_page *) *
609 ((endoffset - startoffset) >> PAGE_SHIFT);
610 if (pgs_size > sizeof(pgs_onstack)) {
611 pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
612 if (pgs == NULL) {
613 return (ENOMEM);
614 }
615 } else {
616 pgs = pgs_onstack;
617 memset(pgs, 0, pgs_size);
618 }
619 UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
620 ridx, npages, startoffset, endoffset);
621
622 /*
623 * hold g_glock to prevent a race with truncate.
624 *
625 * check if our idea of v_size is still valid.
626 */
627
628 if (blockalloc) {
629 lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
630 } else {
631 lockmgr(&gp->g_glock, LK_SHARED, NULL);
632 }
633 simple_lock(&uobj->vmobjlock);
634 if (vp->v_size < origvsize) {
635 lockmgr(&gp->g_glock, LK_RELEASE, NULL);
636 if (pgs != pgs_onstack)
637 kmem_free(pgs, pgs_size);
638 goto startover;
639 }
640
641 if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
642 async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
643 lockmgr(&gp->g_glock, LK_RELEASE, NULL);
644 KASSERT(async != 0);
645 genfs_rel_pages(&pgs[ridx], orignpages);
646 simple_unlock(&uobj->vmobjlock);
647 if (pgs != pgs_onstack)
648 kmem_free(pgs, pgs_size);
649 return (EBUSY);
650 }
651
652 /*
653 * if the pages are already resident, just return them.
654 */
655
656 for (i = 0; i < npages; i++) {
657 struct vm_page *pg1 = pgs[ridx + i];
658
659 if ((pg1->flags & PG_FAKE) ||
660 (blockalloc && (pg1->flags & PG_RDONLY))) {
661 break;
662 }
663 }
664 if (i == npages) {
665 lockmgr(&gp->g_glock, LK_RELEASE, NULL);
666 UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
667 npages += ridx;
668 goto out;
669 }
670
671 /*
672 * if PGO_OVERWRITE is set, don't bother reading the pages.
673 */
674
675 if (overwrite) {
676 lockmgr(&gp->g_glock, LK_RELEASE, NULL);
677 UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
678
679 for (i = 0; i < npages; i++) {
680 struct vm_page *pg1 = pgs[ridx + i];
681
682 pg1->flags &= ~(PG_RDONLY|PG_CLEAN);
683 }
684 npages += ridx;
685 goto out;
686 }
687
688 /*
689 * the page wasn't resident and we're not overwriting,
690 * so we're going to have to do some i/o.
691 * find any additional pages needed to cover the expanded range.
692 */
693
694 npages = (endoffset - startoffset) >> PAGE_SHIFT;
695 if (startoffset != origoffset || npages != orignpages) {
696
697 /*
698 * we need to avoid deadlocks caused by locking
699 * additional pages at lower offsets than pages we
700 * already have locked. unlock them all and start over.
701 */
702
703 genfs_rel_pages(&pgs[ridx], orignpages);
704 memset(pgs, 0, pgs_size);
705
706 UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
707 startoffset, endoffset, 0,0);
708 npgs = npages;
709 if (uvn_findpages(uobj, startoffset, &npgs, pgs,
710 async ? UFP_NOWAIT : UFP_ALL) != npages) {
711 lockmgr(&gp->g_glock, LK_RELEASE, NULL);
712 KASSERT(async != 0);
713 genfs_rel_pages(pgs, npages);
714 simple_unlock(&uobj->vmobjlock);
715 if (pgs != pgs_onstack)
716 kmem_free(pgs, pgs_size);
717 return (EBUSY);
718 }
719 }
720 simple_unlock(&uobj->vmobjlock);
721
722 /*
723 * read the desired page(s).
724 */
725
726 totalbytes = npages << PAGE_SHIFT;
727 bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
728 tailbytes = totalbytes - bytes;
729 skipbytes = 0;
730
731 kva = uvm_pagermapin(pgs, npages,
732 UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
733
734 mbp = getiobuf();
735 mbp->b_bufsize = totalbytes;
736 mbp->b_data = (void *)kva;
737 mbp->b_resid = mbp->b_bcount = bytes;
738 mbp->b_flags = B_BUSY|B_READ| (async ? B_CALL|B_ASYNC : 0);
739 mbp->b_iodone = (async ? uvm_aio_biodone : 0);
740 mbp->b_vp = vp;
741 if (async)
742 BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
743 else
744 BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
745
746 /*
747 * if EOF is in the middle of the range, zero the part past EOF.
748 * if the page including EOF is not PG_FAKE, skip over it since
749 * in that case it has valid data that we need to preserve.
750 */
751
752 if (tailbytes > 0) {
753 size_t tailstart = bytes;
754
755 if ((pgs[bytes >> PAGE_SHIFT]->flags & PG_FAKE) == 0) {
756 tailstart = round_page(tailstart);
757 tailbytes -= tailstart - bytes;
758 }
759 UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
760 kva, tailstart, tailbytes,0);
761 memset((void *)(kva + tailstart), 0, tailbytes);
762 }
763
764 /*
765 * now loop over the pages, reading as needed.
766 */
767
768 bp = NULL;
769 for (offset = startoffset;
770 bytes > 0;
771 offset += iobytes, bytes -= iobytes) {
772
773 /*
774 * skip pages which don't need to be read.
775 */
776
777 pidx = (offset - startoffset) >> PAGE_SHIFT;
778 while ((pgs[pidx]->flags & PG_FAKE) == 0) {
779 size_t b;
780
781 KASSERT((offset & (PAGE_SIZE - 1)) == 0);
782 if ((pgs[pidx]->flags & PG_RDONLY)) {
783 sawhole = TRUE;
784 }
785 b = MIN(PAGE_SIZE, bytes);
786 offset += b;
787 bytes -= b;
788 skipbytes += b;
789 pidx++;
790 UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
791 offset, 0,0,0);
792 if (bytes == 0) {
793 goto loopdone;
794 }
795 }
796
797 /*
798 * bmap the file to find out the blkno to read from and
799 * how much we can read in one i/o. if bmap returns an error,
800 * skip the rest of the top-level i/o.
801 */
802
803 lbn = offset >> fs_bshift;
804 error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
805 if (error) {
806 UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
807 lbn, error,0,0);
808 skipbytes += bytes;
809 goto loopdone;
810 }
811
812 /*
813 * see how many pages can be read with this i/o.
814 * reduce the i/o size if necessary to avoid
815 * overwriting pages with valid data.
816 */
817
818 iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
819 bytes);
820 if (offset + iobytes > round_page(offset)) {
821 pcount = 1;
822 while (pidx + pcount < npages &&
823 pgs[pidx + pcount]->flags & PG_FAKE) {
824 pcount++;
825 }
826 iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
827 (offset - trunc_page(offset)));
828 }
829
830 /*
831 * if this block isn't allocated, zero it instead of
832 * reading it. unless we are going to allocate blocks,
833 * mark the pages we zeroed PG_RDONLY.
834 */
835
836 if (blkno < 0) {
837 int holepages = (round_page(offset + iobytes) -
838 trunc_page(offset)) >> PAGE_SHIFT;
839 UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
840
841 sawhole = TRUE;
842 memset((char *)kva + (offset - startoffset), 0,
843 iobytes);
844 skipbytes += iobytes;
845
846 for (i = 0; i < holepages; i++) {
847 if (write) {
848 pgs[pidx + i]->flags &= ~PG_CLEAN;
849 }
850 if (!blockalloc) {
851 pgs[pidx + i]->flags |= PG_RDONLY;
852 }
853 }
854 continue;
855 }
856
857 /*
858 * allocate a sub-buf for this piece of the i/o
859 * (or just use mbp if there's only 1 piece),
860 * and start it going.
861 */
862
863 if (offset == startoffset && iobytes == bytes) {
864 bp = mbp;
865 } else {
866 bp = getiobuf();
867 nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
868 }
869 bp->b_lblkno = 0;
870
871 /* adjust physical blkno for partial blocks */
872 bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
873 dev_bshift);
874
875 UVMHIST_LOG(ubchist,
876 "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
877 bp, offset, iobytes, bp->b_blkno);
878
879 VOP_STRATEGY(devvp, bp);
880 }
881
882 loopdone:
883 nestiobuf_done(mbp, skipbytes, error);
884 if (async) {
885 UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
886 lockmgr(&gp->g_glock, LK_RELEASE, NULL);
887 if (pgs != pgs_onstack)
888 kmem_free(pgs, pgs_size);
889 return (0);
890 }
891 if (bp != NULL) {
892 error = biowait(mbp);
893 }
894 putiobuf(mbp);
895 uvm_pagermapout(kva, npages);
896
897 /*
 898 	 * if we encountered a hole then we have to do a little more work.
899 * for read faults, we marked the page PG_RDONLY so that future
900 * write accesses to the page will fault again.
901 * for write faults, we must make sure that the backing store for
902 * the page is completely allocated while the pages are locked.
903 */
904
905 if (!error && sawhole && blockalloc) {
906 error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
907 cred);
908 UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
909 startoffset, npages << PAGE_SHIFT, error,0);
910 if (!error) {
911 for (i = 0; i < npages; i++) {
912 if (pgs[i] == NULL) {
913 continue;
914 }
915 pgs[i]->flags &= ~(PG_CLEAN|PG_RDONLY);
916 UVMHIST_LOG(ubchist, "mark dirty pg %p",
917 pgs[i],0,0,0);
918 }
919 }
920 }
921 lockmgr(&gp->g_glock, LK_RELEASE, NULL);
922 simple_lock(&uobj->vmobjlock);
923
924 /*
925 * we're almost done! release the pages...
926 * for errors, we free the pages.
927 * otherwise we activate them and mark them as valid and clean.
928 * also, unbusy pages that were not actually requested.
929 */
930
931 if (error) {
932 for (i = 0; i < npages; i++) {
933 if (pgs[i] == NULL) {
934 continue;
935 }
936 UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
937 pgs[i], pgs[i]->flags, 0,0);
938 if (pgs[i]->flags & PG_FAKE) {
939 pgs[i]->flags |= PG_RELEASED;
940 }
941 }
942 uvm_lock_pageq();
943 uvm_page_unbusy(pgs, npages);
944 uvm_unlock_pageq();
945 simple_unlock(&uobj->vmobjlock);
946 UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
947 if (pgs != pgs_onstack)
948 kmem_free(pgs, pgs_size);
949 return (error);
950 }
951
952 out:
953 UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
954 uvm_lock_pageq();
955 for (i = 0; i < npages; i++) {
956 pg = pgs[i];
957 if (pg == NULL) {
958 continue;
959 }
960 UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
961 pg, pg->flags, 0,0);
962 if (pg->flags & PG_FAKE && !overwrite) {
963 pg->flags &= ~(PG_FAKE);
964 pmap_clear_modify(pgs[i]);
965 }
966 KASSERT(!write || !blockalloc || (pg->flags & PG_RDONLY) == 0);
967 if (i < ridx || i >= ridx + orignpages || async) {
968 UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
969 pg, pg->offset,0,0);
970 if (pg->flags & PG_WANTED) {
971 wakeup(pg);
972 }
973 if (pg->flags & PG_FAKE) {
974 KASSERT(overwrite);
975 uvm_pagezero(pg);
976 }
977 if (pg->flags & PG_RELEASED) {
978 uvm_pagefree(pg);
979 continue;
980 }
981 uvm_pageenqueue(pg);
982 pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
983 UVM_PAGE_OWN(pg, NULL);
984 }
985 }
986 uvm_unlock_pageq();
987 simple_unlock(&uobj->vmobjlock);
988 if (ap->a_m != NULL) {
989 memcpy(ap->a_m, &pgs[ridx],
990 orignpages * sizeof(struct vm_page *));
991 }
992 if (pgs != pgs_onstack)
993 kmem_free(pgs, pgs_size);
994 return (0);
995 }
996
997 /*
998 * generic VM putpages routine.
999 * Write the given range of pages to backing store.
1000 *
1001 * => "offhi == 0" means flush all pages at or after "offlo".
1002 * => object should be locked by caller. we return with the
1003 * object unlocked.
1004 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
1005 * thus, a caller might want to unlock higher level resources
1006 * (e.g. vm_map) before calling flush.
1007 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
1008 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
1009 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
1010 * that new pages are inserted on the tail end of the list. thus,
1011 * we can make a complete pass through the object in one go by starting
1012 * at the head and working towards the tail (new pages are put in
1013 * front of us).
1014 * => NOTE: we are allowed to lock the page queues, so the caller
1015 * must not be holding the page queue lock.
1016 *
1017 * note on "cleaning" object and PG_BUSY pages:
1018 * this routine is holding the lock on the object. the only time
1019 * that it can run into a PG_BUSY page that it does not own is if
1020 * some other process has started I/O on the page (e.g. either
1021 * a pagein, or a pageout). if the PG_BUSY page is being paged
1022 * in, then it can not be dirty (!PG_CLEAN) because no one has
1023 * had a chance to modify it yet. if the PG_BUSY page is being
1024 * paged out then it means that someone else has already started
1025 * cleaning the page for us (how nice!). in this case, if we
1026 * have syncio specified, then after we make our pass through the
1027 * object we need to wait for the other PG_BUSY pages to clear
1028 * off (i.e. we need to do an iosync). also note that once a
1029 * page is PG_BUSY it must stay in its object until it is un-busyed.
1030 *
1031 * note on page traversal:
1032 * we can traverse the pages in an object either by going down the
1033 * linked list in "uobj->memq", or we can go over the address range
1034 * by page doing hash table lookups for each address. depending
1035 * on how many pages are in the object it may be cheaper to do one
1036 * or the other. we set "by_list" to true if we are using memq.
1037 * if the cost of a hash lookup was equal to the cost of the list
1038 * traversal we could compare the number of pages in the start->stop
1039 * range to the total number of pages in the object. however, it
1040 * seems that a hash table lookup is more expensive than the linked
1041 * list traversal, so we multiply the number of pages in the
1042 * range by an estimate of the relatively higher cost of the hash lookup.
1043 */
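 
 /*
  * Worked example of the by_list heuristic above (illustrative values,
  * not taken from the code; UVM_PAGE_HASH_PENALTY assumed to be 4):
  *
  *	flush range:	endoff - startoff = 1 MB -> 256 pages of 4 KB
  *	uo_npages:	300 pages currently in the object
  *
  *	by_list = (300 <= 256 * 4) = TRUE
  *
  * i.e. walking the object's memq is expected to be cheaper than doing
  * a (penalized) hash lookup for each page-sized offset in the range.
  */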
1044
1045 int
1046 genfs_putpages(void *v)
1047 {
1048 struct vop_putpages_args /* {
1049 struct vnode *a_vp;
1050 voff_t a_offlo;
1051 voff_t a_offhi;
1052 int a_flags;
1053 } */ *ap = v;
1054 struct vnode *vp = ap->a_vp;
1055 struct uvm_object *uobj = &vp->v_uobj;
1056 struct simplelock *slock = &uobj->vmobjlock;
1057 off_t startoff = ap->a_offlo;
1058 off_t endoff = ap->a_offhi;
1059 off_t off;
1060 int flags = ap->a_flags;
1061 /* Even for strange MAXPHYS, the shift rounds down to a page */
1062 #define maxpages (MAXPHYS >> PAGE_SHIFT)
1063 int i, s, error, npages, nback;
1064 int freeflag;
1065 struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
1066 boolean_t wasclean, by_list, needs_clean, yld;
1067 boolean_t async = (flags & PGO_SYNCIO) == 0;
1068 boolean_t pagedaemon = curproc == uvm.pagedaemon_proc;
1069 struct lwp *l = curlwp ? curlwp : &lwp0;
1070 struct genfs_node *gp = VTOG(vp);
1071 int dirtygen;
1072 boolean_t modified = FALSE;
1073 boolean_t cleanall;
1074
1075 UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
1076
1077 KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
1078 KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
1079 KASSERT(startoff < endoff || endoff == 0);
1080
1081 UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
1082 vp, uobj->uo_npages, startoff, endoff - startoff);
1083
1084 KASSERT((vp->v_flag & VONWORKLST) != 0 ||
1085 (vp->v_flag & VWRITEMAPDIRTY) == 0);
1086 if (uobj->uo_npages == 0) {
1087 s = splbio();
1088 if (vp->v_flag & VONWORKLST) {
1089 vp->v_flag &= ~VWRITEMAPDIRTY;
1090 if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
1091 vn_syncer_remove_from_worklist(vp);
1092 }
1093 splx(s);
1094 simple_unlock(slock);
1095 return (0);
1096 }
1097
1098 /*
1099 * the vnode has pages, set up to process the request.
1100 */
1101
1102 error = 0;
1103 s = splbio();
1104 simple_lock(&global_v_numoutput_slock);
1105 wasclean = (vp->v_numoutput == 0);
1106 simple_unlock(&global_v_numoutput_slock);
1107 splx(s);
1108 off = startoff;
1109 if (endoff == 0 || flags & PGO_ALLPAGES) {
1110 endoff = trunc_page(LLONG_MAX);
1111 }
1112 by_list = (uobj->uo_npages <=
1113 ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
1114
1115 #if !defined(DEBUG)
1116 /*
1117 * if this vnode is known not to have dirty pages,
1118 * don't bother to clean it out.
1119 */
1120
1121 if ((vp->v_flag & VONWORKLST) == 0) {
1122 if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
1123 goto skip_scan;
1124 }
1125 flags &= ~PGO_CLEANIT;
1126 }
1127 #endif /* !defined(DEBUG) */
1128
1129 /*
1130 * start the loop. when scanning by list, hold the last page
1131 * in the list before we start. pages allocated after we start
1132 * will be added to the end of the list, so we can stop at the
1133 * current last page.
1134 */
1135
1136 cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
1137 startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
1138 (vp->v_flag & VONWORKLST) != 0;
1139 dirtygen = gp->g_dirtygen;
1140 freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
1141 if (by_list) {
1142 curmp.uobject = uobj;
1143 curmp.offset = (voff_t)-1;
1144 curmp.flags = PG_BUSY;
1145 endmp.uobject = uobj;
1146 endmp.offset = (voff_t)-1;
1147 endmp.flags = PG_BUSY;
1148 pg = TAILQ_FIRST(&uobj->memq);
1149 TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
1150 PHOLD(l);
1151 } else {
1152 pg = uvm_pagelookup(uobj, off);
1153 }
1154 nextpg = NULL;
1155 while (by_list || off < endoff) {
1156
1157 /*
1158 * if the current page is not interesting, move on to the next.
1159 */
1160
1161 KASSERT(pg == NULL || pg->uobject == uobj);
1162 KASSERT(pg == NULL ||
1163 (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
1164 (pg->flags & PG_BUSY) != 0);
1165 if (by_list) {
1166 if (pg == &endmp) {
1167 break;
1168 }
1169 if (pg->offset < startoff || pg->offset >= endoff ||
1170 pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
1171 if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
1172 wasclean = FALSE;
1173 }
1174 pg = TAILQ_NEXT(pg, listq);
1175 continue;
1176 }
1177 off = pg->offset;
1178 } else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
1179 if (pg != NULL) {
1180 wasclean = FALSE;
1181 }
1182 off += PAGE_SIZE;
1183 if (off < endoff) {
1184 pg = uvm_pagelookup(uobj, off);
1185 }
1186 continue;
1187 }
1188
1189 /*
1190 * if the current page needs to be cleaned and it's busy,
1191 * wait for it to become unbusy.
1192 */
1193
1194 yld = (l->l_cpu->ci_schedstate.spc_flags &
1195 SPCF_SHOULDYIELD) && !pagedaemon;
1196 if (pg->flags & PG_BUSY || yld) {
1197 UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
1198 if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
1199 UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
1200 error = EDEADLK;
1201 break;
1202 }
1203 KASSERT(!pagedaemon);
1204 if (by_list) {
1205 TAILQ_INSERT_BEFORE(pg, &curmp, listq);
1206 UVMHIST_LOG(ubchist, "curmp next %p",
1207 TAILQ_NEXT(&curmp, listq), 0,0,0);
1208 }
1209 if (yld) {
1210 simple_unlock(slock);
1211 preempt(1);
1212 simple_lock(slock);
1213 } else {
1214 pg->flags |= PG_WANTED;
1215 UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
1216 simple_lock(slock);
1217 }
1218 if (by_list) {
1219 UVMHIST_LOG(ubchist, "after next %p",
1220 TAILQ_NEXT(&curmp, listq), 0,0,0);
1221 pg = TAILQ_NEXT(&curmp, listq);
1222 TAILQ_REMOVE(&uobj->memq, &curmp, listq);
1223 } else {
1224 pg = uvm_pagelookup(uobj, off);
1225 }
1226 continue;
1227 }
1228
1229 /*
1230 * if we're freeing, remove all mappings of the page now.
1231 	 * if we're cleaning, check if the page needs to be cleaned.
1232 */
1233
1234 if (flags & PGO_FREE) {
1235 pmap_page_protect(pg, VM_PROT_NONE);
1236 } else if (flags & PGO_CLEANIT) {
1237
1238 /*
1239 * if we still have some hope to pull this vnode off
1240 * from the syncer queue, write-protect the page.
1241 */
1242
1243 if (cleanall && wasclean &&
1244 gp->g_dirtygen == dirtygen) {
1245
1246 /*
1247 * uobj pages get wired only by uvm_fault
1248 * where uobj is locked.
1249 */
1250
1251 if (pg->wire_count == 0) {
1252 pmap_page_protect(pg,
1253 VM_PROT_READ|VM_PROT_EXECUTE);
1254 } else {
1255 cleanall = FALSE;
1256 }
1257 }
1258 }
1259
1260 if (flags & PGO_CLEANIT) {
1261 needs_clean = pmap_clear_modify(pg) ||
1262 (pg->flags & PG_CLEAN) == 0;
1263 pg->flags |= PG_CLEAN;
1264 } else {
1265 needs_clean = FALSE;
1266 }
1267
1268 /*
1269 * if we're cleaning, build a cluster.
1270 * the cluster will consist of pages which are currently dirty,
1271 * but they will be returned to us marked clean.
1272 * if not cleaning, just operate on the one page.
1273 */
1274
1275 if (needs_clean) {
1276 KDASSERT((vp->v_flag & VONWORKLST));
1277 wasclean = FALSE;
1278 memset(pgs, 0, sizeof(pgs));
1279 pg->flags |= PG_BUSY;
1280 UVM_PAGE_OWN(pg, "genfs_putpages");
1281
1282 /*
1283 * first look backward.
1284 */
1285
1286 npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
1287 nback = npages;
1288 uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
1289 UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
1290 if (nback) {
1291 memmove(&pgs[0], &pgs[npages - nback],
1292 nback * sizeof(pgs[0]));
1293 if (npages - nback < nback)
1294 memset(&pgs[nback], 0,
1295 (npages - nback) * sizeof(pgs[0]));
1296 else
1297 memset(&pgs[npages - nback], 0,
1298 nback * sizeof(pgs[0]));
1299 }
1300
1301 /*
1302 * then plug in our page of interest.
1303 */
1304
1305 pgs[nback] = pg;
1306
1307 /*
1308 * then look forward to fill in the remaining space in
1309 * the array of pages.
1310 */
1311
1312 npages = maxpages - nback - 1;
1313 uvn_findpages(uobj, off + PAGE_SIZE, &npages,
1314 &pgs[nback + 1],
1315 UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
1316 npages += nback + 1;
1317 } else {
1318 pgs[0] = pg;
1319 npages = 1;
1320 nback = 0;
1321 }
1322
1323 /*
1324 * apply FREE or DEACTIVATE options if requested.
1325 */
1326
1327 if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
1328 uvm_lock_pageq();
1329 }
1330 for (i = 0; i < npages; i++) {
1331 tpg = pgs[i];
1332 KASSERT(tpg->uobject == uobj);
1333 if (by_list && tpg == TAILQ_NEXT(pg, listq))
1334 pg = tpg;
1335 if (tpg->offset < startoff || tpg->offset >= endoff)
1336 continue;
1337 if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
1338 (void) pmap_clear_reference(tpg);
1339 uvm_pagedeactivate(tpg);
1340 } else if (flags & PGO_FREE) {
1341 pmap_page_protect(tpg, VM_PROT_NONE);
1342 if (tpg->flags & PG_BUSY) {
1343 tpg->flags |= freeflag;
1344 if (pagedaemon) {
1345 uvmexp.paging++;
1346 uvm_pagedequeue(tpg);
1347 }
1348 } else {
1349
1350 /*
1351 * ``page is not busy''
1352 * implies that npages is 1
1353 * and needs_clean is false.
1354 */
1355
1356 nextpg = TAILQ_NEXT(tpg, listq);
1357 uvm_pagefree(tpg);
1358 if (pagedaemon)
1359 uvmexp.pdfreed++;
1360 }
1361 }
1362 }
1363 if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
1364 uvm_unlock_pageq();
1365 }
1366 if (needs_clean) {
1367 modified = TRUE;
1368
1369 /*
1370 * start the i/o. if we're traversing by list,
1371 * keep our place in the list with a marker page.
1372 */
1373
1374 if (by_list) {
1375 TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
1376 listq);
1377 }
1378 simple_unlock(slock);
1379 error = GOP_WRITE(vp, pgs, npages, flags);
1380 simple_lock(slock);
1381 if (by_list) {
1382 pg = TAILQ_NEXT(&curmp, listq);
1383 TAILQ_REMOVE(&uobj->memq, &curmp, listq);
1384 }
1385 if (error) {
1386 break;
1387 }
1388 if (by_list) {
1389 continue;
1390 }
1391 }
1392
1393 /*
1394 * find the next page and continue if there was no error.
1395 */
1396
1397 if (by_list) {
1398 if (nextpg) {
1399 pg = nextpg;
1400 nextpg = NULL;
1401 } else {
1402 pg = TAILQ_NEXT(pg, listq);
1403 }
1404 } else {
1405 off += (npages - nback) << PAGE_SHIFT;
1406 if (off < endoff) {
1407 pg = uvm_pagelookup(uobj, off);
1408 }
1409 }
1410 }
1411 if (by_list) {
1412 TAILQ_REMOVE(&uobj->memq, &endmp, listq);
1413 PRELE(l);
1414 }
1415
1416 if (modified && (vp->v_flag & VWRITEMAPDIRTY) != 0 &&
1417 (vp->v_type != VBLK ||
1418 (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
1419 GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
1420 }
1421
1422 /*
1423 * if we're cleaning and there was nothing to clean,
1424 * take us off the syncer list. if we started any i/o
1425 * and we're doing sync i/o, wait for all writes to finish.
1426 */
1427
1428 s = splbio();
1429 if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
1430 (vp->v_flag & VONWORKLST) != 0) {
1431 vp->v_flag &= ~VWRITEMAPDIRTY;
1432 if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
1433 vn_syncer_remove_from_worklist(vp);
1434 }
1435 splx(s);
1436
1437 #if !defined(DEBUG)
1438 skip_scan:
1439 #endif /* !defined(DEBUG) */
1440 if (!wasclean && !async) {
1441 s = splbio();
1442 /*
1443 * XXX - we want simple_unlock(&global_v_numoutput_slock);
1444 * but the slot in ltsleep() is taken!
1445 * XXX - try to recover from missed wakeups with a timeout..
1446 * must think of something better.
1447 */
1448 while (vp->v_numoutput != 0) {
1449 vp->v_flag |= VBWAIT;
1450 UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, FALSE,
1451 "genput2", hz);
1452 simple_lock(slock);
1453 }
1454 splx(s);
1455 }
1456 simple_unlock(slock);
1457 return (error);
1458 }
1459
1460 int
1461 genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
1462 {
1463 off_t off;
1464 vaddr_t kva;
1465 size_t len;
1466 int error;
1467 UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
1468
1469 UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
1470 vp, pgs, npages, flags);
1471
1472 off = pgs[0]->offset;
1473 kva = uvm_pagermapin(pgs, npages,
1474 UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
1475 len = npages << PAGE_SHIFT;
1476
1477 error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
1478 uvm_aio_biodone);
1479
1480 return error;
1481 }
1482
1483 /*
1484 * Backend routine for doing I/O to vnode pages. Pages are already locked
1485 * and mapped into kernel memory. Here we just look up the underlying
1486 * device block addresses and call the strategy routine.
1487 */
1488
1489 static int
1490 genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
1491 enum uio_rw rw, void (*iodone)(struct buf *))
1492 {
1493 int s, error, run;
1494 int fs_bshift, dev_bshift;
1495 off_t eof, offset, startoffset;
1496 size_t bytes, iobytes, skipbytes;
1497 daddr_t lbn, blkno;
1498 struct buf *mbp, *bp;
1499 struct vnode *devvp;
1500 boolean_t async = (flags & PGO_SYNCIO) == 0;
1501 boolean_t write = rw == UIO_WRITE;
1502 int brw = write ? B_WRITE : B_READ;
1503 UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
1504
1505 UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
1506 vp, kva, len, flags);
1507
1508 GOP_SIZE(vp, vp->v_size, &eof, 0);
1509 if (vp->v_type != VBLK) {
1510 fs_bshift = vp->v_mount->mnt_fs_bshift;
1511 dev_bshift = vp->v_mount->mnt_dev_bshift;
1512 } else {
1513 fs_bshift = DEV_BSHIFT;
1514 dev_bshift = DEV_BSHIFT;
1515 }
1516 error = 0;
1517 startoffset = off;
1518 bytes = MIN(len, eof - startoffset);
1519 skipbytes = 0;
1520 KASSERT(bytes != 0);
1521
1522 if (write) {
1523 s = splbio();
1524 simple_lock(&global_v_numoutput_slock);
1525 vp->v_numoutput += 2;
1526 simple_unlock(&global_v_numoutput_slock);
1527 splx(s);
1528 }
1529 mbp = getiobuf();
1530 UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
1531 vp, mbp, vp->v_numoutput, bytes);
1532 mbp->b_bufsize = len;
1533 mbp->b_data = (void *)kva;
1534 mbp->b_resid = mbp->b_bcount = bytes;
1535 mbp->b_flags = B_BUSY | brw | B_AGE | (async ? (B_CALL | B_ASYNC) : 0);
1536 mbp->b_iodone = iodone;
1537 mbp->b_vp = vp;
1538 if (curproc == uvm.pagedaemon_proc)
1539 BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
1540 else if (async)
1541 BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
1542 else
1543 BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
1544
1545 bp = NULL;
1546 for (offset = startoffset;
1547 bytes > 0;
1548 offset += iobytes, bytes -= iobytes) {
1549 lbn = offset >> fs_bshift;
1550 error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
1551 if (error) {
1552 UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
1553 skipbytes += bytes;
1554 bytes = 0;
1555 break;
1556 }
1557
1558 iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
1559 bytes);
1560 if (blkno == (daddr_t)-1) {
1561 if (!write) {
1562 memset((char *)kva + (offset - startoffset), 0,
1563 iobytes);
1564 }
1565 skipbytes += iobytes;
1566 continue;
1567 }
1568
1569 /* if it's really one i/o, don't make a second buf */
1570 if (offset == startoffset && iobytes == bytes) {
1571 bp = mbp;
1572 } else {
1573 UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
1574 vp, bp, vp->v_numoutput, 0);
1575 bp = getiobuf();
1576 nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
1577 }
1578 bp->b_lblkno = 0;
1579
1580 /* adjust physical blkno for partial blocks */
1581 bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
1582 dev_bshift);
1583 UVMHIST_LOG(ubchist,
1584 "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
1585 vp, offset, bp->b_bcount, bp->b_blkno);
1586
1587 VOP_STRATEGY(devvp, bp);
1588 }
1589 if (skipbytes) {
1590 UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
1591 }
1592 nestiobuf_done(mbp, skipbytes, error);
1593 if (async) {
1594 UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
1595 return (0);
1596 }
1597 UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
1598 error = biowait(mbp);
1599 s = splbio();
1600 (*iodone)(mbp);
1601 splx(s);
1602 UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
1603 return (error);
1604 }
1605
1606 /*
1607 * VOP_PUTPAGES() for vnodes which never have pages.
1608 */
1609
1610 int
1611 genfs_null_putpages(void *v)
1612 {
1613 struct vop_putpages_args /* {
1614 struct vnode *a_vp;
1615 voff_t a_offlo;
1616 voff_t a_offhi;
1617 int a_flags;
1618 } */ *ap = v;
1619 struct vnode *vp = ap->a_vp;
1620
1621 KASSERT(vp->v_uobj.uo_npages == 0);
1622 simple_unlock(&vp->v_interlock);
1623 return (0);
1624 }
1625
1626 void
1627 genfs_node_init(struct vnode *vp, const struct genfs_ops *ops)
1628 {
1629 struct genfs_node *gp = VTOG(vp);
1630
1631 lockinit(&gp->g_glock, PINOD, "glock", 0, 0);
1632 gp->g_op = ops;
1633 }
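 
 /*
  * Usage sketch (illustrative only; the "myfs_*" names are hypothetical):
  * a filesystem supplies a genfs_ops table and calls genfs_node_init()
  * while setting up a new vnode, before any GOP_*() operation is used.
  *
  *	static const struct genfs_ops myfs_genfsops = {
  *		.gop_size = genfs_size,
  *		.gop_alloc = myfs_gop_alloc,
  *		.gop_write = genfs_gop_write,
  *		.gop_markupdate = myfs_gop_markupdate,
  *	};
  *
  *	genfs_node_init(vp, &myfs_genfsops);
  */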
1634
1635 void
1636 genfs_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
1637 {
1638 int bsize;
1639
1640 bsize = 1 << vp->v_mount->mnt_fs_bshift;
1641 *eobp = (size + bsize - 1) & ~(bsize - 1);
1642 }
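 
 /*
  * Example (assuming mnt_fs_bshift == 13, i.e. 8 KB filesystem blocks):
  * a size of 10000 bytes yields *eobp == 16384, the end of the block
  * that contains the last byte.
  */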
1643
1644 int
1645 genfs_compat_getpages(void *v)
1646 {
1647 struct vop_getpages_args /* {
1648 struct vnode *a_vp;
1649 voff_t a_offset;
1650 struct vm_page **a_m;
1651 int *a_count;
1652 int a_centeridx;
1653 vm_prot_t a_access_type;
1654 int a_advice;
1655 int a_flags;
1656 } */ *ap = v;
1657
1658 off_t origoffset;
1659 struct vnode *vp = ap->a_vp;
1660 struct uvm_object *uobj = &vp->v_uobj;
1661 struct vm_page *pg, **pgs;
1662 vaddr_t kva;
1663 int i, error, orignpages, npages;
1664 struct iovec iov;
1665 struct uio uio;
1666 kauth_cred_t cred = curlwp->l_cred;
1667 boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
1668
1669 error = 0;
1670 origoffset = ap->a_offset;
1671 orignpages = *ap->a_count;
1672 pgs = ap->a_m;
1673
1674 if (write && (vp->v_flag & VONWORKLST) == 0) {
1675 vn_syncer_add_to_worklist(vp, filedelay);
1676 }
1677 if (ap->a_flags & PGO_LOCKED) {
1678 uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
1679 UFP_NOWAIT|UFP_NOALLOC| (write ? UFP_NORDONLY : 0));
1680
1681 return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
1682 }
1683 if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
1684 simple_unlock(&uobj->vmobjlock);
1685 return (EINVAL);
1686 }
1687 if ((ap->a_flags & PGO_SYNCIO) == 0) {
1688 simple_unlock(&uobj->vmobjlock);
1689 return 0;
1690 }
1691 npages = orignpages;
1692 uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
1693 simple_unlock(&uobj->vmobjlock);
1694 kva = uvm_pagermapin(pgs, npages,
1695 UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
1696 for (i = 0; i < npages; i++) {
1697 pg = pgs[i];
1698 if ((pg->flags & PG_FAKE) == 0) {
1699 continue;
1700 }
1701 iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
1702 iov.iov_len = PAGE_SIZE;
1703 uio.uio_iov = &iov;
1704 uio.uio_iovcnt = 1;
1705 uio.uio_offset = origoffset + (i << PAGE_SHIFT);
1706 uio.uio_rw = UIO_READ;
1707 uio.uio_resid = PAGE_SIZE;
1708 UIO_SETUP_SYSSPACE(&uio);
1709 /* XXX vn_lock */
1710 error = VOP_READ(vp, &uio, 0, cred);
1711 if (error) {
1712 break;
1713 }
1714 if (uio.uio_resid) {
1715 memset(iov.iov_base, 0, uio.uio_resid);
1716 }
1717 }
1718 uvm_pagermapout(kva, npages);
1719 simple_lock(&uobj->vmobjlock);
1720 uvm_lock_pageq();
1721 for (i = 0; i < npages; i++) {
1722 pg = pgs[i];
1723 if (error && (pg->flags & PG_FAKE) != 0) {
1724 pg->flags |= PG_RELEASED;
1725 } else {
1726 pmap_clear_modify(pg);
1727 uvm_pageactivate(pg);
1728 }
1729 }
1730 if (error) {
1731 uvm_page_unbusy(pgs, npages);
1732 }
1733 uvm_unlock_pageq();
1734 simple_unlock(&uobj->vmobjlock);
1735 return (error);
1736 }
1737
1738 int
1739 genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
1740 int flags)
1741 {
1742 off_t offset;
1743 struct iovec iov;
1744 struct uio uio;
1745 kauth_cred_t cred = curlwp->l_cred;
1746 struct buf *bp;
1747 vaddr_t kva;
1748 int s, error;
1749
1750 offset = pgs[0]->offset;
1751 kva = uvm_pagermapin(pgs, npages,
1752 UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
1753
1754 iov.iov_base = (void *)kva;
1755 iov.iov_len = npages << PAGE_SHIFT;
1756 uio.uio_iov = &iov;
1757 uio.uio_iovcnt = 1;
1758 uio.uio_offset = offset;
1759 uio.uio_rw = UIO_WRITE;
1760 uio.uio_resid = npages << PAGE_SHIFT;
1761 UIO_SETUP_SYSSPACE(&uio);
1762 /* XXX vn_lock */
1763 error = VOP_WRITE(vp, &uio, 0, cred);
1764
1765 s = splbio();
1766 V_INCR_NUMOUTPUT(vp);
1767 splx(s);
1768
1769 bp = getiobuf();
1770 bp->b_flags = B_BUSY | B_WRITE | B_AGE;
1771 bp->b_vp = vp;
1772 bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
1773 bp->b_data = (char *)kva;
1774 bp->b_bcount = npages << PAGE_SHIFT;
1775 bp->b_bufsize = npages << PAGE_SHIFT;
1776 bp->b_resid = 0;
1777 if (error) {
1778 bp->b_flags |= B_ERROR;
1779 bp->b_error = error;
1780 }
1781 uvm_aio_aiodone(bp);
1782 return (error);
1783 }
1784
1785 /*
1786 * Process a uio using direct I/O. If we reach a part of the request
1787 * which cannot be processed in this fashion for some reason, just return.
1788 * The caller must handle some additional part of the request using
1789 * buffered I/O before trying direct I/O again.
1790 */
1791
1792 void
1793 genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
1794 {
1795 struct vmspace *vs;
1796 struct iovec *iov;
1797 vaddr_t va;
1798 size_t len;
1799 const int mask = DEV_BSIZE - 1;
1800 int error;
1801
1802 /*
1803 * We only support direct I/O to user space for now.
1804 */
1805
1806 if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
1807 return;
1808 }
1809
1810 /*
1811 * If the vnode is mapped, we would need to get the getpages lock
1812 	 * to stabilize the bmap, but then we would get into trouble while
1813 * locking the pages if the pages belong to this same vnode (or a
1814 * multi-vnode cascade to the same effect). Just fall back to
1815 * buffered I/O if the vnode is mapped to avoid this mess.
1816 */
1817
1818 if (vp->v_flag & VMAPPED) {
1819 return;
1820 }
1821
1822 /*
1823 * Do as much of the uio as possible with direct I/O.
1824 */
1825
1826 vs = uio->uio_vmspace;
1827 while (uio->uio_resid) {
1828 iov = uio->uio_iov;
1829 if (iov->iov_len == 0) {
1830 uio->uio_iov++;
1831 uio->uio_iovcnt--;
1832 continue;
1833 }
1834 va = (vaddr_t)iov->iov_base;
1835 len = MIN(iov->iov_len, genfs_maxdio);
1836 len &= ~mask;
1837
1838 /*
1839 * If the next chunk is smaller than DEV_BSIZE or extends past
1840 * the current EOF, then fall back to buffered I/O.
1841 */
1842
1843 if (len == 0 || uio->uio_offset + len > vp->v_size) {
1844 return;
1845 }
1846
1847 /*
1848 * Check alignment. The file offset must be at least
1849 * sector-aligned. The exact constraint on memory alignment
1850 * is very hardware-dependent, but requiring sector-aligned
1851 * addresses there too is safe.
1852 */
1853
1854 if (uio->uio_offset & mask || va & mask) {
1855 return;
1856 }
1857 error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
1858 uio->uio_rw);
1859 if (error) {
1860 break;
1861 }
1862 iov->iov_base = (caddr_t)iov->iov_base + len;
1863 iov->iov_len -= len;
1864 uio->uio_offset += len;
1865 uio->uio_resid -= len;
1866 }
1867 }
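 
 /*
  * Caller pattern sketch (illustrative; not taken from any particular
  * filesystem): a read/write vnode op may try direct I/O first and then
  * let its normal buffered path consume whatever part of the uio
  * genfs_directio() left unprocessed.
  *
  *	if ((ioflag & IO_DIRECT) != 0)
  *		genfs_directio(vp, uio, ioflag);
  *	while (uio->uio_resid > 0) {
  *		...				(regular buffered path)
  *	}
  */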
1868
1869 /*
1870 * Iodone routine for direct I/O. We don't do much here since the request is
1871 * always synchronous, so the caller will do most of the work after biowait().
1872 */
1873
1874 static void
1875 genfs_dio_iodone(struct buf *bp)
1876 {
1877 int s;
1878
1879 KASSERT((bp->b_flags & B_ASYNC) == 0);
1880 s = splbio();
1881 if ((bp->b_flags & (B_READ | B_AGE)) == B_AGE) {
1882 vwakeup(bp);
1883 }
1884 putiobuf(bp);
1885 splx(s);
1886 }
1887
1888 /*
1889 * Process one chunk of a direct I/O request.
1890 */
1891
1892 static int
1893 genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
1894 off_t off, enum uio_rw rw)
1895 {
1896 struct vm_map *map;
1897 struct pmap *upm, *kpm;
1898 size_t klen = round_page(uva + len) - trunc_page(uva);
1899 off_t spoff, epoff;
1900 vaddr_t kva, puva;
1901 paddr_t pa;
1902 vm_prot_t prot;
1903 int error, rv, poff, koff;
1904 const int pgoflags = PGO_CLEANIT | PGO_SYNCIO |
1905 (rw == UIO_WRITE ? PGO_FREE : 0);
1906
1907 /*
1908 * For writes, verify that this range of the file already has fully
1909 * allocated backing store. If there are any holes, just punt and
1910 * make the caller take the buffered write path.
1911 */
1912
1913 if (rw == UIO_WRITE) {
1914 daddr_t lbn, elbn, blkno;
1915 int bsize, bshift, run;
1916
1917 bshift = vp->v_mount->mnt_fs_bshift;
1918 bsize = 1 << bshift;
1919 lbn = off >> bshift;
1920 elbn = (off + len + bsize - 1) >> bshift;
1921 while (lbn < elbn) {
1922 error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
1923 if (error) {
1924 return error;
1925 }
1926 if (blkno == (daddr_t)-1) {
1927 return ENOSPC;
1928 }
1929 lbn += 1 + run;
1930 }
1931 }
1932
1933 /*
1934 * Flush any cached pages for parts of the file that we're about to
1935 * access. If we're writing, invalidate pages as well.
1936 */
1937
1938 spoff = trunc_page(off);
1939 epoff = round_page(off + len);
1940 simple_lock(&vp->v_interlock);
1941 error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
1942 if (error) {
1943 return error;
1944 }
1945
1946 /*
1947 * Wire the user pages and remap them into kernel memory.
1948 */
1949
1950 prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
1951 error = uvm_vslock(vs, (void *)uva, len, prot);
1952 if (error) {
1953 return error;
1954 }
1955
1956 map = &vs->vm_map;
1957 upm = vm_map_pmap(map);
1958 kpm = vm_map_pmap(kernel_map);
1959 kva = uvm_km_alloc(kernel_map, klen, 0,
1960 UVM_KMF_VAONLY | UVM_KMF_WAITVA);
1961 puva = trunc_page(uva);
1962 for (poff = 0; poff < klen; poff += PAGE_SIZE) {
1963 rv = pmap_extract(upm, puva + poff, &pa);
1964 KASSERT(rv);
1965 pmap_enter(kpm, kva + poff, pa, prot, prot | PMAP_WIRED);
1966 }
1967 pmap_update(kpm);
1968
1969 /*
1970 * Do the I/O.
1971 */
1972
1973 koff = uva - trunc_page(uva);
1974 error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
1975 genfs_dio_iodone);
1976
1977 /*
1978 * Tear down the kernel mapping.
1979 */
1980
1981 pmap_remove(kpm, kva, kva + klen);
1982 pmap_update(kpm);
1983 uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
1984
1985 /*
1986 * Unwire the user pages.
1987 */
1988
1989 uvm_vsunlock(vs, (void *)uva, len);
1990 return error;
1991 }
1992
1993
1994 static void
1995 filt_genfsdetach(struct knote *kn)
1996 {
1997 struct vnode *vp = (struct vnode *)kn->kn_hook;
1998
1999 /* XXXLUKEM lock the struct? */
2000 SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);
2001 }
2002
2003 static int
2004 filt_genfsread(struct knote *kn, long hint)
2005 {
2006 struct vnode *vp = (struct vnode *)kn->kn_hook;
2007
2008 /*
2009 * filesystem is gone, so set the EOF flag and schedule
2010 * the knote for deletion.
2011 */
2012 if (hint == NOTE_REVOKE) {
2013 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
2014 return (1);
2015 }
2016
2017 /* XXXLUKEM lock the struct? */
2018 kn->kn_data = vp->v_size - kn->kn_fp->f_offset;
2019 return (kn->kn_data != 0);
2020 }
2021
2022 static int
2023 filt_genfsvnode(struct knote *kn, long hint)
2024 {
2025
2026 if (kn->kn_sfflags & hint)
2027 kn->kn_fflags |= hint;
2028 if (hint == NOTE_REVOKE) {
2029 kn->kn_flags |= EV_EOF;
2030 return (1);
2031 }
2032 return (kn->kn_fflags != 0);
2033 }
2034
2035 static const struct filterops genfsread_filtops =
2036 { 1, NULL, filt_genfsdetach, filt_genfsread };
2037 static const struct filterops genfsvnode_filtops =
2038 { 1, NULL, filt_genfsdetach, filt_genfsvnode };
2039
2040 int
2041 genfs_kqfilter(void *v)
2042 {
2043 struct vop_kqfilter_args /* {
2044 struct vnode *a_vp;
2045 struct knote *a_kn;
2046 } */ *ap = v;
2047 struct vnode *vp;
2048 struct knote *kn;
2049
2050 vp = ap->a_vp;
2051 kn = ap->a_kn;
2052 switch (kn->kn_filter) {
2053 case EVFILT_READ:
2054 kn->kn_fop = &genfsread_filtops;
2055 break;
2056 case EVFILT_VNODE:
2057 kn->kn_fop = &genfsvnode_filtops;
2058 break;
2059 default:
2060 return (1);
2061 }
2062
2063 kn->kn_hook = vp;
2064
2065 /* XXXLUKEM lock the struct? */
2066 SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);
2067
2068 return (0);
2069 }
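 
 /*
  * Userland view (illustrative): the filters above service EVFILT_READ
  * and EVFILT_VNODE registrations on vnodes of filesystems that use the
  * genfs kqfilter, e.g.:
  *
  *	struct kevent kev;
  *
  *	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
  *	    NOTE_DELETE | NOTE_WRITE, 0, 0);
  *	kevent(kq, &kev, 1, NULL, 0, NULL);
  */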
2070
2071 void
2072 genfs_node_wrlock(struct vnode *vp)
2073 {
2074 struct genfs_node *gp = VTOG(vp);
2075
2076 lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
2077 }
2078
2079 void
2080 genfs_node_rdlock(struct vnode *vp)
2081 {
2082 struct genfs_node *gp = VTOG(vp);
2083
2084 lockmgr(&gp->g_glock, LK_SHARED, NULL);
2085 }
2086
2087 void
2088 genfs_node_unlock(struct vnode *vp)
2089 {
2090 struct genfs_node *gp = VTOG(vp);
2091
2092 lockmgr(&gp->g_glock, LK_RELEASE, NULL);
2093 }
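 
 /*
  * Locking sketch (illustrative; myfs_shrink() is hypothetical): g_glock
  * serializes genfs_getpages() against operations that change the file's
  * size or block mapping, so a truncate-like path would bracket its work
  * with the write lock:
  *
  *	genfs_node_wrlock(vp);
  *	error = myfs_shrink(vp, newsize);
  *	genfs_node_unlock(vp);
  */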
2094