/* $NetBSD: puffs_vfsops.c,v 1.30 2007/03/20 10:21:59 pooka Exp $ */

/*
 * Copyright (c) 2005, 2006  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.30 2007/03/20 10:21:59 pooka Exp $");

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/extattr.h>
#include <sys/queue.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>

#include <lib/libkern/libkern.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

VFS_PROTOS(puffs);

MALLOC_DEFINE(M_PUFFS, "puffs", "pass-to-userspace file system structures");

#ifndef PUFFS_PNODEBUCKETS
#define PUFFS_PNODEBUCKETS 256
#endif
#ifndef PUFFS_MAXPNODEBUCKETS
#define PUFFS_MAXPNODEBUCKETS 65536
#endif
int puffs_pnodebuckets = PUFFS_PNODEBUCKETS;

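/*
 * Mount the file system: validate and copy in the mount arguments
 * from userspace, allocate and initialize the puffs_mount structure
 * and its pnode hash, and register the mount with the pid/fd that
 * has the puffs device open.
 */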
int
puffs_mount(struct mount *mp, const char *path, void *data,
    struct nameidata *ndp, struct lwp *l)
{
        struct puffs_mount *pmp = NULL;
        struct puffs_args *args;
        char namebuf[PUFFSNAMESIZE+sizeof(PUFFS_NAMEPREFIX)+1]; /* spooky */
        int error = 0, i;

        if (mp->mnt_flag & MNT_GETARGS) {
                pmp = MPTOPUFFSMP(mp);
                return copyout(&pmp->pmp_args, data, sizeof(struct puffs_args));
        }

        /* update is not supported currently */
        if (mp->mnt_flag & MNT_UPDATE)
                return EOPNOTSUPP;

        /*
         * We need the file system name
         */
        if (!data)
                return EINVAL;

        MALLOC(args, struct puffs_args *, sizeof(struct puffs_args),
            M_PUFFS, M_WAITOK);

        error = copyin(data, args, sizeof(struct puffs_args));
        if (error)
                goto out;

        /* devel phase */
        if (args->pa_vers != (PUFFSVERSION | PUFFSDEVELVERS)) {
                printf("puffs_mount: development version mismatch\n");
                error = EINVAL;
                goto out;
        }

        /* nuke spy bits */
        args->pa_flags &= PUFFS_KFLAG_MASK;

        /* build real name */
        (void)strlcpy(namebuf, PUFFS_NAMEPREFIX, sizeof(namebuf));
        (void)strlcat(namebuf, args->pa_name, sizeof(namebuf));

        /* inform the user server whether it got the max request size it wanted */
        if (args->pa_maxreqlen == 0 || args->pa_maxreqlen > PUFFS_REQ_MAXSIZE)
                args->pa_maxreqlen = PUFFS_REQ_MAXSIZE;
        else if (args->pa_maxreqlen < PUFFS_REQSTRUCT_MAX)
                args->pa_maxreqlen = PUFFS_REQSTRUCT_MAX;
        (void)strlcpy(args->pa_name, namebuf, sizeof(args->pa_name));

        error = copyout(args, data, sizeof(struct puffs_args));
        if (error)
                goto out;

        error = set_statvfs_info(path, UIO_USERSPACE, namebuf,
            UIO_SYSSPACE, mp, l);
        if (error)
                goto out;
        mp->mnt_stat.f_iosize = DEV_BSIZE;

        MALLOC(pmp, struct puffs_mount *, sizeof(struct puffs_mount),
            M_PUFFS, M_WAITOK | M_ZERO);

        mp->mnt_fs_bshift = DEV_BSHIFT;
        mp->mnt_dev_bshift = DEV_BSHIFT;
        mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
        mp->mnt_data = pmp;
        mp->mnt_iflag |= IMNT_HAS_TRANS;

        pmp->pmp_status = PUFFSTAT_MOUNTING;
        pmp->pmp_nextreq = 0;
        pmp->pmp_mp = mp;
        pmp->pmp_req_maxsize = args->pa_maxreqlen;
        pmp->pmp_args = *args;

        /* puffs_node hash buckets */
        pmp->pmp_npnodehash = puffs_pnodebuckets;
        if (pmp->pmp_npnodehash < 1)
                pmp->pmp_npnodehash = 1;
        if (pmp->pmp_npnodehash > PUFFS_MAXPNODEBUCKETS)
                pmp->pmp_npnodehash = PUFFS_MAXPNODEBUCKETS;
        pmp->pmp_pnodehash = malloc
            (sizeof(struct puffs_pnode_hashlist *) * pmp->pmp_npnodehash,
            M_PUFFS, M_WAITOK);
        for (i = 0; i < pmp->pmp_npnodehash; i++)
                LIST_INIT(&pmp->pmp_pnodehash[i]);

        /*
         * Inform the fileops processing code that we have a mountpoint.
         * If it doesn't know about anyone with our pid/fd having the
         * device open, punt
         */
        if (puffs_setpmp(l->l_proc->p_pid, args->pa_fd, pmp)) {
                error = ENOENT;
                goto out;
        }

        simple_lock_init(&pmp->pmp_lock);
        TAILQ_INIT(&pmp->pmp_req_touser);
        TAILQ_INIT(&pmp->pmp_req_replywait);
        TAILQ_INIT(&pmp->pmp_req_sizepark);

        DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
            mp, MPTOPUFFSMP(mp)));

        vfs_getnewfsid(mp);

 out:
        if (error && pmp && pmp->pmp_pnodehash)
                free(pmp->pmp_pnodehash, M_PUFFS);
        if (error && pmp)
                FREE(pmp, M_PUFFS);
        FREE(args, M_PUFFS);
        return error;
}

/*
 * This is called from the first "Hello, I'm alive" ioctl
 * from userspace.
 */
int
puffs_start2(struct puffs_mount *pmp, struct puffs_startreq *sreq)
{
        struct puffs_node *pn;
        struct mount *mp;

        mp = PMPTOMP(pmp);

        simple_lock(&pmp->pmp_lock);

        /*
         * if someone has issued a VFS_ROOT() already, fill in the
         * vnode cookie.
         */
        pn = NULL;
        if (pmp->pmp_root) {
                pn = VPTOPP(pmp->pmp_root);
                pn->pn_cookie = sreq->psr_cookie;
        }

        /* We're good to fly */
        pmp->pmp_rootcookie = sreq->psr_cookie;
        pmp->pmp_status = PUFFSTAT_RUNNING;
        simple_unlock(&pmp->pmp_lock);

        /* do the VFS_STATVFS() we missed out on in sys_mount() */
        copy_statvfs_info(&sreq->psr_sb, mp);
        (void)memcpy(&mp->mnt_stat, &sreq->psr_sb, sizeof(mp->mnt_stat));
        mp->mnt_stat.f_iosize = DEV_BSIZE;

        DPRINTF(("puffs_start2: root vp %p, cur root pnode %p, cookie %p\n",
            pmp->pmp_root, pn, sreq->psr_cookie));

        return 0;
}

int
puffs_start(struct mount *mp, int flags, struct lwp *l)
{

        /*
         * This cannot travel to userspace, as this is called from
         * the kernel context of the process doing mount(2).  But
         * it's probably a safe bet that the process doing mount(2)
         * realizes it needs to start the filesystem also...
         */
        return 0;
}

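/*
 * Unmount the file system: flush vnodes and, unless the file server
 * is already dead, ask its opinion about the unmount.  On success or
 * with MNT_FORCE, declare the server dead, drain the remaining
 * waiters and release the mount resources.
 */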
int
puffs_unmount(struct mount *mp, int mntflags, struct lwp *l)
{
        struct puffs_mount *pmp;
        int error, force;

        PUFFS_VFSREQ(unmount);

        error = 0;
        force = mntflags & MNT_FORCE;
        pmp = MPTOPUFFSMP(mp);

        DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
            "status 0x%x\n", pmp->pmp_status));

        /*
         * flush all the vnodes.  VOP_RECLAIM() takes care that the
         * root vnode does not get flushed until unmount.  The
         * userspace root node cookie is stored in the mount
         * structure, so we can always re-instantiate a root vnode,
         * should userspace unmount decide it doesn't want to
         * cooperate.
         */
        error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
        if (error)
                goto out;

        /*
         * If we are not DYING, we should ask userspace's opinion
         * about the situation
         */
        simple_lock(&pmp->pmp_lock);
        if (pmp->pmp_status != PUFFSTAT_DYING) {
                pmp->pmp_unmounting = 1;
                simple_unlock(&pmp->pmp_lock);

                unmount_arg.pvfsr_flags = mntflags;
                unmount_arg.pvfsr_pid = puffs_lwp2pid(l);

                error = puffs_vfstouser(pmp, PUFFS_VFS_UNMOUNT,
                    &unmount_arg, sizeof(unmount_arg));
                DPRINTF(("puffs_unmount: error %d force %d\n", error, force));

                simple_lock(&pmp->pmp_lock);
                pmp->pmp_unmounting = 0;
                wakeup(&pmp->pmp_unmounting);
        }

        /*
         * if userspace cooperated or we really need to die,
         * screw what userland thinks and just die.
         */
        if (error == 0 || force) {
                /* tell waiters & other resources to go unwait themselves */
                puffs_userdead(pmp);
                puffs_nukebypmp(pmp);

                /*
                 * Sink waiters.  This is still not perfect, since the
                 * draining is done after userret, not when they really
                 * exit the file system.  It will probably work, as almost
                 * no call will block and therefore cause a context switch,
                 * so execution stays protected by the biglock after
                 * exiting userspace.  But ... it's an imperfect world.
                 */
                while (pmp->pmp_req_touser_waiters != 0)
                        ltsleep(&pmp->pmp_req_touser_waiters, PVFS,
                            "puffsink", 0, &pmp->pmp_lock);
                simple_unlock(&pmp->pmp_lock);

                /* free resources now that we hopefully have no waiters left */
                free(pmp->pmp_pnodehash, M_PUFFS);
                FREE(pmp, M_PUFFS);
                error = 0;
        } else {
                simple_unlock(&pmp->pmp_lock);
        }

 out:
        DPRINTF(("puffs_unmount: return %d\n", error));
        return error;
}

/*
 * This doesn't need to travel to userspace
 */
int
puffs_root(struct mount *mp, struct vnode **vpp)
{
        struct puffs_mount *pmp;
        struct puffs_node *pn;
        struct vnode *vp;

        pmp = MPTOPUFFSMP(mp);

        /*
         * pmp_lock must be held if vref()'ing or vrele()'ing the
         * root vnode.  the latter is controlled by puffs_inactive().
         */
        simple_lock(&pmp->pmp_lock);
        vp = pmp->pmp_root;
        if (vp) {
                simple_lock(&vp->v_interlock);
                simple_unlock(&pmp->pmp_lock);
                pn = VPTOPP(vp);
                if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK))
                        goto grabnew;
                *vpp = vp;
                return 0;
        } else
                simple_unlock(&pmp->pmp_lock);

        /* XXX: this is wrong, so FIXME */
 grabnew:

        /*
         * So, didn't have the magic root vnode available.
         * No matter, grab another and stuff it with the cookie.
         */
        if (puffs_getvnode(mp, pmp->pmp_rootcookie, VDIR, 0, 0, &vp))
                panic("sloppy programming");

        simple_lock(&pmp->pmp_lock);
        /*
         * check if by mysterious force someone else created a root
         * vnode while we were executing.
         */
        if (pmp->pmp_root) {
                vref(pmp->pmp_root);
                simple_unlock(&pmp->pmp_lock);
                puffs_putvnode(vp);
                vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);
                *vpp = pmp->pmp_root;
                return 0;
        }

        /* store cache */
        vp->v_flag = VROOT;
        pmp->pmp_root = vp;
        simple_unlock(&pmp->pmp_lock);

        vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);

        *vpp = vp;
        return 0;
}

int
puffs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg, struct lwp *l)
{

        return EOPNOTSUPP;
}

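/*
 * Fetch file system statistics from the file server.  This is also
 * reached during mount(2), when the server cannot be contacted yet;
 * in that case report EINPROGRESS.
 */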
int
puffs_statvfs(struct mount *mp, struct statvfs *sbp, struct lwp *l)
{
        struct puffs_vfsreq_statvfs *statvfs_arg; /* too big for stack */
        struct puffs_mount *pmp;
        int error = 0;

        pmp = MPTOPUFFSMP(mp);

        /*
         * If we are mounting, it means that the userspace counterpart
         * is calling mount(2), but mount(2) also calls statvfs.  So
         * requesting statvfs from userspace would mean a deadlock.
         * Compensate.
         */
        if (pmp->pmp_status == PUFFSTAT_MOUNTING)
                return EINPROGRESS;

        /* too big for stack */
        MALLOC(statvfs_arg, struct puffs_vfsreq_statvfs *,
            sizeof(struct puffs_vfsreq_statvfs), M_PUFFS, M_WAITOK | M_ZERO);
        statvfs_arg->pvfsr_pid = puffs_lwp2pid(l);

        error = puffs_vfstouser(pmp, PUFFS_VFS_STATVFS,
            statvfs_arg, sizeof(*statvfs_arg));
        statvfs_arg->pvfsr_sb.f_iosize = DEV_BSIZE;

        /*
         * Try to produce a sensible result even in the event
         * of userspace error.
         *
         * XXX: cache the copy in non-error case
         */
        if (!error) {
                copy_statvfs_info(&statvfs_arg->pvfsr_sb, mp);
                (void)memcpy(sbp, &statvfs_arg->pvfsr_sb,
                    sizeof(struct statvfs));
        } else {
                copy_statvfs_info(sbp, mp);
        }

        FREE(statvfs_arg, M_PUFFS);
        return error;
}

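/*
 * Flush dirty pages of all regular vnodes on the mount.  Called both
 * for sync (honoring the caller's waitfor) and for file system
 * suspension (suspending != 0), in which case the putpages operations
 * are hinted to be issued fire-and-forget; see the comments below.
 */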
static int
pageflush(struct mount *mp, kauth_cred_t cred,
    int waitfor, int suspending, struct lwp *l)
{
        struct puffs_node *pn;
        struct vnode *vp, *nvp;
        int error, rv;

        KASSERT(((waitfor == MNT_WAIT) && suspending) == 0);
        KASSERT((suspending == 0)
            || (fstrans_is_owner(mp)
              && fstrans_getstate(mp) == FSTRANS_SUSPENDING));

        error = 0;

        /*
         * Sync all cached data from regular vnodes (which are not
         * currently locked, see below).  After this we call VFS_SYNC
         * for the fs server, which should handle data and metadata for
         * all the nodes it knows to exist.
         */
        simple_lock(&mntvnode_slock);
 loop:
        for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
                /* check if we're on the right list */
                if (vp->v_mount != mp)
                        goto loop;

                simple_lock(&vp->v_interlock);
                pn = VPTOPP(vp);
                nvp = TAILQ_NEXT(vp, v_mntvnodes);

                if (vp->v_type != VREG || UVM_OBJ_IS_CLEAN(&vp->v_uobj)) {
                        simple_unlock(&vp->v_interlock);
                        continue;
                }

                simple_unlock(&mntvnode_slock);

                /*
                 * Here we try to get a reference to the vnode and to
                 * lock it.  This is mostly cargo-culted, but I will
                 * offer an explanation to why I believe this might
                 * actually do the right thing.
                 *
                 * If the vnode is a goner, we quite obviously don't need
                 * to sync it.
                 *
                 * If the vnode was busy, we don't need to sync it because
                 * this is never called with MNT_WAIT except from
                 * dounmount(), when we are wait-flushing all the dirty
                 * vnodes through other routes in any case.  So there,
                 * sync() doesn't actually sync.  Happy now?
                 *
                 * NOTE: if we're suspending, vget() does NOT lock.
                 * See puffs_lock() for details.
                 */
                rv = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
                if (rv) {
                        simple_lock(&mntvnode_slock);
                        if (rv == ENOENT)
                                goto loop;
                        continue;
                }

                /*
                 * Thread information to puffs_strategy() through the
                 * pnode flags: we want to issue the putpages operations
                 * as FAF if we're suspending, since it's very probable
                 * that our execution context is that of the userspace
                 * daemon.  We can do this because:
                 *   + we send the "going to suspend" prior to this part
                 *   + if any of the writes fails in userspace, it's the
                 *     file system server's problem to decide if this was a
                 *     failed snapshot when it gets the "snapshot complete"
                 *     notification.
                 *   + if any of the writes fail in the kernel already, we
                 *     immediately fail *and* notify the user server of
                 *     failure.
                 *
                 * We also do FAFs if we're called from the syncer.  This
                 * is just a general optimization for trickle sync: no need
                 * to really guarantee that the stuff ended up on backing
                 * storage.
                 * TODO: Maybe also hint the user server about this twist?
                 */
                if (suspending || waitfor == MNT_LAZY) {
                        simple_lock(&vp->v_interlock);
                        pn->pn_stat |= PNODE_SUSPEND;
                        simple_unlock(&vp->v_interlock);
                }
                rv = VOP_FSYNC(vp, cred, waitfor, 0, 0, l);
                if (suspending || waitfor == MNT_LAZY) {
                        simple_lock(&vp->v_interlock);
                        pn->pn_stat &= ~PNODE_SUSPEND;
                        simple_unlock(&vp->v_interlock);
                }
                if (rv)
                        error = rv;
                vput(vp);
                simple_lock(&mntvnode_slock);
        }
        simple_unlock(&mntvnode_slock);

        return error;
}

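/*
 * Sync the file system: flush cached pages from the kernel side and
 * then ask the file server to sync its own state.
 */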
int
puffs_sync(struct mount *mp, int waitfor, struct kauth_cred *cred,
    struct lwp *l)
{
        int error, rv;

        PUFFS_VFSREQ(sync);

        error = pageflush(mp, cred, waitfor, 0, l);

        /* sync fs */
        sync_arg.pvfsr_waitfor = waitfor;
        puffs_credcvt(&sync_arg.pvfsr_cred, cred);
        sync_arg.pvfsr_pid = puffs_lwp2pid(l);

        rv = puffs_vfstouser(MPTOPUFFSMP(mp), PUFFS_VFS_SYNC,
            &sync_arg, sizeof(sync_arg));
        if (rv)
                error = rv;

        return error;
}

int
puffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{

        return EOPNOTSUPP;
}

#if 0
/*ARGSUSED*/
int
puffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{

        return EOPNOTSUPP;
}

/*ARGSUSED*/
int
puffs_vptofh(struct vnode *vp, struct fid *fhp)
{

        return EOPNOTSUPP;
}
#endif

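/*
 * Module init/done hooks.  Only an LKM build needs to attach the
 * malloc type and create the pnode pool here.
 */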
void
puffs_init()
{

#ifdef _LKM
        malloc_type_attach(M_PUFFS);
        pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
            "puffspnpl", &pool_allocator_nointr, IPL_NONE);
#endif

        return;
}

void
puffs_done()
{

#ifdef _LKM
        pool_destroy(&puffs_pnpool);
        malloc_type_detach(M_PUFFS);
#endif

        return;
}

int
puffs_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
{

        return EOPNOTSUPP;
}

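/*
 * Suspend or resume file system operation.  Suspending notifies the
 * file server at each stage, flushes dirty pages and walks the mount
 * through the fstrans suspend states; resuming returns the mount to
 * normal operation and informs the server.
 */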
int
puffs_suspendctl(struct mount *mp, int cmd)
{
        struct puffs_mount *pmp;
        int error;

        pmp = MPTOPUFFSMP(mp);
        switch (cmd) {
        case SUSPEND_SUSPEND:
                DPRINTF(("puffs_suspendctl: suspending\n"));
                if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
                        break;
                puffs_suspendtouser(pmp, PUFFS_SUSPEND_START);

                error = pageflush(mp, FSCRED, 0, 1, curlwp);
                if (error == 0)
                        error = fstrans_setstate(mp, FSTRANS_SUSPENDED);

                if (error != 0) {
                        puffs_suspendtouser(pmp, PUFFS_SUSPEND_ERROR);
                        (void) fstrans_setstate(mp, FSTRANS_NORMAL);
                        break;
                }

                puffs_suspendtouser(pmp, PUFFS_SUSPEND_SUSPENDED);

                break;

        case SUSPEND_RESUME:
                DPRINTF(("puffs_suspendctl: resume\n"));
                error = 0;
                (void) fstrans_setstate(mp, FSTRANS_NORMAL);
                puffs_suspendtouser(pmp, PUFFS_SUSPEND_RESUME);
                break;

        default:
                error = EINVAL;
                break;
        }

        DPRINTF(("puffs_suspendctl: return %d\n", error));
        return error;
}

const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
        &puffs_vnodeop_opv_desc,
        &puffs_specop_opv_desc,
        &puffs_fifoop_opv_desc,
        &puffs_msgop_opv_desc,
        NULL,
};

struct vfsops puffs_vfsops = {
        MOUNT_PUFFS,
        puffs_mount,            /* mount        */
        puffs_start,            /* start        */
        puffs_unmount,          /* unmount      */
        puffs_root,             /* root         */
        puffs_quotactl,         /* quotactl     */
        puffs_statvfs,          /* statvfs      */
        puffs_sync,             /* sync         */
        puffs_vget,             /* vget         */
        (void *)eopnotsupp,     /* fhtovp       */
        (void *)eopnotsupp,     /* vptofh       */
        puffs_init,             /* init         */
        NULL,                   /* reinit       */
        puffs_done,             /* done         */
        NULL,                   /* mountroot    */
        puffs_snapshot,         /* snapshot     */
        vfs_stdextattrctl,      /* extattrctl   */
        puffs_suspendctl,       /* suspendctl   */
        puffs_vnodeopv_descs,   /* vnodeops     */
        0,                      /* refcount     */
        { NULL, NULL }
};
VFS_ATTACH(puffs_vfsops);