/*	$NetBSD: puffs_vfsops.c,v 1.33 2007/04/11 21:03:05 pooka Exp $	*/

/*
 * Copyright (c) 2005, 2006 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.33 2007/04/11 21:03:05 pooka Exp $");

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/extattr.h>
#include <sys/queue.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>

#include <lib/libkern/libkern.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

VFS_PROTOS(puffs);

MALLOC_JUSTDEFINE(M_PUFFS, "puffs", "Pass-to-Userspace Framework File System");

#ifndef PUFFS_PNODEBUCKETS
#define PUFFS_PNODEBUCKETS 256
#endif
#ifndef PUFFS_MAXPNODEBUCKETS
#define PUFFS_MAXPNODEBUCKETS 65536
#endif
int puffs_pnodebuckets = PUFFS_PNODEBUCKETS;

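/*
 * Mount the file system: copy in and sanity check the arguments
 * given by the userspace server, copy the adjusted values back out,
 * and initialize the kernel-side puffs_mount structure (locks,
 * request queues and the puffs_node hash).
 */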
int
puffs_mount(struct mount *mp, const char *path, void *data,
    struct nameidata *ndp, struct lwp *l)
{
	struct puffs_mount *pmp = NULL;
	struct puffs_args *args;
	char namebuf[PUFFSNAMESIZE+sizeof(PUFFS_NAMEPREFIX)+1]; /* spooky */
	int error = 0, i;

	if (mp->mnt_flag & MNT_GETARGS) {
		pmp = MPTOPUFFSMP(mp);
		return copyout(&pmp->pmp_args, data, sizeof(struct puffs_args));
	}

	/* update is not supported currently */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	/*
	 * We need the file system name
	 */
	if (!data)
		return EINVAL;

	MALLOC(args, struct puffs_args *, sizeof(struct puffs_args),
	    M_PUFFS, M_WAITOK);

	error = copyin(data, args, sizeof(struct puffs_args));
	if (error)
		goto out;

	/* devel phase */
	if (args->pa_vers != (PUFFSVERSION | PUFFSDEVELVERS)) {
		printf("puffs_mount: development version mismatch\n");
		error = EINVAL;
		goto out;
	}

	/* nuke spy bits */
	args->pa_flags &= PUFFS_KFLAG_MASK;

	/* build real name */
	(void)strlcpy(namebuf, PUFFS_NAMEPREFIX, sizeof(namebuf));
	(void)strlcat(namebuf, args->pa_name, sizeof(namebuf));

	/*
	 * Clamp the max request size; the copyout below informs the
	 * user server of the value it actually got.
	 */
	if (args->pa_maxreqlen == 0 || args->pa_maxreqlen > PUFFS_REQ_MAXSIZE)
		args->pa_maxreqlen = PUFFS_REQ_MAXSIZE;
	else if (args->pa_maxreqlen < PUFFS_REQSTRUCT_MAX)
		args->pa_maxreqlen = PUFFS_REQSTRUCT_MAX;
	(void)strlcpy(args->pa_name, namebuf, sizeof(args->pa_name));

	error = copyout(args, data, sizeof(struct puffs_args));
	if (error)
		goto out;

	error = set_statvfs_info(path, UIO_USERSPACE, namebuf,
	    UIO_SYSSPACE, mp, l);
	if (error)
		goto out;
	mp->mnt_stat.f_iosize = DEV_BSIZE;

	MALLOC(pmp, struct puffs_mount *, sizeof(struct puffs_mount),
	    M_PUFFS, M_WAITOK | M_ZERO);

	mp->mnt_fs_bshift = DEV_BSHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
	mp->mnt_data = pmp;
	mp->mnt_iflag |= IMNT_HAS_TRANS;

	pmp->pmp_status = PUFFSTAT_MOUNTING;
	pmp->pmp_nextreq = 0;
	pmp->pmp_mp = mp;
	pmp->pmp_req_maxsize = args->pa_maxreqlen;
	pmp->pmp_args = *args;

	/* puffs_node hash buckets */
	pmp->pmp_npnodehash = puffs_pnodebuckets;
	if (pmp->pmp_npnodehash < 1)
		pmp->pmp_npnodehash = 1;
	if (pmp->pmp_npnodehash > PUFFS_MAXPNODEBUCKETS)
		pmp->pmp_npnodehash = PUFFS_MAXPNODEBUCKETS;
	pmp->pmp_pnodehash = malloc
	    (sizeof(struct puffs_pnode_hashlist *) * pmp->pmp_npnodehash,
	    M_PUFFS, M_WAITOK);
	for (i = 0; i < pmp->pmp_npnodehash; i++)
		LIST_INIT(&pmp->pmp_pnodehash[i]);

	/*
	 * Inform the fileops processing code that we have a mountpoint.
	 * If it doesn't know about anyone with our pid/fd having the
	 * device open, punt
	 */
	if (puffs_setpmp(l->l_proc->p_pid, args->pa_fd, pmp)) {
		error = ENOENT;
		goto out;
	}

	mutex_init(&pmp->pmp_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pmp->pmp_req_waiter_cv, "puffsget");
	cv_init(&pmp->pmp_req_waitersink_cv, "puffsink");
	cv_init(&pmp->pmp_unmounting_cv, "puffsum");
	cv_init(&pmp->pmp_suspend_cv, "pufsusum");
	TAILQ_INIT(&pmp->pmp_req_touser);
	TAILQ_INIT(&pmp->pmp_req_replywait);
	TAILQ_INIT(&pmp->pmp_req_sizepark);

	DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
	    mp, MPTOPUFFSMP(mp)));

	vfs_getnewfsid(mp);

 out:
	if (error && pmp && pmp->pmp_pnodehash)
		free(pmp->pmp_pnodehash, M_PUFFS);
	if (error && pmp)
		FREE(pmp, M_PUFFS);
	FREE(args, M_PUFFS);
	return error;
}

/*
 * This is called from the first "Hello, I'm alive" ioctl
 * from userspace.
 */
int
puffs_start2(struct puffs_mount *pmp, struct puffs_startreq *sreq)
{
	struct puffs_node *pn;
	struct mount *mp;

	mp = PMPTOMP(pmp);

	mutex_enter(&pmp->pmp_lock);

	/*
	 * if someone has issued a VFS_ROOT() already, fill in the
	 * vnode cookie.
	 */
	pn = NULL;
	if (pmp->pmp_root) {
		pn = VPTOPP(pmp->pmp_root);
		pn->pn_cookie = sreq->psr_cookie;
	}

	/* We're good to fly */
	pmp->pmp_rootcookie = sreq->psr_cookie;
	pmp->pmp_status = PUFFSTAT_RUNNING;
	mutex_exit(&pmp->pmp_lock);

	/* do the VFS_STATVFS() we missed out on in sys_mount() */
	copy_statvfs_info(&sreq->psr_sb, mp);
	(void)memcpy(&mp->mnt_stat, &sreq->psr_sb, sizeof(mp->mnt_stat));
	mp->mnt_stat.f_iosize = DEV_BSIZE;

	DPRINTF(("puffs_start2: root vp %p, cur root pnode %p, cookie %p\n",
	    pmp->pmp_root, pn, sreq->psr_cookie));

	return 0;
}

int
puffs_start(struct mount *mp, int flags, struct lwp *l)
{

	/*
	 * This cannot travel to userspace, as this is called from
	 * the kernel context of the process doing mount(2). But
	 * it's probably a safe bet that the process doing mount(2)
	 * realizes it needs to start the filesystem also...
	 */
	return 0;
}

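/*
 * Unmount: flush our vnodes, ask the userspace server for its
 * opinion unless it is already dying, and if it agrees (or unmount
 * is forced) tear down the kernel-side resources.
 */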
int
puffs_unmount(struct mount *mp, int mntflags, struct lwp *l)
{
	struct puffs_mount *pmp;
	int error, force;

	PUFFS_VFSREQ(unmount);

	error = 0;
	force = mntflags & MNT_FORCE;
	pmp = MPTOPUFFSMP(mp);

	DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
	    "status 0x%x\n", pmp->pmp_status));

	/*
	 * flush all the vnodes. VOP_RECLAIM() takes care that the
	 * root vnode does not get flushed until unmount. The
	 * userspace root node cookie is stored in the mount
	 * structure, so we can always re-instantiate a root vnode,
	 * should userspace unmount decide it doesn't want to
	 * cooperate.
	 */
	error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
	if (error)
		goto out;

	/*
	 * If we are not DYING, we should ask userspace's opinion
	 * about the situation
	 */
	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_status != PUFFSTAT_DYING) {
		pmp->pmp_unmounting = 1;
		mutex_exit(&pmp->pmp_lock);

		unmount_arg.pvfsr_flags = mntflags;
		unmount_arg.pvfsr_pid = puffs_lwp2pid(l);

		error = puffs_vfstouser(pmp, PUFFS_VFS_UNMOUNT,
		    &unmount_arg, sizeof(unmount_arg));
		DPRINTF(("puffs_unmount: error %d force %d\n", error, force));

		mutex_enter(&pmp->pmp_lock);
		pmp->pmp_unmounting = 0;
		cv_broadcast(&pmp->pmp_unmounting_cv);
	}

	/*
	 * if userspace cooperated or we really need to die,
	 * screw what userland thinks and just die.
	 */
	if (error == 0 || force) {
		/* tell waiters & other resources to go unwait themselves */
		puffs_userdead(pmp);
		puffs_nukebypmp(pmp);

		/*
		 * Sink waiters. This is still not perfect, since the
		 * draining is done after userret, not when they really
		 * exit the file system. It will probably work as almost
		 * no call will block and therefore cause a context switch,
		 * and will therefore be protected by the biglock after
		 * exiting userspace. But ... it's an imperfect world.
		 */
		while (pmp->pmp_req_waiters != 0)
			cv_wait(&pmp->pmp_req_waitersink_cv, &pmp->pmp_lock);
		mutex_exit(&pmp->pmp_lock);

		/* free resources now that we hopefully have no waiters left */
		cv_destroy(&pmp->pmp_req_waiter_cv);
		cv_destroy(&pmp->pmp_req_waitersink_cv);
		cv_destroy(&pmp->pmp_unmounting_cv);
		cv_destroy(&pmp->pmp_suspend_cv);
		mutex_destroy(&pmp->pmp_lock);

		free(pmp->pmp_pnodehash, M_PUFFS);
		FREE(pmp, M_PUFFS);
		error = 0;
	} else {
		mutex_exit(&pmp->pmp_lock);
	}

 out:
	DPRINTF(("puffs_unmount: return %d\n", error));
	return error;
}

/*
 * This doesn't need to travel to userspace
 */
int
puffs_root(struct mount *mp, struct vnode **vpp)
{
	struct puffs_mount *pmp;
	struct puffs_node *pn;
	struct vnode *vp;

	pmp = MPTOPUFFSMP(mp);

	/*
	 * pmp_lock must be held if vref()'ing or vrele()'ing the
	 * root vnode. The latter is controlled by puffs_inactive().
	 */
	mutex_enter(&pmp->pmp_lock);
	vp = pmp->pmp_root;
	if (vp) {
		simple_lock(&vp->v_interlock);
		mutex_exit(&pmp->pmp_lock);
		pn = VPTOPP(vp);
		if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK))
			goto grabnew;
		*vpp = vp;
		return 0;
	} else
		mutex_exit(&pmp->pmp_lock);

	/* XXX: this is wrong, so FIXME */
 grabnew:

	/*
	 * So, didn't have the magic root vnode available.
	 * No matter, grab another and stuff it with the cookie.
	 */
	if (puffs_getvnode(mp, pmp->pmp_rootcookie, VDIR, 0, 0, &vp))
		panic("sloppy programming");

	mutex_enter(&pmp->pmp_lock);
	/*
	 * check if by some mysterious force someone else created a root
	 * vnode while we were executing.
	 */
	if (pmp->pmp_root) {
		vref(pmp->pmp_root);
		mutex_exit(&pmp->pmp_lock);
		puffs_putvnode(vp);
		vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);
		*vpp = pmp->pmp_root;
		return 0;
	}

	/* store cache */
	vp->v_flag = VROOT;
	pmp->pmp_root = vp;
	mutex_exit(&pmp->pmp_lock);

	vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);

	*vpp = vp;
	return 0;
}

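/*
 * Fetch file system statistics from the userspace server, falling
 * back to a locally composed copy if the server errors out.
 */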
int
puffs_statvfs(struct mount *mp, struct statvfs *sbp, struct lwp *l)
{
	struct puffs_vfsreq_statvfs *statvfs_arg; /* too big for stack */
	struct puffs_mount *pmp;
	int error = 0;

	pmp = MPTOPUFFSMP(mp);

	/*
	 * If we are mounting, it means that the userspace counterpart
	 * is calling mount(2), but mount(2) also calls statvfs. So
	 * requesting statvfs from userspace would mean a deadlock.
	 * Compensate.
	 */
	if (pmp->pmp_status == PUFFSTAT_MOUNTING)
		return EINPROGRESS;

	/* too big for stack */
	MALLOC(statvfs_arg, struct puffs_vfsreq_statvfs *,
	    sizeof(struct puffs_vfsreq_statvfs), M_PUFFS, M_WAITOK | M_ZERO);
	statvfs_arg->pvfsr_pid = puffs_lwp2pid(l);

	error = puffs_vfstouser(pmp, PUFFS_VFS_STATVFS,
	    statvfs_arg, sizeof(*statvfs_arg));
	statvfs_arg->pvfsr_sb.f_iosize = DEV_BSIZE;

	/*
	 * Try to produce a sensible result even in the event
	 * of userspace error.
	 *
	 * XXX: cache the copy in non-error case
	 */
	if (!error) {
		copy_statvfs_info(&statvfs_arg->pvfsr_sb, mp);
		(void)memcpy(sbp, &statvfs_arg->pvfsr_sb,
		    sizeof(struct statvfs));
	} else {
		copy_statvfs_info(sbp, mp);
	}

	FREE(statvfs_arg, M_PUFFS);
	return error;
}

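/*
 * Flush cached data of all regular vnodes on the mount, either for
 * a regular sync or for file system suspension (in which case the
 * putpages operations are issued as FAF, see the comments below).
 */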
static int
pageflush(struct mount *mp, kauth_cred_t cred,
    int waitfor, int suspending, struct lwp *l)
{
	struct puffs_node *pn;
	struct vnode *vp, *nvp;
	int error, rv;

	KASSERT(((waitfor == MNT_WAIT) && suspending) == 0);
	KASSERT((suspending == 0)
	    || (fstrans_is_owner(mp)
	      && fstrans_getstate(mp) == FSTRANS_SUSPENDING));

	error = 0;

	/*
	 * Sync all cached data from regular vnodes (which are not
	 * currently locked, see below). After this we call VFS_SYNC
	 * for the fs server, which should handle data and metadata for
	 * all the nodes it knows to exist.
	 */
	simple_lock(&mntvnode_slock);
 loop:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		/* check if we're on the right list */
		if (vp->v_mount != mp)
			goto loop;

		simple_lock(&vp->v_interlock);
		pn = VPTOPP(vp);
		nvp = TAILQ_NEXT(vp, v_mntvnodes);

		if (vp->v_type != VREG || UVM_OBJ_IS_CLEAN(&vp->v_uobj)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}

		simple_unlock(&mntvnode_slock);

		/*
		 * Here we try to get a reference to the vnode and to
		 * lock it. This is mostly cargo-culted, but I will
		 * offer an explanation of why I believe this might
		 * actually do the right thing.
		 *
		 * If the vnode is a goner, we quite obviously don't need
		 * to sync it.
		 *
		 * If the vnode was busy, we don't need to sync it because
		 * this is never called with MNT_WAIT except from
		 * dounmount(), when we are wait-flushing all the dirty
		 * vnodes through other routes in any case. So there,
		 * sync() doesn't actually sync. Happy now?
		 *
		 * NOTE: if we're suspending, vget() does NOT lock.
		 * See puffs_lock() for details.
		 */
		rv = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (rv) {
			simple_lock(&mntvnode_slock);
			if (rv == ENOENT)
				goto loop;
			continue;
		}

		/*
		 * Thread information to puffs_strategy() through the
		 * pnode flags: we want to issue the putpages operations
		 * as FAF if we're suspending, since it's very probable
		 * that our execution context is that of the userspace
		 * daemon. We can do this because:
		 *   + we send the "going to suspend" prior to this part
		 *   + if any of the writes fails in userspace, it's the
		 *     file system server's problem to decide if this was a
		 *     failed snapshot when it gets the "snapshot complete"
		 *     notification.
		 *   + if any of the writes fail in the kernel already, we
		 *     immediately fail *and* notify the user server of
		 *     failure.
		 *
		 * We also do FAFs if we're called from the syncer. This
		 * is just general optimization for trickle sync: no need
		 * to really guarantee that the stuff ended on backing
		 * storage.
		 * TODO: Maybe also hint the user server of this twist?
		 */
		if (suspending || waitfor == MNT_LAZY) {
			simple_lock(&vp->v_interlock);
			pn->pn_stat |= PNODE_SUSPEND;
			simple_unlock(&vp->v_interlock);
		}
		rv = VOP_FSYNC(vp, cred, waitfor, 0, 0, l);
		if (suspending || waitfor == MNT_LAZY) {
			simple_lock(&vp->v_interlock);
			pn->pn_stat &= ~PNODE_SUSPEND;
			simple_unlock(&vp->v_interlock);
		}
		if (rv)
			error = rv;
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);

	return error;
}

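/*
 * Sync the file system: flush locally cached pages first and then
 * ask the userspace server to sync its own state.
 */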
int
puffs_sync(struct mount *mp, int waitfor, struct kauth_cred *cred,
    struct lwp *l)
{
	int error, rv;

	PUFFS_VFSREQ(sync);

	error = pageflush(mp, cred, waitfor, 0, l);

	/* sync fs */
	sync_arg.pvfsr_waitfor = waitfor;
	puffs_credcvt(&sync_arg.pvfsr_cred, cred);
	sync_arg.pvfsr_pid = puffs_lwp2pid(l);

	rv = puffs_vfstouser(MPTOPUFFSMP(mp), PUFFS_VFS_SYNC,
	    &sync_arg, sizeof(sync_arg));
	if (rv)
		error = rv;

	return error;
}

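/*
 * Convert a file handle to a vnode: pass the handle data to the
 * userspace server and map the returned cookie to a (possibly new)
 * vnode. Only allowed if the server advertised export capability.
 */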
int
puffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct vnode *vp;
	int error;

	PUFFS_VFSREQ(fhtonode);

	if ((pmp->pmp_flags & PUFFS_KFLAG_CANEXPORT) == 0)
		return EOPNOTSUPP;

	if (fhp->fid_len < PUFFS_FHSIZE + 4)
		return EINVAL;

	fhtonode_arg.pvfsr_dsize = PUFFS_FHSIZE;
	memcpy(fhtonode_arg.pvfsr_data, fhp->fid_data, PUFFS_FHSIZE);

	error = puffs_vfstouser(pmp, PUFFS_VFS_FHTOVP,
	    &fhtonode_arg, sizeof(fhtonode_arg));
	if (error)
		return error;

	vp = puffs_pnode2vnode(pmp, fhtonode_arg.pvfsr_fhcookie, 1);
	DPRINTF(("puffs_fhtovp: got cookie %p, existing vnode %p\n",
	    fhtonode_arg.pvfsr_fhcookie, vp));
	if (!vp) {
		error = puffs_getvnode(mp, fhtonode_arg.pvfsr_fhcookie,
		    fhtonode_arg.pvfsr_vtype, fhtonode_arg.pvfsr_size,
		    fhtonode_arg.pvfsr_rdev, &vp);
		if (error)
			return error;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	*vpp = vp;
	return 0;
}

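/*
 * Convert a vnode to a file handle by asking the userspace server
 * for the file handle data matching the node's cookie.
 */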
int
puffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
	int error;

	PUFFS_VFSREQ(nodetofh);

	if ((pmp->pmp_flags & PUFFS_KFLAG_CANEXPORT) == 0)
		return EOPNOTSUPP;

	if (*fh_size < PUFFS_FHSIZE + 4) {
		*fh_size = PUFFS_FHSIZE + 4;
		return E2BIG;
	}
	*fh_size = PUFFS_FHSIZE + 4;

	nodetofh_arg.pvfsr_fhcookie = VPTOPNC(vp);
	nodetofh_arg.pvfsr_dsize = PUFFS_FHSIZE;

	error = puffs_vfstouser(pmp, PUFFS_VFS_VPTOFH,
	    &nodetofh_arg, sizeof(nodetofh_arg));
	if (error)
		return error;

	fhp->fid_len = PUFFS_FHSIZE + 4;
	memcpy(fhp->fid_data, nodetofh_arg.pvfsr_data, PUFFS_FHSIZE);

	return 0;
}

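/*
 * Module init/done: attach the malloc type, the puffs_node pool and
 * the transport/message interfaces, and tear them down again in
 * reverse order.
 */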
void
puffs_init()
{

	malloc_type_attach(M_PUFFS);

	pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
	    "puffpnpl", &pool_allocator_nointr, IPL_NONE);
	puffs_transport_init();
	puffs_msgif_init();
}

void
puffs_done()
{

	puffs_msgif_destroy();
	puffs_transport_destroy();
	pool_destroy(&puffs_pnpool);

	malloc_type_detach(M_PUFFS);
}

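/* File system snapshots are not supported. */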
int
puffs_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
{

	return EOPNOTSUPP;
}

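/*
 * Suspend or resume the file system, notifying the userspace server
 * of each state transition via puffs_suspendtouser().
 */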
int
puffs_suspendctl(struct mount *mp, int cmd)
{
	struct puffs_mount *pmp;
	int error;

	pmp = MPTOPUFFSMP(mp);
	switch (cmd) {
	case SUSPEND_SUSPEND:
		DPRINTF(("puffs_suspendctl: suspending\n"));
		if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
			break;
		puffs_suspendtouser(pmp, PUFFS_SUSPEND_START);

		error = pageflush(mp, FSCRED, 0, 1, curlwp);
		if (error == 0)
			error = fstrans_setstate(mp, FSTRANS_SUSPENDED);

		if (error != 0) {
			puffs_suspendtouser(pmp, PUFFS_SUSPEND_ERROR);
			(void) fstrans_setstate(mp, FSTRANS_NORMAL);
			break;
		}

		puffs_suspendtouser(pmp, PUFFS_SUSPEND_SUSPENDED);

		break;

	case SUSPEND_RESUME:
		DPRINTF(("puffs_suspendctl: resume\n"));
		error = 0;
		(void) fstrans_setstate(mp, FSTRANS_NORMAL);
		puffs_suspendtouser(pmp, PUFFS_SUSPEND_RESUME);
		break;

	default:
		error = EINVAL;
		break;
	}

	DPRINTF(("puffs_suspendctl: return %d\n", error));
	return error;
}

const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
	&puffs_vnodeop_opv_desc,
	&puffs_specop_opv_desc,
	&puffs_fifoop_opv_desc,
	&puffs_msgop_opv_desc,
	NULL,
};

struct vfsops puffs_vfsops = {
	MOUNT_PUFFS,
	puffs_mount,		/* mount	*/
	puffs_start,		/* start	*/
	puffs_unmount,		/* unmount	*/
	puffs_root,		/* root		*/
	(void *)eopnotsupp,	/* quotactl	*/
	puffs_statvfs,		/* statvfs	*/
	puffs_sync,		/* sync		*/
	(void *)eopnotsupp,	/* vget		*/
	puffs_fhtovp,		/* fhtovp	*/
	puffs_vptofh,		/* vptofh	*/
	puffs_init,		/* init		*/
	NULL,			/* reinit	*/
	puffs_done,		/* done		*/
	NULL,			/* mountroot	*/
	puffs_snapshot,		/* snapshot	*/
	vfs_stdextattrctl,	/* extattrctl	*/
	puffs_suspendctl,	/* suspendctl	*/
	puffs_vnodeopv_descs,	/* vnodeops	*/
	0,			/* refcount	*/
	{ NULL, NULL }
};
VFS_ATTACH(puffs_vfsops);