puffs_vfsops.c revision 1.42 1 /* $NetBSD: puffs_vfsops.c,v 1.42 2007/05/17 13:59:22 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2005, 2006 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by the
7 * Google Summer of Code program and the Ulla Tuominen Foundation.
8 * The Google SoC project was mentored by Bill Studenmund.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the company nor the name of the author may be used to
19 * endorse or promote products derived from this software without specific
20 * prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
23 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.42 2007/05/17 13:59:22 pooka Exp $");
37
38 #include <sys/param.h>
39 #include <sys/mount.h>
40 #include <sys/malloc.h>
41 #include <sys/extattr.h>
42 #include <sys/queue.h>
43 #include <sys/vnode.h>
44 #include <sys/dirent.h>
45 #include <sys/kauth.h>
46 #include <sys/fstrans.h>
47
48 #include <lib/libkern/libkern.h>
49
50 #include <fs/puffs/puffs_msgif.h>
51 #include <fs/puffs/puffs_sys.h>
52
53 #include <nfs/nfsproto.h> /* for fh sizes */
54
/* Generate prototypes for the puffs_* VFS operations below. */
VFS_PROTOS(puffs);

/* malloc(9) type for all puffs kernel-side allocations */
MALLOC_JUSTDEFINE(M_PUFFS, "puffs", "Pass-to-Userspace Framework File System");

/* default and maximum number of pnode hash buckets per mount */
#ifndef PUFFS_PNODEBUCKETS
#define PUFFS_PNODEBUCKETS 256
#endif
#ifndef PUFFS_MAXPNODEBUCKETS
#define PUFFS_MAXPNODEBUCKETS 8192
#endif
/* patchable: used when the mount arguments don't specify a count */
int puffs_pnodebuckets_default = PUFFS_PNODEBUCKETS;
/* patchable: hard upper limit for the per-mount hash bucket count */
int puffs_maxpnodebuckets = PUFFS_MAXPNODEBUCKETS;
67
68 int
69 puffs_mount(struct mount *mp, const char *path, void *data,
70 struct nameidata *ndp, struct lwp *l)
71 {
72 struct puffs_mount *pmp = NULL;
73 struct puffs_kargs *args;
74 char namebuf[PUFFSNAMESIZE+sizeof(PUFFS_NAMEPREFIX)+1]; /* spooky */
75 int error = 0, i;
76
77 if (mp->mnt_flag & MNT_GETARGS) {
78 pmp = MPTOPUFFSMP(mp);
79 return copyout(&pmp->pmp_args,data,sizeof(struct puffs_kargs));
80 }
81
82 /* update is not supported currently */
83 if (mp->mnt_flag & MNT_UPDATE)
84 return EOPNOTSUPP;
85
86 /*
87 * We need the file system name
88 */
89 if (!data)
90 return EINVAL;
91
92 MALLOC(args, struct puffs_kargs *, sizeof(struct puffs_kargs),
93 M_PUFFS, M_WAITOK);
94
95 error = copyin(data, args, sizeof(struct puffs_kargs));
96 if (error)
97 goto out;
98
99 /* devel phase */
100 if (args->pa_vers != (PUFFSVERSION | PUFFSDEVELVERS)) {
101 printf("puffs_mount: development version mismatch\n");
102 error = EINVAL;
103 goto out;
104 }
105
106 /* nuke spy bits */
107 args->pa_flags &= PUFFS_KFLAG_MASK;
108
109 /* sanitize file handle length */
110 if (PUFFS_TOFHSIZE(args->pa_fhsize) > FHANDLE_SIZE_MAX) {
111 printf("puffs_mount: handle size %zu too large\n",
112 args->pa_fhsize);
113 error = EINVAL;
114 goto out;
115 }
116 /* sanity check file handle max sizes */
117 if (args->pa_fhsize && args->pa_fhflags & PUFFS_FHFLAG_PROTOMASK) {
118 size_t kfhsize = PUFFS_TOFHSIZE(args->pa_fhsize);
119
120 if (args->pa_fhflags & PUFFS_FHFLAG_NFSV2) {
121 if (NFSX_FHTOOBIG_P(kfhsize, 0)) {
122 printf("puffs_mount: fhsize larger than "
123 "NFSv2 max %d\n",
124 PUFFS_FROMFHSIZE(NFSX_V2FH));
125 error = EINVAL;
126 goto out;
127 }
128 }
129
130 if (args->pa_fhflags & PUFFS_FHFLAG_NFSV3) {
131 if (NFSX_FHTOOBIG_P(kfhsize, 1)) {
132 printf("puffs_mount: fhsize larger than "
133 "NFSv3 max %d\n",
134 PUFFS_FROMFHSIZE(NFSX_V3FHMAX));
135 error = EINVAL;
136 goto out;
137 }
138 }
139 }
140
141 /* build real name */
142 (void)strlcpy(namebuf, PUFFS_NAMEPREFIX, sizeof(namebuf));
143 (void)strlcat(namebuf, args->pa_name, sizeof(namebuf));
144
145 /* inform user server if it got the max request size it wanted */
146 if (args->pa_maxreqlen == 0 || args->pa_maxreqlen > PUFFS_REQ_MAXSIZE)
147 args->pa_maxreqlen = PUFFS_REQ_MAXSIZE;
148 else if (args->pa_maxreqlen < PUFFS_REQSTRUCT_MAX)
149 args->pa_maxreqlen = PUFFS_REQSTRUCT_MAX;
150 (void)strlcpy(args->pa_name, namebuf, sizeof(args->pa_name));
151
152 if (args->pa_nhashbuckets == 0)
153 args->pa_nhashbuckets = puffs_pnodebuckets_default;
154 if (args->pa_nhashbuckets < 1)
155 args->pa_nhashbuckets = 1;
156 if (args->pa_nhashbuckets > PUFFS_MAXPNODEBUCKETS) {
157 args->pa_nhashbuckets = puffs_maxpnodebuckets;
158 printf("puffs_mount: using %d hash buckets. "
159 "adjust puffs_maxpnodebuckets for more\n",
160 puffs_maxpnodebuckets);
161 }
162
163 error = copyout(args, data, sizeof(struct puffs_kargs));
164 if (error)
165 goto out;
166
167 error = set_statvfs_info(path, UIO_USERSPACE, namebuf,
168 UIO_SYSSPACE, mp, l);
169 if (error)
170 goto out;
171 mp->mnt_stat.f_iosize = DEV_BSIZE;
172
173 /*
174 * We can't handle the VFS_STATVFS() mount_domount() does
175 * after VFS_MOUNT() because we'd deadlock, so handle it
176 * here already.
177 */
178 copy_statvfs_info(&args->pa_svfsb, mp);
179 (void)memcpy(&mp->mnt_stat, &args->pa_svfsb, sizeof(mp->mnt_stat));
180
181 MALLOC(pmp, struct puffs_mount *, sizeof(struct puffs_mount),
182 M_PUFFS, M_WAITOK | M_ZERO);
183
184 mp->mnt_fs_bshift = DEV_BSHIFT;
185 mp->mnt_dev_bshift = DEV_BSHIFT;
186 mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
187 mp->mnt_data = pmp;
188 mp->mnt_iflag |= IMNT_HAS_TRANS;
189
190 pmp->pmp_status = PUFFSTAT_MOUNTING;
191 pmp->pmp_nextreq = 0;
192 pmp->pmp_mp = mp;
193 pmp->pmp_req_maxsize = args->pa_maxreqlen;
194 pmp->pmp_args = *args;
195
196 pmp->pmp_npnodehash = args->pa_nhashbuckets;
197 pmp->pmp_pnodehash = malloc
198 (sizeof(struct puffs_pnode_hashlist *) * pmp->pmp_npnodehash,
199 M_PUFFS, M_WAITOK);
200 for (i = 0; i < pmp->pmp_npnodehash; i++)
201 LIST_INIT(&pmp->pmp_pnodehash[i]);
202
203 /*
204 * Inform the fileops processing code that we have a mountpoint.
205 * If it doesn't know about anyone with our pid/fd having the
206 * device open, punt
207 */
208 if (puffs_setpmp(l->l_proc->p_pid, args->pa_fd, pmp)) {
209 error = ENOENT;
210 goto out;
211 }
212
213 /* XXX: check parameters */
214 pmp->pmp_root_cookie = args->pa_root_cookie;
215 pmp->pmp_root_vtype = args->pa_root_vtype;
216 pmp->pmp_root_vsize = args->pa_root_vsize;
217 pmp->pmp_root_rdev = args->pa_root_rdev;
218
219 mutex_init(&pmp->pmp_lock, MUTEX_DEFAULT, IPL_NONE);
220 cv_init(&pmp->pmp_req_waiter_cv, "puffsget");
221 cv_init(&pmp->pmp_refcount_cv, "puffsref");
222 cv_init(&pmp->pmp_unmounting_cv, "puffsum");
223 TAILQ_INIT(&pmp->pmp_req_touser);
224 TAILQ_INIT(&pmp->pmp_req_replywait);
225 TAILQ_INIT(&pmp->pmp_req_sizepark);
226
227 DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
228 mp, MPTOPUFFSMP(mp)));
229
230 vfs_getnewfsid(mp);
231
232 out:
233 if (error && pmp && pmp->pmp_pnodehash)
234 free(pmp->pmp_pnodehash, M_PUFFS);
235 if (error && pmp)
236 FREE(pmp, M_PUFFS);
237 FREE(args, M_PUFFS);
238 return error;
239 }
240
241 int
242 puffs_start(struct mount *mp, int flags, struct lwp *l)
243 {
244 struct puffs_mount *pmp = MPTOPUFFSMP(mp);
245
246 KASSERT(pmp->pmp_status == PUFFSTAT_MOUNTING);
247 pmp->pmp_status = PUFFSTAT_RUNNING;
248
249 return 0;
250 }
251
/*
 * puffs_unmount: detach a puffs file system from the VFS layer.
 *
 * Flushes all vnodes, then (unless the mount is already DYING) asks
 * the userspace server for its opinion via PUFFS_VFS_UNMOUNT.  If the
 * server agrees, or MNT_FORCE was given, the mount is torn down
 * regardless of what userspace thinks.
 */
int
puffs_unmount(struct mount *mp, int mntflags, struct lwp *l)
{
	struct puffs_mount *pmp;
	int error, force;

	/* declares unmount_arg: the request shipped to the server */
	PUFFS_VFSREQ(unmount);

	error = 0;
	force = mntflags & MNT_FORCE;
	pmp = MPTOPUFFSMP(mp);

	DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
	    "status 0x%x\n", pmp->pmp_status));

	/*
	 * flush all the vnodes.  VOP_RECLAIM() takes care that the
	 * root vnode does not get flushed until unmount.  The
	 * userspace root node cookie is stored in the mount
	 * structure, so we can always re-instantiate a root vnode,
	 * should userspace unmount decide it doesn't want to
	 * cooperate.
	 */
	error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
	if (error)
		goto out;

	/*
	 * If we are not DYING, we should ask userspace's opinion
	 * about the situation
	 */
	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_status != PUFFSTAT_DYING) {
		/* flag unmount-in-progress and drop the lock for the rpc */
		pmp->pmp_unmounting = 1;
		mutex_exit(&pmp->pmp_lock);

		unmount_arg.pvfsr_flags = mntflags;
		unmount_arg.pvfsr_pid = puffs_lwp2pid(l);

		error = puffs_vfstouser(pmp, PUFFS_VFS_UNMOUNT,
		    &unmount_arg, sizeof(unmount_arg));
		DPRINTF(("puffs_unmount: error %d force %d\n", error, force));

		/* rpc done, wake anyone waiting for the unmount attempt */
		mutex_enter(&pmp->pmp_lock);
		pmp->pmp_unmounting = 0;
		cv_broadcast(&pmp->pmp_unmounting_cv);
	}
	/* NOTE: pmp_lock is held here on both paths of the if above */

	/*
	 * if userspace cooperated or we really need to die,
	 * screw what userland thinks and just die.
	 */
	if (error == 0 || force) {
		/* tell waiters & other resources to go unwait themselves */
		puffs_userdead(pmp);
		puffs_nukebypmp(pmp);

		/*
		 * Wait until there are no more users for the mount resource.
		 * Notice that this is hooked against transport_close
		 * and return from touser.  In an ideal world, it would
		 * be hooked against final return from all operations.
		 * But currently it works well enough, since nobody
		 * does weird blocking voodoo after return from touser().
		 */
		while (pmp->pmp_refcount != 0)
			cv_wait(&pmp->pmp_refcount_cv, &pmp->pmp_lock);
		mutex_exit(&pmp->pmp_lock);

		/* free resources now that we hopefully have no waiters left */
		cv_destroy(&pmp->pmp_unmounting_cv);
		cv_destroy(&pmp->pmp_refcount_cv);
		cv_destroy(&pmp->pmp_req_waiter_cv);
		mutex_destroy(&pmp->pmp_lock);

		free(pmp->pmp_pnodehash, M_PUFFS);
		FREE(pmp, M_PUFFS);
		/* forced unmount succeeds even if userspace objected */
		error = 0;
	} else {
		mutex_exit(&pmp->pmp_lock);
	}

 out:
	DPRINTF(("puffs_unmount: return %d\n", error));
	return error;
}
338
339 /*
340 * This doesn't need to travel to userspace
341 */
342 int
343 puffs_root(struct mount *mp, struct vnode **vpp)
344 {
345 struct puffs_mount *pmp;
346 struct puffs_node *pn;
347 struct vnode *vp;
348
349 pmp = MPTOPUFFSMP(mp);
350
351 /*
352 * pmp_lock must be held if vref()'ing or vrele()'ing the
353 * root vnode. the latter is controlled by puffs_inactive().
354 */
355 mutex_enter(&pmp->pmp_lock);
356 vp = pmp->pmp_root;
357 if (vp) {
358 simple_lock(&vp->v_interlock);
359 mutex_exit(&pmp->pmp_lock);
360 pn = VPTOPP(vp);
361 if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK))
362 goto grabnew;
363 *vpp = vp;
364 return 0;
365 } else
366 mutex_exit(&pmp->pmp_lock);
367
368 /* XXX: this is wrong, so FIXME */
369 grabnew:
370
371 /*
372 * So, didn't have the magic root vnode available.
373 * No matter, grab another an stuff it with the cookie.
374 */
375 if (puffs_getvnode(mp, pmp->pmp_root_cookie, pmp->pmp_root_vtype,
376 pmp->pmp_root_vsize, pmp->pmp_root_rdev, &vp))
377 panic("sloppy programming");
378
379 mutex_enter(&pmp->pmp_lock);
380 /*
381 * check if by mysterious force someone else created a root
382 * vnode while we were executing.
383 */
384 if (pmp->pmp_root) {
385 vref(pmp->pmp_root);
386 mutex_exit(&pmp->pmp_lock);
387 puffs_putvnode(vp);
388 vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);
389 *vpp = pmp->pmp_root;
390 return 0;
391 }
392
393 /* store cache */
394 vp->v_flag = VROOT;
395 pmp->pmp_root = vp;
396 mutex_exit(&pmp->pmp_lock);
397
398 vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);
399
400 *vpp = vp;
401 return 0;
402 }
403
404 int
405 puffs_statvfs(struct mount *mp, struct statvfs *sbp, struct lwp *l)
406 {
407 struct puffs_vfsreq_statvfs *statvfs_arg; /* too big for stack */
408 struct puffs_mount *pmp;
409 int error = 0;
410
411 pmp = MPTOPUFFSMP(mp);
412
413 /*
414 * If we are mounting, it means that the userspace counterpart
415 * is calling mount(2), but mount(2) also calls statvfs. So
416 * requesting statvfs from userspace would mean a deadlock.
417 * Compensate.
418 */
419 if (pmp->pmp_status == PUFFSTAT_MOUNTING)
420 return EINPROGRESS;
421
422 /* too big for stack */
423 MALLOC(statvfs_arg, struct puffs_vfsreq_statvfs *,
424 sizeof(struct puffs_vfsreq_statvfs), M_PUFFS, M_WAITOK | M_ZERO);
425 statvfs_arg->pvfsr_pid = puffs_lwp2pid(l);
426
427 error = puffs_vfstouser(pmp, PUFFS_VFS_STATVFS,
428 statvfs_arg, sizeof(*statvfs_arg));
429 statvfs_arg->pvfsr_sb.f_iosize = DEV_BSIZE;
430
431 /*
432 * Try to produce a sensible result even in the event
433 * of userspace error.
434 *
435 * XXX: cache the copy in non-error case
436 */
437 if (!error) {
438 copy_statvfs_info(&statvfs_arg->pvfsr_sb, mp);
439 (void)memcpy(sbp, &statvfs_arg->pvfsr_sb,
440 sizeof(struct statvfs));
441 } else {
442 copy_statvfs_info(sbp, mp);
443 }
444
445 FREE(statvfs_arg, M_PUFFS);
446 return error;
447 }
448
/*
 * pageflush: sync cached pages of all regular files on the mount by
 * walking mp->mnt_vnodelist and calling VOP_FSYNC() on each dirty
 * VREG vnode.
 *
 * cred/waitfor are passed through to VOP_FSYNC().  "suspending" is
 * set when called from puffs_suspendctl() during fstrans suspension;
 * per the KASSERTs it is mutually exclusive with MNT_WAIT and the
 * caller must then own the fstrans state in FSTRANS_SUSPENDING.
 *
 * Returns 0 or the last VOP_FSYNC() error (the walk continues past
 * individual failures).
 */
static int
pageflush(struct mount *mp, kauth_cred_t cred,
	int waitfor, int suspending, struct lwp *l)
{
	struct puffs_node *pn;
	struct vnode *vp, *nvp;
	int error, rv;

	KASSERT(((waitfor == MNT_WAIT) && suspending) == 0);
	KASSERT((suspending == 0)
	    || (fstrans_is_owner(mp)
	      && fstrans_getstate(mp) == FSTRANS_SUSPENDING));

	error = 0;

	/*
	 * Sync all cached data from regular vnodes (which are not
	 * currently locked, see below).  After this we call VFS_SYNC
	 * for the fs server, which should handle data and metadata for
	 * all the nodes it knows to exist.
	 */
	simple_lock(&mntvnode_slock);
 loop:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		/* check if we're on the right list; restart if not */
		if (vp->v_mount != mp)
			goto loop;

		simple_lock(&vp->v_interlock);
		pn = VPTOPP(vp);
		/* fetch the successor before we drop mntvnode_slock */
		nvp = TAILQ_NEXT(vp, v_mntvnodes);

		/* only dirty regular files need flushing */
		if (vp->v_type != VREG || UVM_OBJ_IS_CLEAN(&vp->v_uobj)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}

		simple_unlock(&mntvnode_slock);

		/*
		 * Here we try to get a reference to the vnode and to
		 * lock it.  This is mostly cargo-culted, but I will
		 * offer an explanation to why I believe this might
		 * actually do the right thing.
		 *
		 * If the vnode is a goner, we quite obviously don't need
		 * to sync it.
		 *
		 * If the vnode was busy, we don't need to sync it because
		 * this is never called with MNT_WAIT except from
		 * dounmount(), when we are wait-flushing all the dirty
		 * vnodes through other routes in any case.  So there,
		 * sync() doesn't actually sync.  Happy now?
		 *
		 * NOTE: if we're suspending, vget() does NOT lock.
		 * See puffs_lock() for details.
		 */
		rv = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (rv) {
			simple_lock(&mntvnode_slock);
			/* vnode was reclaimed: list may be stale, restart */
			if (rv == ENOENT)
				goto loop;
			continue;
		}

		/*
		 * Thread information to puffs_strategy() through the
		 * pnode flags: we want to issue the putpages operations
		 * as FAF if we're suspending, since it's very probable
		 * that our execution context is that of the userspace
		 * daemon.  We can do this because:
		 *   + we send the "going to suspend" prior to this part
		 *   + if any of the writes fails in userspace, it's the
		 *     file system server's problem to decide if this was a
		 *     failed snapshot when it gets the "snapshot complete"
		 *     notification.
		 *   + if any of the writes fail in the kernel already, we
		 *     immediately fail *and* notify the user server of
		 *     failure.
		 *
		 * We also do FAFs if we're called from the syncer.  This
		 * is just general optimization for trickle sync: no need
		 * to really guarantee that the stuff ended on backing
		 * storage.
		 * TODO: Maybe also hint the user server of this twist?
		 */
		if (suspending || waitfor == MNT_LAZY) {
			simple_lock(&vp->v_interlock);
			pn->pn_stat |= PNODE_SUSPEND;
			simple_unlock(&vp->v_interlock);
		}
		rv = VOP_FSYNC(vp, cred, waitfor, 0, 0, l);
		if (suspending || waitfor == MNT_LAZY) {
			simple_lock(&vp->v_interlock);
			pn->pn_stat &= ~PNODE_SUSPEND;
			simple_unlock(&vp->v_interlock);
		}
		/* remember the failure but keep flushing other vnodes */
		if (rv)
			error = rv;
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);

	return error;
}
555
556 int
557 puffs_sync(struct mount *mp, int waitfor, struct kauth_cred *cred,
558 struct lwp *l)
559 {
560 int error, rv;
561
562 PUFFS_VFSREQ(sync);
563
564 error = pageflush(mp, cred, waitfor, 0, l);
565
566 /* sync fs */
567 sync_arg.pvfsr_waitfor = waitfor;
568 puffs_credcvt(&sync_arg.pvfsr_cred, cred);
569 sync_arg.pvfsr_pid = puffs_lwp2pid(l);
570
571 rv = puffs_vfstouser(MPTOPUFFSMP(mp), PUFFS_VFS_SYNC,
572 &sync_arg, sizeof(sync_arg));
573 if (rv)
574 error = rv;
575
576 return error;
577 }
578
/*
 * puffs_fhtovp: translate a file handle into a vnode (NFS export
 * support).  Validates the handle length against the mount's
 * configured fh size (exact match for static handles, upper bound
 * for PUFFS_FHFLAG_DYNAMIC), ships the handle data to the userspace
 * server with PUFFS_VFS_FHTOVP, and turns the returned node cookie
 * into a vnode — reusing an existing one when possible.
 *
 * Returns 0 with a locked vnode in *vpp, or an errno.
 */
int
puffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct puffs_vfsreq_fhtonode *fhtonode_argp;
	struct vnode *vp;
	size_t argsize;
	int error;

	/* fhsize 0 means the server does not support file handles */
	if (pmp->pmp_args.pa_fhsize == 0)
		return EOPNOTSUPP;

	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) {
		/* dynamic: handle may be anything up to the declared max */
		if (pmp->pmp_args.pa_fhsize < PUFFS_FROMFHSIZE(fhp->fid_len))
			return EINVAL;
	} else {
		/* static: handle length must match exactly */
		if (pmp->pmp_args.pa_fhsize != PUFFS_FROMFHSIZE(fhp->fid_len))
			return EINVAL;
	}

	/* variable-length request: header + handle data */
	argsize = sizeof(struct puffs_vfsreq_fhtonode)
	    + PUFFS_FROMFHSIZE(fhp->fid_len);
	fhtonode_argp = malloc(argsize, M_PUFFS, M_ZERO | M_WAITOK);
	fhtonode_argp->pvfsr_dsize = PUFFS_FROMFHSIZE(fhp->fid_len);
	memcpy(fhtonode_argp->pvfsr_data, fhp->fid_data,
	    PUFFS_FROMFHSIZE(fhp->fid_len));

	error = puffs_vfstouser(pmp, PUFFS_VFS_FHTOVP, fhtonode_argp, argsize);
	if (error)
		goto out;

	/* do we already have a vnode for this cookie? */
	vp = puffs_pnode2vnode(pmp, fhtonode_argp->pvfsr_fhcookie, 1);
	DPRINTF(("puffs_fhtovp: got cookie %p, existing vnode %p\n",
	    fhtonode_argp->pvfsr_fhcookie, vp));
	if (!vp) {
		/* no: create a fresh vnode from the server's reply */
		error = puffs_getvnode(mp, fhtonode_argp->pvfsr_fhcookie,
		    fhtonode_argp->pvfsr_vtype, fhtonode_argp->pvfsr_size,
		    fhtonode_argp->pvfsr_rdev, &vp);
		if (error)
			goto out;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	*vpp = vp;
 out:
	free(fhtonode_argp, M_PUFFS);
	return error;
}
627
/*
 * puffs_vptofh: translate a vnode into a file handle (NFS export
 * support).  Asks the userspace server via PUFFS_VFS_VPTOFH for the
 * handle data belonging to the node's cookie and copies it into
 * *fhp (if non-NULL).
 *
 * *fh_size is in/out: on entry the space available, on return the
 * space required.  Returns E2BIG (with *fh_size updated) when the
 * caller's buffer is too small.
 */
int
puffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
	struct puffs_vfsreq_nodetofh *nodetofh_argp;
	size_t argsize;
	int error;

	/* fhsize 0 means the server does not support file handles */
	if (pmp->pmp_args.pa_fhsize == 0)
		return EOPNOTSUPP;

	/* if file handles are static length, we can return immediately */
	if (((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) == 0)
	    && (PUFFS_FROMFHSIZE(*fh_size) < pmp->pmp_args.pa_fhsize)) {
		*fh_size = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);
		return E2BIG;
	}

	/* variable-length request: header + room for the handle data */
	argsize = sizeof(struct puffs_vfsreq_nodetofh)
	    + PUFFS_FROMFHSIZE(*fh_size);
	nodetofh_argp = malloc(argsize, M_PUFFS, M_ZERO | M_WAITOK);
	nodetofh_argp->pvfsr_fhcookie = VPTOPNC(vp);
	nodetofh_argp->pvfsr_dsize = PUFFS_FROMFHSIZE(*fh_size);

	error = puffs_vfstouser(pmp, PUFFS_VFS_VPTOFH, nodetofh_argp, argsize);
	if (error) {
		/* server says our buffer was too small: report needed size */
		if (error == E2BIG)
			*fh_size = PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize);
		goto out;
	}

	/* server returned a handle larger than the kernel can store */
	if (PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize) > FHANDLE_SIZE_MAX) {
		/* XXX: wrong direction */
		error = EINVAL;
		goto out;
	}

	/* caller's buffer too small for what the server returned */
	if (*fh_size < PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize)) {
		*fh_size = PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize);
		error = E2BIG;
		goto out;
	}
	/* report actual size for dynamic handles, declared size otherwise */
	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) {
		*fh_size = PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize);
	} else {
		*fh_size = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);
	}

	if (fhp) {
		fhp->fid_len = *fh_size;
		memcpy(fhp->fid_data,
		    nodetofh_argp->pvfsr_data, nodetofh_argp->pvfsr_dsize);
	}

 out:
	free(nodetofh_argp, M_PUFFS);
	return error;
}
686
687 void
688 puffs_init()
689 {
690
691 malloc_type_attach(M_PUFFS);
692
693 pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
694 "puffpnpl", &pool_allocator_nointr, IPL_NONE);
695 puffs_transport_init();
696 puffs_msgif_init();
697 }
698
699 void
700 puffs_done()
701 {
702
703 puffs_msgif_destroy();
704 puffs_transport_destroy();
705 pool_destroy(&puffs_pnpool);
706
707 malloc_type_detach(M_PUFFS);
708 }
709
/*
 * puffs_snapshot: file system snapshots are not supported by puffs;
 * unconditionally returns EOPNOTSUPP.
 */
int
puffs_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
{

	return EOPNOTSUPP;
}
716
/*
 * puffs_suspendctl: drive fstrans-based file system suspension.
 *
 * SUSPEND_SUSPEND: move the mount to FSTRANS_SUSPENDING, notify the
 * server (PUFFS_SUSPEND_START), flush dirty pages with pageflush()
 * in suspending mode, then move to FSTRANS_SUSPENDED and send
 * PUFFS_SUSPEND_SUSPENDED.  On any failure the server gets
 * PUFFS_SUSPEND_ERROR and the state is rolled back to normal.
 *
 * SUSPEND_RESUME: return to FSTRANS_NORMAL and notify the server.
 *
 * Returns 0 or an errno (EINVAL for an unknown command).
 */
int
puffs_suspendctl(struct mount *mp, int cmd)
{
	struct puffs_mount *pmp;
	int error;

	pmp = MPTOPUFFSMP(mp);
	switch (cmd) {
	case SUSPEND_SUSPEND:
		DPRINTF(("puffs_suspendctl: suspending\n"));
		if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
			break;
		/* tell the server we're about to suspend */
		puffs_suspendtouser(pmp, PUFFS_SUSPEND_START);

		/* flush dirty pages; suspending=1 makes the writes FAF */
		error = pageflush(mp, FSCRED, 0, 1, curlwp);
		if (error == 0)
			error = fstrans_setstate(mp, FSTRANS_SUSPENDED);

		if (error != 0) {
			/* roll back: notify server, return to normal state */
			puffs_suspendtouser(pmp, PUFFS_SUSPEND_ERROR);
			(void) fstrans_setstate(mp, FSTRANS_NORMAL);
			break;
		}

		puffs_suspendtouser(pmp, PUFFS_SUSPEND_SUSPENDED);

		break;

	case SUSPEND_RESUME:
		DPRINTF(("puffs_suspendctl: resume\n"));
		error = 0;
		(void) fstrans_setstate(mp, FSTRANS_NORMAL);
		puffs_suspendtouser(pmp, PUFFS_SUSPEND_RESUME);
		break;

	default:
		error = EINVAL;
		break;
	}

	DPRINTF(("puffs_suspendctl: return %d\n", error));
	return error;
}
760
/* vnode operation vectors provided by puffs (NULL-terminated) */
const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
	&puffs_vnodeop_opv_desc,	/* regular vnode operations */
	&puffs_specop_opv_desc,		/* special (device) files */
	&puffs_fifoop_opv_desc,		/* fifos */
	&puffs_msgop_opv_desc,		/* message interface */
	NULL,
};
768
/* VFS operations vector for puffs; registered with VFS_ATTACH below */
struct vfsops puffs_vfsops = {
	MOUNT_PUFFS,
	puffs_mount,		/* mount */
	puffs_start,		/* start */
	puffs_unmount,		/* unmount */
	puffs_root,		/* root */
	(void *)eopnotsupp,	/* quotactl */
	puffs_statvfs,		/* statvfs */
	puffs_sync,		/* sync */
	(void *)eopnotsupp,	/* vget */
	puffs_fhtovp,		/* fhtovp */
	puffs_vptofh,		/* vptofh */
	puffs_init,		/* init */
	NULL,			/* reinit */
	puffs_done,		/* done */
	NULL,			/* mountroot */
	puffs_snapshot,		/* snapshot */
	vfs_stdextattrctl,	/* extattrctl */
	puffs_suspendctl,	/* suspendctl */
	puffs_vnodeopv_descs,	/* vnodeops */
	0,			/* refcount */
	{ NULL, NULL }
};
VFS_ATTACH(puffs_vfsops);
793