/*	$NetBSD: puffs_vfsops.c,v 1.28.6.5 2007/06/08 14:15:01 ad Exp $	*/
2
3 /*
4 * Copyright (c) 2005, 2006 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by the
7 * Google Summer of Code program and the Ulla Tuominen Foundation.
8 * The Google SoC project was mentored by Bill Studenmund.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the company nor the name of the author may be used to
19 * endorse or promote products derived from this software without specific
20 * prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
23 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.28.6.5 2007/06/08 14:15:01 ad Exp $");
37
38 #include <sys/param.h>
39 #include <sys/mount.h>
40 #include <sys/malloc.h>
41 #include <sys/extattr.h>
42 #include <sys/queue.h>
43 #include <sys/vnode.h>
44 #include <sys/dirent.h>
45 #include <sys/kauth.h>
46 #include <sys/fstrans.h>
47 #include <sys/proc.h>
48
49 #include <lib/libkern/libkern.h>
50
51 #include <fs/puffs/puffs_msgif.h>
52 #include <fs/puffs/puffs_sys.h>
53
54 #include <nfs/nfsproto.h> /* for fh sizes */
55
VFS_PROTOS(puffs);

MALLOC_JUSTDEFINE(M_PUFFS, "puffs", "Pass-to-Userspace Framework File System");

/*
 * Default and maximum number of puffs node hash buckets.  Both are
 * compile-time overridable; the runtime values below may additionally
 * be patched (e.g. via ddb) before mounting.
 */
#ifndef PUFFS_PNODEBUCKETS
#define PUFFS_PNODEBUCKETS 256
#endif
#ifndef PUFFS_MAXPNODEBUCKETS
#define PUFFS_MAXPNODEBUCKETS 8192
#endif
int puffs_pnodebuckets_default = PUFFS_PNODEBUCKETS;
int puffs_maxpnodebuckets = PUFFS_MAXPNODEBUCKETS;
68
/*
 * puffs_mount: VFS mount entry point.
 *
 * Copies in and validates the puffs_kargs supplied by the userspace
 * file server, sanitizes the version/flag/file-handle/bucket values,
 * copies the possibly adjusted arguments back to userspace, fills in
 * statvfs info (to dodge a later deadlock, see below) and finally
 * allocates and initializes the struct puffs_mount.
 *
 * MNT_GETARGS just copies the stored arguments out.  MNT_UPDATE is
 * not supported.  Returns 0 or an errno value.
 */
int
puffs_mount(struct mount *mp, const char *path, void *data,
	struct nameidata *ndp, struct lwp *l)
{
	struct puffs_mount *pmp = NULL;
	struct puffs_kargs *args;
	char namebuf[PUFFSNAMESIZE+sizeof(PUFFS_NAMEPREFIX)+1]; /* spooky */
	int error = 0, i;

	if (mp->mnt_flag & MNT_GETARGS) {
		pmp = MPTOPUFFSMP(mp);
		return copyout(&pmp->pmp_args,data,sizeof(struct puffs_kargs));
	}

	/* update is not supported currently */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	/*
	 * We need the file system name
	 */
	if (!data)
		return EINVAL;

	/* kargs is too large for the kernel stack, take it from M_PUFFS */
	MALLOC(args, struct puffs_kargs *, sizeof(struct puffs_kargs),
	    M_PUFFS, M_WAITOK);

	error = copyin(data, args, sizeof(struct puffs_kargs));
	if (error)
		goto out;

	/* devel phase */
	if (args->pa_vers != (PUFFSVERSION | PUFFSDEVELVERS)) {
		printf("puffs_mount: development version mismatch\n");
		error = EINVAL;
		goto out;
	}

	/* nuke spy bits */
	args->pa_flags &= PUFFS_KFLAG_MASK;

	/* sanitize file handle length */
	if (PUFFS_TOFHSIZE(args->pa_fhsize) > FHANDLE_SIZE_MAX) {
		printf("puffs_mount: handle size %zu too large\n",
		    args->pa_fhsize);
		error = EINVAL;
		goto out;
	}
	/* sanity check file handle max sizes */
	if (args->pa_fhsize && args->pa_fhflags & PUFFS_FHFLAG_PROTOMASK) {
		size_t kfhsize = PUFFS_TOFHSIZE(args->pa_fhsize);

		if (args->pa_fhflags & PUFFS_FHFLAG_NFSV2) {
			if (NFSX_FHTOOBIG_P(kfhsize, 0)) {
				printf("puffs_mount: fhsize larger than "
				    "NFSv2 max %d\n",
				    PUFFS_FROMFHSIZE(NFSX_V2FH));
				error = EINVAL;
				goto out;
			}
		}

		if (args->pa_fhflags & PUFFS_FHFLAG_NFSV3) {
			if (NFSX_FHTOOBIG_P(kfhsize, 1)) {
				printf("puffs_mount: fhsize larger than "
				    "NFSv3 max %d\n",
				    PUFFS_FROMFHSIZE(NFSX_V3FHMAX));
				error = EINVAL;
				goto out;
			}
		}
	}

	/* build real name: "puffs|" prefix + server-supplied fs name */
	(void)strlcpy(namebuf, PUFFS_NAMEPREFIX, sizeof(namebuf));
	(void)strlcat(namebuf, args->pa_name, sizeof(namebuf));

	/* inform user server if it got the max request size it wanted */
	if (args->pa_maxreqlen == 0 || args->pa_maxreqlen > PUFFS_REQ_MAXSIZE)
		args->pa_maxreqlen = PUFFS_REQ_MAXSIZE;
	else if (args->pa_maxreqlen < PUFFS_REQSTRUCT_MAX)
		args->pa_maxreqlen = PUFFS_REQSTRUCT_MAX;
	(void)strlcpy(args->pa_name, namebuf, sizeof(args->pa_name));

	/* clamp the pnode hash bucket count to [1, puffs_maxpnodebuckets] */
	if (args->pa_nhashbuckets == 0)
		args->pa_nhashbuckets = puffs_pnodebuckets_default;
	if (args->pa_nhashbuckets < 1)
		args->pa_nhashbuckets = 1;
	if (args->pa_nhashbuckets > PUFFS_MAXPNODEBUCKETS) {
		args->pa_nhashbuckets = puffs_maxpnodebuckets;
		printf("puffs_mount: using %d hash buckets. "
		    "adjust puffs_maxpnodebuckets for more\n",
		    puffs_maxpnodebuckets);
	}

	/* hand the sanitized arguments back to the user server */
	error = copyout(args, data, sizeof(struct puffs_kargs));
	if (error)
		goto out;

	error = set_statvfs_info(path, UIO_USERSPACE, namebuf,
	    UIO_SYSSPACE, mp, l);
	if (error)
		goto out;
	mp->mnt_stat.f_iosize = DEV_BSIZE;

	/*
	 * We can't handle the VFS_STATVFS() mount_domount() does
	 * after VFS_MOUNT() because we'd deadlock, so handle it
	 * here already.
	 */
	copy_statvfs_info(&args->pa_svfsb, mp);
	(void)memcpy(&mp->mnt_stat, &args->pa_svfsb, sizeof(mp->mnt_stat));

	/* M_ZERO: the "out:" cleanup relies on unset fields being NULL */
	MALLOC(pmp, struct puffs_mount *, sizeof(struct puffs_mount),
	    M_PUFFS, M_WAITOK | M_ZERO);

	mp->mnt_fs_bshift = DEV_BSHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
	mp->mnt_data = pmp;
	mp->mnt_iflag |= IMNT_HAS_TRANS;

	pmp->pmp_status = PUFFSTAT_MOUNTING;
	pmp->pmp_nextreq = 0;
	pmp->pmp_mp = mp;
	pmp->pmp_req_maxsize = args->pa_maxreqlen;
	pmp->pmp_args = *args;

	/* allocate and initialize the pnode hash buckets */
	pmp->pmp_npnodehash = args->pa_nhashbuckets;
	pmp->pmp_pnodehash = malloc
	    (sizeof(struct puffs_pnode_hashlist *) * pmp->pmp_npnodehash,
	    M_PUFFS, M_WAITOK);
	for (i = 0; i < pmp->pmp_npnodehash; i++)
		LIST_INIT(&pmp->pmp_pnodehash[i]);

	/*
	 * Inform the fileops processing code that we have a mountpoint.
	 * If it doesn't know about anyone with our pid/fd having the
	 * device open, punt
	 */
	if (puffs_setpmp(l->l_proc->p_pid, args->pa_fd, pmp)) {
		error = ENOENT;
		goto out;
	}

	/* XXX: check parameters */
	pmp->pmp_root_cookie = args->pa_root_cookie;
	pmp->pmp_root_vtype = args->pa_root_vtype;
	pmp->pmp_root_vsize = args->pa_root_vsize;
	pmp->pmp_root_rdev = args->pa_root_rdev;

	mutex_init(&pmp->pmp_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pmp->pmp_req_waiter_cv, "puffsget");
	cv_init(&pmp->pmp_refcount_cv, "puffsref");
	cv_init(&pmp->pmp_unmounting_cv, "puffsum");
	TAILQ_INIT(&pmp->pmp_req_touser);
	TAILQ_INIT(&pmp->pmp_req_replywait);
	TAILQ_INIT(&pmp->pmp_req_sizepark);

	DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
	    mp, MPTOPUFFSMP(mp)));

	vfs_getnewfsid(mp);

 out:
	/* on error, undo whatever was allocated so far; args always goes */
	if (error && pmp && pmp->pmp_pnodehash)
		free(pmp->pmp_pnodehash, M_PUFFS);
	if (error && pmp)
		FREE(pmp, M_PUFFS);
	FREE(args, M_PUFFS);
	return error;
}
241
242 int
243 puffs_start(struct mount *mp, int flags, struct lwp *l)
244 {
245 struct puffs_mount *pmp = MPTOPUFFSMP(mp);
246
247 KASSERT(pmp->pmp_status == PUFFSTAT_MOUNTING);
248 pmp->pmp_status = PUFFSTAT_RUNNING;
249
250 return 0;
251 }
252
/*
 * puffs_unmount: VFS unmount entry point.
 *
 * Flushes all vnodes and, unless the file server is already dying,
 * asks userspace's opinion about the unmount.  The mount is torn
 * down for good if userspace agrees or MNT_FORCE was given;
 * otherwise the unmount is denied with the userspace error.
 */
int
puffs_unmount(struct mount *mp, int mntflags, struct lwp *l)
{
	struct puffs_mount *pmp;
	int error, force;

	PUFFS_VFSREQ(unmount);

	error = 0;
	force = mntflags & MNT_FORCE;
	pmp = MPTOPUFFSMP(mp);

	DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
	    "status 0x%x\n", pmp->pmp_status));

	/*
	 * flush all the vnodes. VOP_RECLAIM() takes care that the
	 * root vnode does not get flushed until unmount. The
	 * userspace root node cookie is stored in the mount
	 * structure, so we can always re-instantiate a root vnode,
	 * should userspace unmount decide it doesn't want to
	 * cooperate.
	 */
	error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
	if (error)
		goto out;

	/*
	 * If we are not DYING, we should ask userspace's opinion
	 * about the situation
	 */
	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_status != PUFFSTAT_DYING) {
		/* announce unmount-in-progress; cleared + broadcast below */
		pmp->pmp_unmounting = 1;
		mutex_exit(&pmp->pmp_lock);

		unmount_arg.pvfsr_flags = mntflags;
		unmount_arg.pvfsr_pid = puffs_lwp2pid(l);

		error = puffs_vfstouser(pmp, PUFFS_VFS_UNMOUNT,
		    &unmount_arg, sizeof(unmount_arg));
		DPRINTF(("puffs_unmount: error %d force %d\n", error, force));

		mutex_enter(&pmp->pmp_lock);
		pmp->pmp_unmounting = 0;
		cv_broadcast(&pmp->pmp_unmounting_cv);
	}

	/*
	 * if userspace cooperated or we really need to die,
	 * screw what userland thinks and just die.
	 */
	if (error == 0 || force) {
		/* tell waiters & other resources to go unwait themselves */
		puffs_userdead(pmp);
		puffs_nukebypmp(pmp);

		/*
		 * Wait until there are no more users for the mount resource.
		 * Notice that this is hooked against transport_close
		 * and return from touser. In an ideal world, it would
		 * be hooked against final return from all operations.
		 * But currently it works well enough, since nobody
		 * does weird blocking voodoo after return from touser().
		 */
		while (pmp->pmp_refcount != 0)
			cv_wait(&pmp->pmp_refcount_cv, &pmp->pmp_lock);
		mutex_exit(&pmp->pmp_lock);

		/* free resources now that we hopefully have no waiters left */
		cv_destroy(&pmp->pmp_unmounting_cv);
		cv_destroy(&pmp->pmp_refcount_cv);
		cv_destroy(&pmp->pmp_req_waiter_cv);
		mutex_destroy(&pmp->pmp_lock);

		free(pmp->pmp_pnodehash, M_PUFFS);
		FREE(pmp, M_PUFFS);
		error = 0;
	} else {
		/* userspace vetoed and we are not forcing: deny unmount */
		mutex_exit(&pmp->pmp_lock);
	}

 out:
	DPRINTF(("puffs_unmount: return %d\n", error));
	return error;
}
339
/*
 * This doesn't need to travel to userspace
 */
/*
 * puffs_root: return a locked, referenced root vnode in *vpp.
 *
 * The cached root vnode is used when available; otherwise a new one
 * is instantiated from the root cookie stored at mount time.  A race
 * against a concurrent instantiation is resolved under pmp_lock.
 * Always returns 0 (failure to create the root vnode panics).
 */
int
puffs_root(struct mount *mp, struct vnode **vpp)
{
	struct puffs_mount *pmp;
	struct puffs_node *pn;
	struct vnode *vp;

	pmp = MPTOPUFFSMP(mp);

	/*
	 * pmp_lock must be held if vref()'ing or vrele()'ing the
	 * root vnode. the latter is controlled by puffs_inactive().
	 */
	mutex_enter(&pmp->pmp_lock);
	vp = pmp->pmp_root;
	if (vp) {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&pmp->pmp_lock);
		/* NOTE(review): pn is assigned but never used here */
		pn = VPTOPP(vp);
		if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK))
			goto grabnew;
		*vpp = vp;
		return 0;
	} else
		mutex_exit(&pmp->pmp_lock);

	/* XXX: this is wrong, so FIXME */
 grabnew:

	/*
	 * So, didn't have the magic root vnode available.
	 * No matter, grab another an stuff it with the cookie.
	 */
	if (puffs_getvnode(mp, pmp->pmp_root_cookie, pmp->pmp_root_vtype,
	    pmp->pmp_root_vsize, pmp->pmp_root_rdev, &vp))
		panic("sloppy programming");

	mutex_enter(&pmp->pmp_lock);
	/*
	 * check if by mysterious force someone else created a root
	 * vnode while we were executing.
	 */
	if (pmp->pmp_root) {
		/* lost the race: use the winner's vnode, ditch ours */
		vref(pmp->pmp_root);
		mutex_exit(&pmp->pmp_lock);
		puffs_putvnode(vp);
		vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);
		*vpp = pmp->pmp_root;
		return 0;
	}

	/* store cache */
	vp->v_flag = VROOT;
	pmp->pmp_root = vp;
	mutex_exit(&pmp->pmp_lock);

	vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);

	*vpp = vp;
	return 0;
}
404
405 int
406 puffs_statvfs(struct mount *mp, struct statvfs *sbp, struct lwp *l)
407 {
408 struct puffs_vfsreq_statvfs *statvfs_arg; /* too big for stack */
409 struct puffs_mount *pmp;
410 int error = 0;
411
412 pmp = MPTOPUFFSMP(mp);
413
414 /*
415 * If we are mounting, it means that the userspace counterpart
416 * is calling mount(2), but mount(2) also calls statvfs. So
417 * requesting statvfs from userspace would mean a deadlock.
418 * Compensate.
419 */
420 if (pmp->pmp_status == PUFFSTAT_MOUNTING)
421 return EINPROGRESS;
422
423 /* too big for stack */
424 MALLOC(statvfs_arg, struct puffs_vfsreq_statvfs *,
425 sizeof(struct puffs_vfsreq_statvfs), M_PUFFS, M_WAITOK | M_ZERO);
426 statvfs_arg->pvfsr_pid = puffs_lwp2pid(l);
427
428 error = puffs_vfstouser(pmp, PUFFS_VFS_STATVFS,
429 statvfs_arg, sizeof(*statvfs_arg));
430 statvfs_arg->pvfsr_sb.f_iosize = DEV_BSIZE;
431
432 /*
433 * Try to produce a sensible result even in the event
434 * of userspace error.
435 *
436 * XXX: cache the copy in non-error case
437 */
438 if (!error) {
439 copy_statvfs_info(&statvfs_arg->pvfsr_sb, mp);
440 (void)memcpy(sbp, &statvfs_arg->pvfsr_sb,
441 sizeof(struct statvfs));
442 } else {
443 copy_statvfs_info(sbp, mp);
444 }
445
446 FREE(statvfs_arg, M_PUFFS);
447 return error;
448 }
449
/*
 * pageflush: sync cached page data of all regular vnodes on a mount.
 *
 * "waitfor" is the usual sync policy, "suspending" is nonzero when
 * called as part of fstrans suspension; the two are mutually
 * exclusive (first KASSERT) and suspension requires that we own the
 * transaction lock in SUSPENDING state (second KASSERT).  Returns 0
 * or the last VOP_FSYNC() error encountered.
 */
static int
pageflush(struct mount *mp, kauth_cred_t cred,
	int waitfor, int suspending, struct lwp *l)
{
	struct puffs_node *pn;
	struct vnode *vp, *nvp;
	int error, rv;

	KASSERT(((waitfor == MNT_WAIT) && suspending) == 0);
	KASSERT((suspending == 0)
	    || (fstrans_is_owner(mp)
	      && fstrans_getstate(mp) == FSTRANS_SUSPENDING));

	error = 0;

	/*
	 * Sync all cached data from regular vnodes (which are not
	 * currently locked, see below). After this we call VFS_SYNC
	 * for the fs server, which should handle data and metadata for
	 * all the nodes it knows to exist.
	 */
	mutex_enter(&mntvnode_lock);
 loop:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		/* check if we're on the right list */
		if (vp->v_mount != mp)
			goto loop;

		mutex_enter(&vp->v_interlock);
		pn = VPTOPP(vp);
		nvp = TAILQ_NEXT(vp, v_mntvnodes);

		/* skip everything except dirty regular files */
		if (vp->v_type != VREG || UVM_OBJ_IS_CLEAN(&vp->v_uobj)) {
			mutex_exit(&vp->v_interlock);
			continue;
		}

		mutex_exit(&mntvnode_lock);

		/*
		 * Here we try to get a reference to the vnode and to
		 * lock it. This is mostly cargo-culted, but I will
		 * offer an explanation to why I believe this might
		 * actually do the right thing.
		 *
		 * If the vnode is a goner, we quite obviously don't need
		 * to sync it.
		 *
		 * If the vnode was busy, we don't need to sync it because
		 * this is never called with MNT_WAIT except from
		 * dounmount(), when we are wait-flushing all the dirty
		 * vnodes through other routes in any case. So there,
		 * sync() doesn't actually sync. Happy now?
		 *
		 * NOTE: if we're suspending, vget() does NOT lock.
		 * See puffs_lock() for details.
		 */
		rv = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (rv) {
			mutex_enter(&mntvnode_lock);
			/* vnode died: list pointer unsafe, restart scan */
			if (rv == ENOENT)
				goto loop;
			continue;
		}

		/*
		 * Thread information to puffs_strategy() through the
		 * pnode flags: we want to issue the putpages operations
		 * as FAF if we're suspending, since it's very probable
		 * that our execution context is that of the userspace
		 * daemon. We can do this because:
		 * + we send the "going to suspend" prior to this part
		 * + if any of the writes fails in userspace, it's the
		 * file system server's problem to decide if this was a
		 * failed snapshot when it gets the "snapshot complete"
		 * notification.
		 * + if any of the writes fail in the kernel already, we
		 * immediately fail *and* notify the user server of
		 * failure.
		 *
		 * We also do FAFs if we're called from the syncer. This
		 * is just general optimization for trickle sync: no need
		 * to really guarantee that the stuff ended on backing
		 * storage.
		 * TODO: Maybe also hint the user server of this twist?
		 */
		if (suspending || waitfor == MNT_LAZY) {
			mutex_enter(&vp->v_interlock);
			pn->pn_stat |= PNODE_SUSPEND;
			mutex_exit(&vp->v_interlock);
		}
		rv = VOP_FSYNC(vp, cred, waitfor, 0, 0, l);
		if (suspending || waitfor == MNT_LAZY) {
			mutex_enter(&vp->v_interlock);
			pn->pn_stat &= ~PNODE_SUSPEND;
			mutex_exit(&vp->v_interlock);
		}
		/* remember the failure but keep flushing the rest */
		if (rv)
			error = rv;
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);

	return error;
}
556
557 int
558 puffs_sync(struct mount *mp, int waitfor, struct kauth_cred *cred,
559 struct lwp *l)
560 {
561 int error, rv;
562
563 PUFFS_VFSREQ(sync);
564
565 error = pageflush(mp, cred, waitfor, 0, l);
566
567 /* sync fs */
568 sync_arg.pvfsr_waitfor = waitfor;
569 puffs_credcvt(&sync_arg.pvfsr_cred, cred);
570 sync_arg.pvfsr_pid = puffs_lwp2pid(l);
571
572 rv = puffs_vfstouser(MPTOPUFFSMP(mp), PUFFS_VFS_SYNC,
573 &sync_arg, sizeof(sync_arg));
574 if (rv)
575 error = rv;
576
577 return error;
578 }
579
580 int
581 puffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
582 {
583 struct puffs_mount *pmp = MPTOPUFFSMP(mp);
584 struct puffs_vfsreq_fhtonode *fhtonode_argp;
585 struct vnode *vp;
586 size_t argsize;
587 int error;
588
589 if (pmp->pmp_args.pa_fhsize == 0)
590 return EOPNOTSUPP;
591
592 if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) {
593 if (pmp->pmp_args.pa_fhsize < PUFFS_FROMFHSIZE(fhp->fid_len))
594 return EINVAL;
595 } else {
596 if (pmp->pmp_args.pa_fhsize != PUFFS_FROMFHSIZE(fhp->fid_len))
597 return EINVAL;
598 }
599
600 argsize = sizeof(struct puffs_vfsreq_fhtonode)
601 + PUFFS_FROMFHSIZE(fhp->fid_len);
602 fhtonode_argp = malloc(argsize, M_PUFFS, M_ZERO | M_WAITOK);
603 fhtonode_argp->pvfsr_dsize = PUFFS_FROMFHSIZE(fhp->fid_len);
604 memcpy(fhtonode_argp->pvfsr_data, fhp->fid_data,
605 PUFFS_FROMFHSIZE(fhp->fid_len));
606
607 error = puffs_vfstouser(pmp, PUFFS_VFS_FHTOVP, fhtonode_argp, argsize);
608 if (error)
609 goto out;
610
611 vp = puffs_pnode2vnode(pmp, fhtonode_argp->pvfsr_fhcookie, 1);
612 DPRINTF(("puffs_fhtovp: got cookie %p, existing vnode %p\n",
613 fhtonode_argp->pvfsr_fhcookie, vp));
614 if (!vp) {
615 error = puffs_getvnode(mp, fhtonode_argp->pvfsr_fhcookie,
616 fhtonode_argp->pvfsr_vtype, fhtonode_argp->pvfsr_size,
617 fhtonode_argp->pvfsr_rdev, &vp);
618 if (error)
619 goto out;
620 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
621 }
622
623 *vpp = vp;
624 out:
625 free(fhtonode_argp, M_PUFFS);
626 return error;
627 }
628
629 int
630 puffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
631 {
632 struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
633 struct puffs_vfsreq_nodetofh *nodetofh_argp;
634 size_t argsize;
635 int error;
636
637 if (pmp->pmp_args.pa_fhsize == 0)
638 return EOPNOTSUPP;
639
640 /* if file handles are static length, we can return immediately */
641 if (((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) == 0)
642 && (PUFFS_FROMFHSIZE(*fh_size) < pmp->pmp_args.pa_fhsize)) {
643 *fh_size = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);
644 return E2BIG;
645 }
646
647 argsize = sizeof(struct puffs_vfsreq_nodetofh)
648 + PUFFS_FROMFHSIZE(*fh_size);
649 nodetofh_argp = malloc(argsize, M_PUFFS, M_ZERO | M_WAITOK);
650 nodetofh_argp->pvfsr_fhcookie = VPTOPNC(vp);
651 nodetofh_argp->pvfsr_dsize = PUFFS_FROMFHSIZE(*fh_size);
652
653 error = puffs_vfstouser(pmp, PUFFS_VFS_VPTOFH, nodetofh_argp, argsize);
654 if (error) {
655 if (error == E2BIG)
656 *fh_size = PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize);
657 goto out;
658 }
659
660 if (PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize) > FHANDLE_SIZE_MAX) {
661 /* XXX: wrong direction */
662 error = EINVAL;
663 goto out;
664 }
665
666 if (*fh_size < PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize)) {
667 *fh_size = PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize);
668 error = E2BIG;
669 goto out;
670 }
671 if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) {
672 *fh_size = PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize);
673 } else {
674 *fh_size = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);
675 }
676
677 if (fhp) {
678 fhp->fid_len = *fh_size;
679 memcpy(fhp->fid_data,
680 nodetofh_argp->pvfsr_data, nodetofh_argp->pvfsr_dsize);
681 }
682
683 out:
684 free(nodetofh_argp, M_PUFFS);
685 return error;
686 }
687
/*
 * puffs_init: one-time module initialization.
 *
 * Attaches the M_PUFFS malloc type, creates the puffs_node pool and
 * brings up the transport and message-interface subsystems.
 */
void
puffs_init()
{

	malloc_type_attach(M_PUFFS);

	pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
	    "puffpnpl", &pool_allocator_nointr, IPL_NONE);
	puffs_transport_init();
	puffs_msgif_init();
}
699
/*
 * puffs_done: module teardown; undoes puffs_init() in reverse order.
 */
void
puffs_done()
{

	puffs_msgif_destroy();
	puffs_transport_destroy();
	pool_destroy(&puffs_pnpool);

	malloc_type_detach(M_PUFFS);
}
710
/*
 * puffs_snapshot: file system snapshots are not supported by puffs.
 */
int
puffs_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
{

	return EOPNOTSUPP;
}
717
718 int
719 puffs_suspendctl(struct mount *mp, int cmd)
720 {
721 struct puffs_mount *pmp;
722 int error;
723
724 pmp = MPTOPUFFSMP(mp);
725 switch (cmd) {
726 case SUSPEND_SUSPEND:
727 DPRINTF(("puffs_suspendctl: suspending\n"));
728 if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
729 break;
730 puffs_suspendtouser(pmp, PUFFS_SUSPEND_START);
731
732 error = pageflush(mp, FSCRED, 0, 1, curlwp);
733 if (error == 0)
734 error = fstrans_setstate(mp, FSTRANS_SUSPENDED);
735
736 if (error != 0) {
737 puffs_suspendtouser(pmp, PUFFS_SUSPEND_ERROR);
738 (void) fstrans_setstate(mp, FSTRANS_NORMAL);
739 break;
740 }
741
742 puffs_suspendtouser(pmp, PUFFS_SUSPEND_SUSPENDED);
743
744 break;
745
746 case SUSPEND_RESUME:
747 DPRINTF(("puffs_suspendctl: resume\n"));
748 error = 0;
749 (void) fstrans_setstate(mp, FSTRANS_NORMAL);
750 puffs_suspendtouser(pmp, PUFFS_SUSPEND_RESUME);
751 break;
752
753 default:
754 error = EINVAL;
755 break;
756 }
757
758 DPRINTF(("puffs_suspendctl: return %d\n", error));
759 return error;
760 }
761
/* Vnode operation vector descriptors provided by puffs. */
const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
	&puffs_vnodeop_opv_desc,
	&puffs_specop_opv_desc,
	&puffs_fifoop_opv_desc,
	&puffs_msgop_opv_desc,
	NULL,
};
769
/*
 * puffs VFS operations vector, registered with the kernel below
 * via VFS_ATTACH().
 */
struct vfsops puffs_vfsops = {
	MOUNT_PUFFS,
	puffs_mount,		/* mount */
	puffs_start,		/* start */
	puffs_unmount,		/* unmount */
	puffs_root,		/* root */
	(void *)eopnotsupp,	/* quotactl */
	puffs_statvfs,		/* statvfs */
	puffs_sync,		/* sync */
	(void *)eopnotsupp,	/* vget */
	puffs_fhtovp,		/* fhtovp */
	puffs_vptofh,		/* vptofh */
	puffs_init,		/* init */
	NULL,			/* reinit */
	puffs_done,		/* done */
	NULL,			/* mountroot */
	puffs_snapshot,		/* snapshot */
	vfs_stdextattrctl,	/* extattrctl */
	puffs_suspendctl,	/* suspendctl */
	puffs_vnodeopv_descs,	/* vnodeops */
	0,			/* refcount */
	{ NULL, NULL }
};
VFS_ATTACH(puffs_vfsops);
794