/*	$NetBSD: puffs_vfsops.c,v 1.40 2007/04/16 13:54:07 pooka Exp $	*/

/*
 * Copyright (c) 2005, 2006 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.40 2007/04/16 13:54:07 pooka Exp $");

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/extattr.h>
#include <sys/queue.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>

#include <lib/libkern/libkern.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <nfs/nfsproto.h> /* for fh sizes */

VFS_PROTOS(puffs);

MALLOC_JUSTDEFINE(M_PUFFS, "puffs", "Pass-to-Userspace Framework File System");

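/*
 * Tunables for the puffs node hash: the server-supplied bucket count
 * defaults to puffs_pnodebuckets_default and is capped via
 * puffs_maxpnodebuckets in puffs_mount() below.
 */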
#ifndef PUFFS_PNODEBUCKETS
#define PUFFS_PNODEBUCKETS 256
#endif
#ifndef PUFFS_MAXPNODEBUCKETS
#define PUFFS_MAXPNODEBUCKETS 8192
#endif
int puffs_pnodebuckets_default = PUFFS_PNODEBUCKETS;
int puffs_maxpnodebuckets = PUFFS_MAXPNODEBUCKETS;

int
puffs_mount(struct mount *mp, const char *path, void *data,
    struct nameidata *ndp, struct lwp *l)
{
        struct puffs_mount *pmp = NULL;
        struct puffs_kargs *args;
        char namebuf[PUFFSNAMESIZE+sizeof(PUFFS_NAMEPREFIX)+1]; /* spooky */
        int error = 0, i;

        if (mp->mnt_flag & MNT_GETARGS) {
                pmp = MPTOPUFFSMP(mp);
                return copyout(&pmp->pmp_args, data, sizeof(struct puffs_kargs));
        }

        /* update is not supported currently */
        if (mp->mnt_flag & MNT_UPDATE)
                return EOPNOTSUPP;

        /*
         * We need the file system name
         */
        if (!data)
                return EINVAL;

        MALLOC(args, struct puffs_kargs *, sizeof(struct puffs_kargs),
            M_PUFFS, M_WAITOK);

        error = copyin(data, args, sizeof(struct puffs_kargs));
        if (error)
                goto out;

        /* devel phase */
        if (args->pa_vers != (PUFFSVERSION | PUFFSDEVELVERS)) {
                printf("puffs_mount: development version mismatch\n");
                error = EINVAL;
                goto out;
        }

        /* nuke spy bits */
        args->pa_flags &= PUFFS_KFLAG_MASK;

        /* sanitize file handle length */
        if (PUFFS_TOFHSIZE(args->pa_fhsize) > FHANDLE_SIZE_MAX) {
                printf("puffs_mount: handle size %zu too large\n",
                    args->pa_fhsize);
                error = EINVAL;
                goto out;
        }
        /* sanity check file handle max sizes */
        if (args->pa_fhsize && args->pa_fhflags & PUFFS_FHFLAG_PROTOMASK) {
                size_t kfhsize = PUFFS_TOFHSIZE(args->pa_fhsize);

                if (args->pa_fhflags & PUFFS_FHFLAG_NFSV2) {
                        if (NFSX_FHTOOBIG_P(kfhsize, 0)) {
                                printf("puffs_mount: fhsize larger than "
                                    "NFSv2 max %d\n",
                                    PUFFS_FROMFHSIZE(NFSX_V2FH));
                                error = EINVAL;
                                goto out;
                        }
                }

                if (args->pa_fhflags & PUFFS_FHFLAG_NFSV3) {
                        if (NFSX_FHTOOBIG_P(kfhsize, 1)) {
                                printf("puffs_mount: fhsize larger than "
                                    "NFSv3 max %d\n",
                                    PUFFS_FROMFHSIZE(NFSX_V3FHMAX));
                                error = EINVAL;
                                goto out;
                        }
                }
        }

        /* build real name */
        (void)strlcpy(namebuf, PUFFS_NAMEPREFIX, sizeof(namebuf));
        (void)strlcat(namebuf, args->pa_name, sizeof(namebuf));

        /* inform user server if it got the max request size it wanted */
        if (args->pa_maxreqlen == 0 || args->pa_maxreqlen > PUFFS_REQ_MAXSIZE)
                args->pa_maxreqlen = PUFFS_REQ_MAXSIZE;
        else if (args->pa_maxreqlen < PUFFS_REQSTRUCT_MAX)
                args->pa_maxreqlen = PUFFS_REQSTRUCT_MAX;
        (void)strlcpy(args->pa_name, namebuf, sizeof(args->pa_name));

        if (args->pa_nhashbuckets == 0)
                args->pa_nhashbuckets = puffs_pnodebuckets_default;
        if (args->pa_nhashbuckets < 1)
                args->pa_nhashbuckets = 1;
        if (args->pa_nhashbuckets > PUFFS_MAXPNODEBUCKETS) {
                args->pa_nhashbuckets = puffs_maxpnodebuckets;
                printf("puffs_mount: using %d hash buckets. "
                    "adjust puffs_maxpnodebuckets for more\n",
                    puffs_maxpnodebuckets);
        }

        error = copyout(args, data, sizeof(struct puffs_kargs));
        if (error)
                goto out;

        error = set_statvfs_info(path, UIO_USERSPACE, namebuf,
            UIO_SYSSPACE, mp, l);
        if (error)
                goto out;
        mp->mnt_stat.f_iosize = DEV_BSIZE;

        MALLOC(pmp, struct puffs_mount *, sizeof(struct puffs_mount),
            M_PUFFS, M_WAITOK | M_ZERO);

        mp->mnt_fs_bshift = DEV_BSHIFT;
        mp->mnt_dev_bshift = DEV_BSHIFT;
        mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
        mp->mnt_data = pmp;
        mp->mnt_iflag |= IMNT_HAS_TRANS;

        pmp->pmp_status = PUFFSTAT_MOUNTING;
        pmp->pmp_nextreq = 0;
        pmp->pmp_mp = mp;
        pmp->pmp_req_maxsize = args->pa_maxreqlen;
        pmp->pmp_args = *args;

        pmp->pmp_npnodehash = args->pa_nhashbuckets;
        pmp->pmp_pnodehash = malloc
            (sizeof(struct puffs_pnode_hashlist *) * pmp->pmp_npnodehash,
            M_PUFFS, M_WAITOK);
        for (i = 0; i < pmp->pmp_npnodehash; i++)
                LIST_INIT(&pmp->pmp_pnodehash[i]);

        /*
         * Inform the fileops processing code that we have a mountpoint.
         * If it doesn't know about anyone with our pid/fd having the
         * device open, punt
         */
        if (puffs_setpmp(l->l_proc->p_pid, args->pa_fd, pmp)) {
                error = ENOENT;
                goto out;
        }

        mutex_init(&pmp->pmp_lock, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&pmp->pmp_req_waiter_cv, "puffsget");
        cv_init(&pmp->pmp_req_waitersink_cv, "puffsink");
        cv_init(&pmp->pmp_unmounting_cv, "puffsum");
        cv_init(&pmp->pmp_suspend_cv, "pufsusum");
        TAILQ_INIT(&pmp->pmp_req_touser);
        TAILQ_INIT(&pmp->pmp_req_replywait);
        TAILQ_INIT(&pmp->pmp_req_sizepark);

        DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
            mp, MPTOPUFFSMP(mp)));

        vfs_getnewfsid(mp);

 out:
        if (error && pmp && pmp->pmp_pnodehash)
                free(pmp->pmp_pnodehash, M_PUFFS);
        if (error && pmp)
                FREE(pmp, M_PUFFS);
        FREE(args, M_PUFFS);
        return error;
}

/*
 * This is called from the first "Hello, I'm alive" ioctl
 * from userspace.
 */
int
puffs_start2(struct puffs_mount *pmp, struct puffs_startreq *sreq)
{
        struct puffs_node *pn;
        struct mount *mp;

        mp = PMPTOMP(pmp);

        mutex_enter(&pmp->pmp_lock);

        /*
         * if someone has issued a VFS_ROOT() already, fill in the
         * vnode cookie.
         */
        pn = NULL;
        if (pmp->pmp_root) {
                pn = VPTOPP(pmp->pmp_root);
                pn->pn_cookie = sreq->psr_cookie;
        }

        /* We're good to fly */
        pmp->pmp_rootcookie = sreq->psr_cookie;
        pmp->pmp_status = PUFFSTAT_RUNNING;
        mutex_exit(&pmp->pmp_lock);

        /* do the VFS_STATVFS() we missed out on in sys_mount() */
        copy_statvfs_info(&sreq->psr_sb, mp);
        (void)memcpy(&mp->mnt_stat, &sreq->psr_sb, sizeof(mp->mnt_stat));
        mp->mnt_stat.f_iosize = DEV_BSIZE;

        DPRINTF(("puffs_start2: root vp %p, cur root pnode %p, cookie %p\n",
            pmp->pmp_root, pn, sreq->psr_cookie));

        return 0;
}

int
puffs_start(struct mount *mp, int flags, struct lwp *l)
{

        /*
         * This cannot travel to userspace, as this is called from
         * the kernel context of the process doing mount(2). But
         * it's probably a safe bet that the process doing mount(2)
         * realizes it needs to start the filesystem also...
         */
        return 0;
}

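/*
 * Unmount: flush our vnodes and, unless the server is already dead,
 * ask userspace whether it agrees to the unmount. On success, or if
 * MNT_FORCE was given, tear down the puffs_mount.
 */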
int
puffs_unmount(struct mount *mp, int mntflags, struct lwp *l)
{
        struct puffs_mount *pmp;
        int error, force;

        PUFFS_VFSREQ(unmount);

        error = 0;
        force = mntflags & MNT_FORCE;
        pmp = MPTOPUFFSMP(mp);

        DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
            "status 0x%x\n", pmp->pmp_status));

        /*
         * flush all the vnodes. VOP_RECLAIM() takes care that the
         * root vnode does not get flushed until unmount. The
         * userspace root node cookie is stored in the mount
         * structure, so we can always re-instantiate a root vnode,
         * should userspace unmount decide it doesn't want to
         * cooperate.
         */
        error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
        if (error)
                goto out;

        /*
         * If we are not DYING, we should ask userspace's opinion
         * about the situation
         */
        mutex_enter(&pmp->pmp_lock);
        if (pmp->pmp_status != PUFFSTAT_DYING) {
                pmp->pmp_unmounting = 1;
                mutex_exit(&pmp->pmp_lock);

                unmount_arg.pvfsr_flags = mntflags;
                unmount_arg.pvfsr_pid = puffs_lwp2pid(l);

                error = puffs_vfstouser(pmp, PUFFS_VFS_UNMOUNT,
                    &unmount_arg, sizeof(unmount_arg));
                DPRINTF(("puffs_unmount: error %d force %d\n", error, force));

                mutex_enter(&pmp->pmp_lock);
                pmp->pmp_unmounting = 0;
                cv_broadcast(&pmp->pmp_unmounting_cv);
        }

        /*
         * if userspace cooperated or we really need to die,
         * screw what userland thinks and just die.
         */
        if (error == 0 || force) {
                /* tell waiters & other resources to go unwait themselves */
                puffs_userdead(pmp);
                puffs_nukebypmp(pmp);

                /*
                 * Sink waiters. This is still not perfect, since the
                 * draining is done after userret, not when they really
                 * exit the file system. It will probably work, as almost
                 * no call will block and cause a context switch, and will
                 * therefore be protected by the biglock after exiting
                 * userspace. But ... it's an imperfect world.
                 */
                while (pmp->pmp_req_waiters != 0)
                        cv_wait(&pmp->pmp_req_waitersink_cv, &pmp->pmp_lock);
                mutex_exit(&pmp->pmp_lock);

                /* free resources now that we hopefully have no waiters left */
                cv_destroy(&pmp->pmp_req_waiter_cv);
                cv_destroy(&pmp->pmp_req_waitersink_cv);
                cv_destroy(&pmp->pmp_unmounting_cv);
                cv_destroy(&pmp->pmp_suspend_cv);
                mutex_destroy(&pmp->pmp_lock);

                free(pmp->pmp_pnodehash, M_PUFFS);
                FREE(pmp, M_PUFFS);
                error = 0;
        } else {
                mutex_exit(&pmp->pmp_lock);
        }

 out:
        DPRINTF(("puffs_unmount: return %d\n", error));
        return error;
}

/*
 * This doesn't need to travel to userspace
 */
int
puffs_root(struct mount *mp, struct vnode **vpp)
{
        struct puffs_mount *pmp;
        struct puffs_node *pn;
        struct vnode *vp;

        pmp = MPTOPUFFSMP(mp);

        /*
         * pmp_lock must be held if vref()'ing or vrele()'ing the
         * root vnode. the latter is controlled by puffs_inactive().
         */
        mutex_enter(&pmp->pmp_lock);
        vp = pmp->pmp_root;
        if (vp) {
                simple_lock(&vp->v_interlock);
                mutex_exit(&pmp->pmp_lock);
                pn = VPTOPP(vp);
                if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK))
                        goto grabnew;
                *vpp = vp;
                return 0;
        } else
                mutex_exit(&pmp->pmp_lock);

        /* XXX: this is wrong, so FIXME */
 grabnew:

        /*
         * So, didn't have the magic root vnode available.
         * No matter, grab another and stuff it with the cookie.
         */
        if (puffs_getvnode(mp, pmp->pmp_rootcookie, VDIR, 0, 0, &vp))
                panic("sloppy programming");

        mutex_enter(&pmp->pmp_lock);
        /*
         * check if by mysterious force someone else created a root
         * vnode while we were executing.
         */
        if (pmp->pmp_root) {
                vref(pmp->pmp_root);
                mutex_exit(&pmp->pmp_lock);
                puffs_putvnode(vp);
                vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);
                *vpp = pmp->pmp_root;
                return 0;
        }

        /* store cache */
        vp->v_flag = VROOT;
        pmp->pmp_root = vp;
        mutex_exit(&pmp->pmp_lock);

        vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);

        *vpp = vp;
        return 0;
}

int
puffs_statvfs(struct mount *mp, struct statvfs *sbp, struct lwp *l)
{
        struct puffs_vfsreq_statvfs *statvfs_arg; /* too big for stack */
        struct puffs_mount *pmp;
        int error = 0;

        pmp = MPTOPUFFSMP(mp);

        /*
         * If we are mounting, it means that the userspace counterpart
         * is calling mount(2), but mount(2) also calls statvfs. So
         * requesting statvfs from userspace would mean a deadlock.
         * Compensate.
         */
        if (pmp->pmp_status == PUFFSTAT_MOUNTING)
                return EINPROGRESS;

        /* too big for stack */
        MALLOC(statvfs_arg, struct puffs_vfsreq_statvfs *,
            sizeof(struct puffs_vfsreq_statvfs), M_PUFFS, M_WAITOK | M_ZERO);
        statvfs_arg->pvfsr_pid = puffs_lwp2pid(l);

        error = puffs_vfstouser(pmp, PUFFS_VFS_STATVFS,
            statvfs_arg, sizeof(*statvfs_arg));
        statvfs_arg->pvfsr_sb.f_iosize = DEV_BSIZE;

        /*
         * Try to produce a sensible result even in the event
         * of userspace error.
         *
         * XXX: cache the copy in non-error case
         */
        if (!error) {
                copy_statvfs_info(&statvfs_arg->pvfsr_sb, mp);
                (void)memcpy(sbp, &statvfs_arg->pvfsr_sb,
                    sizeof(struct statvfs));
        } else {
                copy_statvfs_info(sbp, mp);
        }

        FREE(statvfs_arg, M_PUFFS);
        return error;
}

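/*
 * Flush the page cache of every regular vnode on the mount. This is
 * used both by puffs_sync() and by file system suspension; when
 * suspending, the writes are issued as fire-and-forget (see the
 * PNODE_SUSPEND handling below).
 */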
static int
pageflush(struct mount *mp, kauth_cred_t cred,
    int waitfor, int suspending, struct lwp *l)
{
        struct puffs_node *pn;
        struct vnode *vp, *nvp;
        int error, rv;

        KASSERT(((waitfor == MNT_WAIT) && suspending) == 0);
        KASSERT((suspending == 0)
            || (fstrans_is_owner(mp)
              && fstrans_getstate(mp) == FSTRANS_SUSPENDING));

        error = 0;

        /*
         * Sync all cached data from regular vnodes (which are not
         * currently locked, see below). After this we call VFS_SYNC
         * for the fs server, which should handle data and metadata for
         * all the nodes it knows to exist.
         */
        simple_lock(&mntvnode_slock);
 loop:
        for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
                /* check if we're on the right list */
                if (vp->v_mount != mp)
                        goto loop;

                simple_lock(&vp->v_interlock);
                pn = VPTOPP(vp);
                nvp = TAILQ_NEXT(vp, v_mntvnodes);

                if (vp->v_type != VREG || UVM_OBJ_IS_CLEAN(&vp->v_uobj)) {
                        simple_unlock(&vp->v_interlock);
                        continue;
                }

                simple_unlock(&mntvnode_slock);

                /*
                 * Here we try to get a reference to the vnode and to
                 * lock it. This is mostly cargo-culted, but I will
                 * offer an explanation as to why I believe this might
                 * actually do the right thing.
                 *
                 * If the vnode is a goner, we quite obviously don't need
                 * to sync it.
                 *
                 * If the vnode was busy, we don't need to sync it because
                 * this is never called with MNT_WAIT except from
                 * dounmount(), when we are wait-flushing all the dirty
                 * vnodes through other routes in any case. So there,
                 * sync() doesn't actually sync. Happy now?
                 *
                 * NOTE: if we're suspending, vget() does NOT lock.
                 * See puffs_lock() for details.
                 */
                rv = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
                if (rv) {
                        simple_lock(&mntvnode_slock);
                        if (rv == ENOENT)
                                goto loop;
                        continue;
                }

                /*
                 * Thread information to puffs_strategy() through the
                 * pnode flags: we want to issue the putpages operations
                 * as FAF if we're suspending, since it's very probable
                 * that our execution context is that of the userspace
                 * daemon. We can do this because:
                 *   + we send the "going to suspend" prior to this part
                 *   + if any of the writes fails in userspace, it's the
                 *     file system server's problem to decide if this was a
                 *     failed snapshot when it gets the "snapshot complete"
                 *     notification.
                 *   + if any of the writes fails in the kernel already, we
                 *     immediately fail *and* notify the user server of
                 *     the failure.
                 *
                 * We also do FAFs if we're called from the syncer. This
                 * is just a general optimization for trickle sync: no need
                 * to really guarantee that the stuff ended up on backing
                 * storage.
                 * TODO: Maybe also hint the user server about this twist?
                 */
                if (suspending || waitfor == MNT_LAZY) {
                        simple_lock(&vp->v_interlock);
                        pn->pn_stat |= PNODE_SUSPEND;
                        simple_unlock(&vp->v_interlock);
                }
                rv = VOP_FSYNC(vp, cred, waitfor, 0, 0, l);
                if (suspending || waitfor == MNT_LAZY) {
                        simple_lock(&vp->v_interlock);
                        pn->pn_stat &= ~PNODE_SUSPEND;
                        simple_unlock(&vp->v_interlock);
                }
                if (rv)
                        error = rv;
                vput(vp);
                simple_lock(&mntvnode_slock);
        }
        simple_unlock(&mntvnode_slock);

        return error;
}

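/*
 * Sync the mount: first flush cached pages from the kernel side, then
 * ask the user server (PUFFS_VFS_SYNC) to sync whatever it is holding.
 */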
int
puffs_sync(struct mount *mp, int waitfor, struct kauth_cred *cred,
    struct lwp *l)
{
        int error, rv;

        PUFFS_VFSREQ(sync);

        error = pageflush(mp, cred, waitfor, 0, l);

        /* sync fs */
        sync_arg.pvfsr_waitfor = waitfor;
        puffs_credcvt(&sync_arg.pvfsr_cred, cred);
        sync_arg.pvfsr_pid = puffs_lwp2pid(l);

        rv = puffs_vfstouser(MPTOPUFFSMP(mp), PUFFS_VFS_SYNC,
            &sync_arg, sizeof(sync_arg));
        if (rv)
                error = rv;

        return error;
}

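/*
 * Translate a file handle to a vnode: the handle data is shipped to the
 * user server (PUFFS_VFS_FHTOVP), which answers with the node cookie.
 * An existing vnode for that cookie is reused; otherwise a new one is
 * created and locked.
 */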
int
puffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
        struct puffs_mount *pmp = MPTOPUFFSMP(mp);
        struct puffs_vfsreq_fhtonode *fhtonode_argp;
        struct vnode *vp;
        size_t argsize;
        int error;

        if (pmp->pmp_args.pa_fhsize == 0)
                return EOPNOTSUPP;

        if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) {
                if (pmp->pmp_args.pa_fhsize < PUFFS_FROMFHSIZE(fhp->fid_len))
                        return EINVAL;
        } else {
                if (pmp->pmp_args.pa_fhsize != PUFFS_FROMFHSIZE(fhp->fid_len))
                        return EINVAL;
        }

        argsize = sizeof(struct puffs_vfsreq_fhtonode)
            + PUFFS_FROMFHSIZE(fhp->fid_len);
        fhtonode_argp = malloc(argsize, M_PUFFS, M_ZERO | M_WAITOK);
        fhtonode_argp->pvfsr_dsize = PUFFS_FROMFHSIZE(fhp->fid_len);
        memcpy(fhtonode_argp->pvfsr_data, fhp->fid_data,
            PUFFS_FROMFHSIZE(fhp->fid_len));

        error = puffs_vfstouser(pmp, PUFFS_VFS_FHTOVP, fhtonode_argp, argsize);
        if (error)
                goto out;

        vp = puffs_pnode2vnode(pmp, fhtonode_argp->pvfsr_fhcookie, 1);
        DPRINTF(("puffs_fhtovp: got cookie %p, existing vnode %p\n",
            fhtonode_argp->pvfsr_fhcookie, vp));
        if (!vp) {
                error = puffs_getvnode(mp, fhtonode_argp->pvfsr_fhcookie,
                    fhtonode_argp->pvfsr_vtype, fhtonode_argp->pvfsr_size,
                    fhtonode_argp->pvfsr_rdev, &vp);
                if (error)
                        goto out;
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        }

        *vpp = vp;
 out:
        free(fhtonode_argp, M_PUFFS);
        return error;
}

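/*
 * Convert a vnode into a file handle. The user server produces the
 * handle data for the node cookie; the kernel checks that the result
 * fits both FHANDLE_SIZE_MAX and the caller's buffer, reporting the
 * required size through *fh_size and E2BIG when it does not.
 */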
int
puffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
{
        struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
        struct puffs_vfsreq_nodetofh *nodetofh_argp;
        size_t argsize;
        int error;

        if (pmp->pmp_args.pa_fhsize == 0)
                return EOPNOTSUPP;

        /* if file handles are static length, we can return immediately */
        if (((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) == 0)
            && (PUFFS_FROMFHSIZE(*fh_size) < pmp->pmp_args.pa_fhsize)) {
                *fh_size = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);
                return E2BIG;
        }

        argsize = sizeof(struct puffs_vfsreq_nodetofh)
            + PUFFS_FROMFHSIZE(*fh_size);
        nodetofh_argp = malloc(argsize, M_PUFFS, M_ZERO | M_WAITOK);
        nodetofh_argp->pvfsr_fhcookie = VPTOPNC(vp);
        nodetofh_argp->pvfsr_dsize = PUFFS_FROMFHSIZE(*fh_size);

        error = puffs_vfstouser(pmp, PUFFS_VFS_VPTOFH, nodetofh_argp, argsize);
        if (error) {
                if (error == E2BIG)
                        *fh_size = PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize);
                goto out;
        }

        if (PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize) > FHANDLE_SIZE_MAX) {
                /* XXX: wrong direction */
                error = EINVAL;
                goto out;
        }

        if (*fh_size < PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize)) {
                *fh_size = PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize);
                error = E2BIG;
                goto out;
        }
        if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) {
                *fh_size = PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize);
        } else {
                *fh_size = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);
        }

        if (fhp) {
                fhp->fid_len = *fh_size;
                memcpy(fhp->fid_data,
                    nodetofh_argp->pvfsr_data, nodetofh_argp->pvfsr_dsize);
        }

 out:
        free(nodetofh_argp, M_PUFFS);
        return error;
}

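/*
 * Module init/fini glue: attach the malloc type, initialize the puffs
 * node pool and set up the transport and message interfaces, and tear
 * them down again in reverse order.
 */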
void
puffs_init()
{

        malloc_type_attach(M_PUFFS);

        pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
            "puffpnpl", &pool_allocator_nointr, IPL_NONE);
        puffs_transport_init();
        puffs_msgif_init();
}

void
puffs_done()
{

        puffs_msgif_destroy();
        puffs_transport_destroy();
        pool_destroy(&puffs_pnpool);

        malloc_type_detach(M_PUFFS);
}

int
puffs_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
{

        return EOPNOTSUPP;
}

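/*
 * Suspend/resume for snapshotting: notify the user server of each stage,
 * flush dirty pages with pageflush() in suspending mode, and drive the
 * fstrans state machine accordingly.
 */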
int
puffs_suspendctl(struct mount *mp, int cmd)
{
        struct puffs_mount *pmp;
        int error;

        pmp = MPTOPUFFSMP(mp);
        switch (cmd) {
        case SUSPEND_SUSPEND:
                DPRINTF(("puffs_suspendctl: suspending\n"));
                if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
                        break;
                puffs_suspendtouser(pmp, PUFFS_SUSPEND_START);

                error = pageflush(mp, FSCRED, 0, 1, curlwp);
                if (error == 0)
                        error = fstrans_setstate(mp, FSTRANS_SUSPENDED);

                if (error != 0) {
                        puffs_suspendtouser(pmp, PUFFS_SUSPEND_ERROR);
                        (void) fstrans_setstate(mp, FSTRANS_NORMAL);
                        break;
                }

                puffs_suspendtouser(pmp, PUFFS_SUSPEND_SUSPENDED);

                break;

        case SUSPEND_RESUME:
                DPRINTF(("puffs_suspendctl: resume\n"));
                error = 0;
                (void) fstrans_setstate(mp, FSTRANS_NORMAL);
                puffs_suspendtouser(pmp, PUFFS_SUSPEND_RESUME);
                break;

        default:
                error = EINVAL;
                break;
        }

        DPRINTF(("puffs_suspendctl: return %d\n", error));
        return error;
}

const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
        &puffs_vnodeop_opv_desc,
        &puffs_specop_opv_desc,
        &puffs_fifoop_opv_desc,
        &puffs_msgop_opv_desc,
        NULL,
};

struct vfsops puffs_vfsops = {
        MOUNT_PUFFS,
        puffs_mount,            /* mount       */
        puffs_start,            /* start       */
        puffs_unmount,          /* unmount     */
        puffs_root,             /* root        */
        (void *)eopnotsupp,     /* quotactl    */
        puffs_statvfs,          /* statvfs     */
        puffs_sync,             /* sync        */
        (void *)eopnotsupp,     /* vget        */
        puffs_fhtovp,           /* fhtovp      */
        puffs_vptofh,           /* vptofh      */
        puffs_init,             /* init        */
        NULL,                   /* reinit      */
        puffs_done,             /* done        */
        NULL,                   /* mountroot   */
        puffs_snapshot,         /* snapshot    */
        vfs_stdextattrctl,      /* extattrctl  */
        puffs_suspendctl,       /* suspendctl  */
        puffs_vnodeopv_descs,   /* vnodeops    */
        0,                      /* refcount    */
        { NULL, NULL }
};
VFS_ATTACH(puffs_vfsops);