1 /* $NetBSD: puffs_vfsops.c,v 1.41 2007/05/01 12:18:40 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2005, 2006 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by the
7 * Google Summer of Code program and the Ulla Tuominen Foundation.
8 * The Google SoC project was mentored by Bill Studenmund.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the company nor the name of the author may be used to
19 * endorse or promote products derived from this software without specific
20 * prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
23 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.41 2007/05/01 12:18:40 pooka Exp $");
37
38 #include <sys/param.h>
39 #include <sys/mount.h>
40 #include <sys/malloc.h>
41 #include <sys/extattr.h>
42 #include <sys/queue.h>
43 #include <sys/vnode.h>
44 #include <sys/dirent.h>
45 #include <sys/kauth.h>
46 #include <sys/fstrans.h>
47
48 #include <lib/libkern/libkern.h>
49
50 #include <fs/puffs/puffs_msgif.h>
51 #include <fs/puffs/puffs_sys.h>
52
53 #include <nfs/nfsproto.h> /* for fh sizes */
54
55 VFS_PROTOS(puffs);
56
57 MALLOC_JUSTDEFINE(M_PUFFS, "puffs", "Pass-to-Userspace Framework File System");
58
59 #ifndef PUFFS_PNODEBUCKETS
60 #define PUFFS_PNODEBUCKETS 256
61 #endif
62 #ifndef PUFFS_MAXPNODEBUCKETS
63 #define PUFFS_MAXPNODEBUCKETS 8192
64 #endif
65 int puffs_pnodebuckets_default = PUFFS_PNODEBUCKETS;
66 int puffs_maxpnodebuckets = PUFFS_MAXPNODEBUCKETS;
67
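/*
 * Mount a puffs instance: validate and sanitize the arguments copied
 * in from the userspace file server, register this mount with the
 * instance identified by pa_fd, and set up the request queues and the
 * pnode hash before leaving the mount in the "mounting" state.
 */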
68 int
69 puffs_mount(struct mount *mp, const char *path, void *data,
70 struct nameidata *ndp, struct lwp *l)
71 {
72 struct puffs_mount *pmp = NULL;
73 struct puffs_kargs *args;
74 char namebuf[PUFFSNAMESIZE+sizeof(PUFFS_NAMEPREFIX)+1]; /* spooky */
75 int error = 0, i;
76
77 if (mp->mnt_flag & MNT_GETARGS) {
78 pmp = MPTOPUFFSMP(mp);
79 return copyout(&pmp->pmp_args, data, sizeof(struct puffs_kargs));
80 }
81
82 /* update is not supported currently */
83 if (mp->mnt_flag & MNT_UPDATE)
84 return EOPNOTSUPP;
85
86 /*
87 * We need the file system name
88 */
89 if (!data)
90 return EINVAL;
91
92 MALLOC(args, struct puffs_kargs *, sizeof(struct puffs_kargs),
93 M_PUFFS, M_WAITOK);
94
95 error = copyin(data, args, sizeof(struct puffs_kargs));
96 if (error)
97 goto out;
98
99 /* devel phase */
100 if (args->pa_vers != (PUFFSVERSION | PUFFSDEVELVERS)) {
101 printf("puffs_mount: development version mismatch\n");
102 error = EINVAL;
103 goto out;
104 }
105
106 /* nuke spy bits */
107 args->pa_flags &= PUFFS_KFLAG_MASK;
108
109 /* sanitize file handle length */
110 if (PUFFS_TOFHSIZE(args->pa_fhsize) > FHANDLE_SIZE_MAX) {
111 printf("puffs_mount: handle size %zu too large\n",
112 args->pa_fhsize);
113 error = EINVAL;
114 goto out;
115 }
116 /* sanity check file handle max sizes */
117 if (args->pa_fhsize && (args->pa_fhflags & PUFFS_FHFLAG_PROTOMASK)) {
118 size_t kfhsize = PUFFS_TOFHSIZE(args->pa_fhsize);
119
120 if (args->pa_fhflags & PUFFS_FHFLAG_NFSV2) {
121 if (NFSX_FHTOOBIG_P(kfhsize, 0)) {
122 printf("puffs_mount: fhsize larger than "
123 "NFSv2 max %d\n",
124 PUFFS_FROMFHSIZE(NFSX_V2FH));
125 error = EINVAL;
126 goto out;
127 }
128 }
129
130 if (args->pa_fhflags & PUFFS_FHFLAG_NFSV3) {
131 if (NFSX_FHTOOBIG_P(kfhsize, 1)) {
132 printf("puffs_mount: fhsize larger than "
133 "NFSv3 max %d\n",
134 PUFFS_FROMFHSIZE(NFSX_V3FHMAX));
135 error = EINVAL;
136 goto out;
137 }
138 }
139 }
140
141 /* build real name */
142 (void)strlcpy(namebuf, PUFFS_NAMEPREFIX, sizeof(namebuf));
143 (void)strlcat(namebuf, args->pa_name, sizeof(namebuf));
144
145 /* clamp the max request size; the copyout below tells the server what it got */
146 if (args->pa_maxreqlen == 0 || args->pa_maxreqlen > PUFFS_REQ_MAXSIZE)
147 args->pa_maxreqlen = PUFFS_REQ_MAXSIZE;
148 else if (args->pa_maxreqlen < PUFFS_REQSTRUCT_MAX)
149 args->pa_maxreqlen = PUFFS_REQSTRUCT_MAX;
150 (void)strlcpy(args->pa_name, namebuf, sizeof(args->pa_name));
151
152 if (args->pa_nhashbuckets == 0)
153 args->pa_nhashbuckets = puffs_pnodebuckets_default;
154 if (args->pa_nhashbuckets < 1)
155 args->pa_nhashbuckets = 1;
156 if (args->pa_nhashbuckets > puffs_maxpnodebuckets) {
157 args->pa_nhashbuckets = puffs_maxpnodebuckets;
158 printf("puffs_mount: using %d hash buckets. "
159 "adjust puffs_maxpnodebuckets for more\n",
160 puffs_maxpnodebuckets);
161 }
162
163 error = copyout(args, data, sizeof(struct puffs_kargs));
164 if (error)
165 goto out;
166
167 error = set_statvfs_info(path, UIO_USERSPACE, namebuf,
168 UIO_SYSSPACE, mp, l);
169 if (error)
170 goto out;
171 mp->mnt_stat.f_iosize = DEV_BSIZE;
172
173 MALLOC(pmp, struct puffs_mount *, sizeof(struct puffs_mount),
174 M_PUFFS, M_WAITOK | M_ZERO);
175
176 mp->mnt_fs_bshift = DEV_BSHIFT;
177 mp->mnt_dev_bshift = DEV_BSHIFT;
178 mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
179 mp->mnt_data = pmp;
180 mp->mnt_iflag |= IMNT_HAS_TRANS;
181
182 pmp->pmp_status = PUFFSTAT_MOUNTING;
183 pmp->pmp_nextreq = 0;
184 pmp->pmp_mp = mp;
185 pmp->pmp_req_maxsize = args->pa_maxreqlen;
186 pmp->pmp_args = *args;
187
188 pmp->pmp_npnodehash = args->pa_nhashbuckets;
189 pmp->pmp_pnodehash = malloc
190 (sizeof(struct puffs_pnode_hashlist *) * pmp->pmp_npnodehash,
191 M_PUFFS, M_WAITOK);
192 for (i = 0; i < pmp->pmp_npnodehash; i++)
193 LIST_INIT(&pmp->pmp_pnodehash[i]);
194
195 /*
196 * Inform the fileops processing code that we have a mountpoint.
197 * If it doesn't know about anyone with our pid/fd having the
198 * device open, punt
199 */
200 if (puffs_setpmp(l->l_proc->p_pid, args->pa_fd, pmp)) {
201 error = ENOENT;
202 goto out;
203 }
204
205 mutex_init(&pmp->pmp_lock, MUTEX_DEFAULT, IPL_NONE);
206 cv_init(&pmp->pmp_req_waiter_cv, "puffsget");
207 cv_init(&pmp->pmp_refcount_cv, "puffsref");
208 cv_init(&pmp->pmp_unmounting_cv, "puffsum");
209 TAILQ_INIT(&pmp->pmp_req_touser);
210 TAILQ_INIT(&pmp->pmp_req_replywait);
211 TAILQ_INIT(&pmp->pmp_req_sizepark);
212
213 DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
214 mp, MPTOPUFFSMP(mp)));
215
216 vfs_getnewfsid(mp);
217
218 out:
219 if (error && pmp && pmp->pmp_pnodehash)
220 free(pmp->pmp_pnodehash, M_PUFFS);
221 if (error && pmp)
222 FREE(pmp, M_PUFFS);
223 FREE(args, M_PUFFS);
224 return error;
225 }
226
227 /*
228 * This is called from the first "Hello, I'm alive" ioctl
229 * from userspace.
230 */
231 int
232 puffs_start2(struct puffs_mount *pmp, struct puffs_startreq *sreq)
233 {
234 struct puffs_node *pn;
235 struct mount *mp;
236
237 mp = PMPTOMP(pmp);
238
239 mutex_enter(&pmp->pmp_lock);
240
241 /*
242 * if someone has issued a VFS_ROOT() already, fill in the
243 * vnode cookie.
244 */
245 pn = NULL;
246 if (pmp->pmp_root) {
247 pn = VPTOPP(pmp->pmp_root);
248 pn->pn_cookie = sreq->psr_cookie;
249 }
250
251 /* We're good to fly */
252 pmp->pmp_rootcookie = sreq->psr_cookie;
253 pmp->pmp_status = PUFFSTAT_RUNNING;
254 mutex_exit(&pmp->pmp_lock);
255
256 /* do the VFS_STATVFS() we missed out on in sys_mount() */
257 copy_statvfs_info(&sreq->psr_sb, mp);
258 (void)memcpy(&mp->mnt_stat, &sreq->psr_sb, sizeof(mp->mnt_stat));
259 mp->mnt_stat.f_iosize = DEV_BSIZE;
260
261 DPRINTF(("puffs_start2: root vp %p, cur root pnode %p, cookie %p\n",
262 pmp->pmp_root, pn, sreq->psr_cookie));
263
264 return 0;
265 }
266
267 int
268 puffs_start(struct mount *mp, int flags, struct lwp *l)
269 {
270
271 /*
272 * This cannot travel to userspace, as this is called from
273 * the kernel context of the process doing mount(2). But
274 * it's probably a safe bet that the process doing mount(2)
275 * realizes it needs to start the filesystem also...
276 */
277 return 0;
278 }
279
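/*
 * Unmount: flush our vnodes and, unless the file server is already
 * dead, ask it whether it agrees to the unmount.  With MNT_FORCE the
 * mount is torn down regardless of the answer.
 */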
280 int
281 puffs_unmount(struct mount *mp, int mntflags, struct lwp *l)
282 {
283 struct puffs_mount *pmp;
284 int error, force;
285
286 PUFFS_VFSREQ(unmount);
287
288 error = 0;
289 force = mntflags & MNT_FORCE;
290 pmp = MPTOPUFFSMP(mp);
291
292 DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
293 "status 0x%x\n", pmp->pmp_status));
294
295 /*
296 * flush all the vnodes. VOP_RECLAIM() takes care that the
297 * root vnode does not get flushed until unmount. The
298 * userspace root node cookie is stored in the mount
299 * structure, so we can always re-instantiate a root vnode,
300 * should userspace unmount decide it doesn't want to
301 * cooperate.
302 */
303 error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
304 if (error)
305 goto out;
306
307 /*
308 * If we are not DYING, we should ask userspace's opinion
309 * about the situation
310 */
311 mutex_enter(&pmp->pmp_lock);
312 if (pmp->pmp_status != PUFFSTAT_DYING) {
313 pmp->pmp_unmounting = 1;
314 mutex_exit(&pmp->pmp_lock);
315
316 unmount_arg.pvfsr_flags = mntflags;
317 unmount_arg.pvfsr_pid = puffs_lwp2pid(l);
318
319 error = puffs_vfstouser(pmp, PUFFS_VFS_UNMOUNT,
320 &unmount_arg, sizeof(unmount_arg));
321 DPRINTF(("puffs_unmount: error %d force %d\n", error, force));
322
323 mutex_enter(&pmp->pmp_lock);
324 pmp->pmp_unmounting = 0;
325 cv_broadcast(&pmp->pmp_unmounting_cv);
326 }
327
328 /*
329 * if userspace cooperated or we really need to die,
330 * screw what userland thinks and just die.
331 */
332 if (error == 0 || force) {
333 /* tell waiters & other resources to go unwait themselves */
334 puffs_userdead(pmp);
335 puffs_nukebypmp(pmp);
336
337 /*
338 * Wait until there are no more users for the mount resource.
339 * Notice that this is hooked against transport_close
340 * and return from touser. In an ideal world, it would
341 * be hooked against final return from all operations.
342 * But currently it works well enough, since nobody
343 * does weird blocking voodoo after return from touser().
344 */
345 while (pmp->pmp_refcount != 0)
346 cv_wait(&pmp->pmp_refcount_cv, &pmp->pmp_lock);
347 mutex_exit(&pmp->pmp_lock);
348
349 /* free resources now that we hopefully have no waiters left */
350 cv_destroy(&pmp->pmp_unmounting_cv);
351 cv_destroy(&pmp->pmp_refcount_cv);
352 cv_destroy(&pmp->pmp_req_waiter_cv);
353 mutex_destroy(&pmp->pmp_lock);
354
355 free(pmp->pmp_pnodehash, M_PUFFS);
356 FREE(pmp, M_PUFFS);
357 error = 0;
358 } else {
359 mutex_exit(&pmp->pmp_lock);
360 }
361
362 out:
363 DPRINTF(("puffs_unmount: return %d\n", error));
364 return error;
365 }
366
367 /*
368 * This doesn't need to travel to userspace
369 */
370 int
371 puffs_root(struct mount *mp, struct vnode **vpp)
372 {
373 struct puffs_mount *pmp;
374 struct puffs_node *pn;
375 struct vnode *vp;
376
377 pmp = MPTOPUFFSMP(mp);
378
379 /*
380 * pmp_lock must be held if vref()'ing or vrele()'ing the
381 * root vnode. the latter is controlled by puffs_inactive().
382 */
383 mutex_enter(&pmp->pmp_lock);
384 vp = pmp->pmp_root;
385 if (vp) {
386 simple_lock(&vp->v_interlock);
387 mutex_exit(&pmp->pmp_lock);
388 pn = VPTOPP(vp);
389 if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK))
390 goto grabnew;
391 *vpp = vp;
392 return 0;
393 } else
394 mutex_exit(&pmp->pmp_lock);
395
396 /* XXX: this is wrong, so FIXME */
397 grabnew:
398
399 /*
400 * So, didn't have the magic root vnode available.
401 * No matter, grab another and stuff it with the cookie.
402 */
403 if (puffs_getvnode(mp, pmp->pmp_rootcookie, VDIR, 0, 0, &vp))
404 panic("sloppy programming");
405
406 mutex_enter(&pmp->pmp_lock);
407 /*
408 * check if by mysterious force someone else created a root
409 * vnode while we were executing.
410 */
411 if (pmp->pmp_root) {
412 vref(pmp->pmp_root);
413 mutex_exit(&pmp->pmp_lock);
414 puffs_putvnode(vp);
415 vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);
416 *vpp = pmp->pmp_root;
417 return 0;
418 }
419
420 /* cache the root vnode in the mount structure */
421 vp->v_flag |= VROOT;
422 pmp->pmp_root = vp;
423 mutex_exit(&pmp->pmp_lock);
424
425 vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);
426
427 *vpp = vp;
428 return 0;
429 }
430
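/*
 * statvfs is forwarded to the file server, except while we are still
 * mounting: the server is then blocked in mount(2) and cannot answer.
 */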
431 int
432 puffs_statvfs(struct mount *mp, struct statvfs *sbp, struct lwp *l)
433 {
434 struct puffs_vfsreq_statvfs *statvfs_arg; /* too big for stack */
435 struct puffs_mount *pmp;
436 int error = 0;
437
438 pmp = MPTOPUFFSMP(mp);
439
440 /*
441 * If we are mounting, it means that the userspace counterpart
442 * is calling mount(2), but mount(2) also calls statvfs. So
443 * requesting statvfs from userspace would mean a deadlock.
444 * Compensate.
445 */
446 if (pmp->pmp_status == PUFFSTAT_MOUNTING)
447 return EINPROGRESS;
448
449 /* too big for stack */
450 MALLOC(statvfs_arg, struct puffs_vfsreq_statvfs *,
451 sizeof(struct puffs_vfsreq_statvfs), M_PUFFS, M_WAITOK | M_ZERO);
452 statvfs_arg->pvfsr_pid = puffs_lwp2pid(l);
453
454 error = puffs_vfstouser(pmp, PUFFS_VFS_STATVFS,
455 statvfs_arg, sizeof(*statvfs_arg));
456 statvfs_arg->pvfsr_sb.f_iosize = DEV_BSIZE;
457
458 /*
459 * Try to produce a sensible result even in the event
460 * of userspace error.
461 *
462 * XXX: cache the copy in non-error case
463 */
464 if (!error) {
465 copy_statvfs_info(&statvfs_arg->pvfsr_sb, mp);
466 (void)memcpy(sbp, &statvfs_arg->pvfsr_sb,
467 sizeof(struct statvfs));
468 } else {
469 copy_statvfs_info(sbp, mp);
470 }
471
472 FREE(statvfs_arg, M_PUFFS);
473 return error;
474 }
475
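/*
 * Flush dirty pages of all regular vnodes on the mount.  Called both
 * from puffs_sync() and from the suspend path; "suspending" selects
 * the FAF behaviour described in the comments below.
 */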
476 static int
477 pageflush(struct mount *mp, kauth_cred_t cred,
478 int waitfor, int suspending, struct lwp *l)
479 {
480 struct puffs_node *pn;
481 struct vnode *vp, *nvp;
482 int error, rv;
483
484 KASSERT(((waitfor == MNT_WAIT) && suspending) == 0);
485 KASSERT((suspending == 0)
486 || (fstrans_is_owner(mp)
487 && fstrans_getstate(mp) == FSTRANS_SUSPENDING));
488
489 error = 0;
490
491 /*
492 * Sync all cached data from regular vnodes (which are not
493 * currently locked, see below). After this we call VFS_SYNC
494 * for the fs server, which should handle data and metadata for
495 * all the nodes it knows to exist.
496 */
497 simple_lock(&mntvnode_slock);
498 loop:
499 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
500 /* check if we're on the right list */
501 if (vp->v_mount != mp)
502 goto loop;
503
504 simple_lock(&vp->v_interlock);
505 pn = VPTOPP(vp);
506 nvp = TAILQ_NEXT(vp, v_mntvnodes);
507
508 if (vp->v_type != VREG || UVM_OBJ_IS_CLEAN(&vp->v_uobj)) {
509 simple_unlock(&vp->v_interlock);
510 continue;
511 }
512
513 simple_unlock(&mntvnode_slock);
514
515 /*
516 * Here we try to get a reference to the vnode and to
517 * lock it. This is mostly cargo-culted, but I will
518 * offer an explanation of why I believe this might
519 * actually do the right thing.
520 *
521 * If the vnode is a goner, we quite obviously don't need
522 * to sync it.
523 *
524 * If the vnode was busy, we don't need to sync it because
525 * this is never called with MNT_WAIT except from
526 * dounmount(), when we are wait-flushing all the dirty
527 * vnodes through other routes in any case. So there,
528 * sync() doesn't actually sync. Happy now?
529 *
530 * NOTE: if we're suspending, vget() does NOT lock.
531 * See puffs_lock() for details.
532 */
533 rv = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
534 if (rv) {
535 simple_lock(&mntvnode_slock);
536 if (rv == ENOENT)
537 goto loop;
538 continue;
539 }
540
541 /*
542 * Thread information to puffs_strategy() through the
543 * pnode flags: we want to issue the putpages operations
544 * as FAF if we're suspending, since it's very probable
545 * that our execution context is that of the userspace
546 * daemon. We can do this because:
547 * + we send the "going to suspend" prior to this part
548 * + if any of the writes fails in userspace, it's the
549 * file system server's problem to decide if this was a
550 * failed snapshot when it gets the "snapshot complete"
551 * notification.
552 * + if any of the writes fail in the kernel already, we
553 * immediately fail *and* notify the user server of
554 * failure.
555 *
556 * We also do FAFs if we're called from the syncer. This
557 * is just general optimization for trickle sync: no need
558 * to really guarantee that the stuff ended on backing
559 * storage.
560 * TODO: Maybe also hint the user server of this twist?
561 */
562 if (suspending || waitfor == MNT_LAZY) {
563 simple_lock(&vp->v_interlock);
564 pn->pn_stat |= PNODE_SUSPEND;
565 simple_unlock(&vp->v_interlock);
566 }
567 rv = VOP_FSYNC(vp, cred, waitfor, 0, 0, l);
568 if (suspending || waitfor == MNT_LAZY) {
569 simple_lock(&vp->v_interlock);
570 pn->pn_stat &= ~PNODE_SUSPEND;
571 simple_unlock(&vp->v_interlock);
572 }
573 if (rv)
574 error = rv;
575 vput(vp);
576 simple_lock(&mntvnode_slock);
577 }
578 simple_unlock(&mntvnode_slock);
579
580 return error;
581 }
582
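/*
 * sync: flush cached pages from our vnodes, then ask the file server
 * to sync whatever it has cached on its side.
 */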
583 int
584 puffs_sync(struct mount *mp, int waitfor, struct kauth_cred *cred,
585 struct lwp *l)
586 {
587 int error, rv;
588
589 PUFFS_VFSREQ(sync);
590
591 error = pageflush(mp, cred, waitfor, 0, l);
592
593 /* sync fs */
594 sync_arg.pvfsr_waitfor = waitfor;
595 puffs_credcvt(&sync_arg.pvfsr_cred, cred);
596 sync_arg.pvfsr_pid = puffs_lwp2pid(l);
597
598 rv = puffs_vfstouser(MPTOPUFFSMP(mp), PUFFS_VFS_SYNC,
599 &sync_arg, sizeof(sync_arg));
600 if (rv)
601 error = rv;
602
603 return error;
604 }
605
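/*
 * File handle -> vnode: hand the opaque handle data to the file
 * server and map the cookie it returns to a (possibly new) vnode.
 */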
606 int
607 puffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
608 {
609 struct puffs_mount *pmp = MPTOPUFFSMP(mp);
610 struct puffs_vfsreq_fhtonode *fhtonode_argp;
611 struct vnode *vp;
612 size_t argsize;
613 int error;
614
615 if (pmp->pmp_args.pa_fhsize == 0)
616 return EOPNOTSUPP;
617
618 if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) {
619 if (pmp->pmp_args.pa_fhsize < PUFFS_FROMFHSIZE(fhp->fid_len))
620 return EINVAL;
621 } else {
622 if (pmp->pmp_args.pa_fhsize != PUFFS_FROMFHSIZE(fhp->fid_len))
623 return EINVAL;
624 }
625
626 argsize = sizeof(struct puffs_vfsreq_fhtonode)
627 + PUFFS_FROMFHSIZE(fhp->fid_len);
628 fhtonode_argp = malloc(argsize, M_PUFFS, M_ZERO | M_WAITOK);
629 fhtonode_argp->pvfsr_dsize = PUFFS_FROMFHSIZE(fhp->fid_len);
630 memcpy(fhtonode_argp->pvfsr_data, fhp->fid_data,
631 PUFFS_FROMFHSIZE(fhp->fid_len));
632
633 error = puffs_vfstouser(pmp, PUFFS_VFS_FHTOVP, fhtonode_argp, argsize);
634 if (error)
635 goto out;
636
637 vp = puffs_pnode2vnode(pmp, fhtonode_argp->pvfsr_fhcookie, 1);
638 DPRINTF(("puffs_fhtovp: got cookie %p, existing vnode %p\n",
639 fhtonode_argp->pvfsr_fhcookie, vp));
640 if (!vp) {
641 error = puffs_getvnode(mp, fhtonode_argp->pvfsr_fhcookie,
642 fhtonode_argp->pvfsr_vtype, fhtonode_argp->pvfsr_size,
643 fhtonode_argp->pvfsr_rdev, &vp);
644 if (error)
645 goto out;
646 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
647 }
648
649 *vpp = vp;
650 out:
651 free(fhtonode_argp, M_PUFFS);
652 return error;
653 }
654
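/*
 * Vnode -> file handle: ask the file server for handle data for the
 * node's cookie and check that the result fits the caller's buffer.
 */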
655 int
656 puffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
657 {
658 struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
659 struct puffs_vfsreq_nodetofh *nodetofh_argp;
660 size_t argsize;
661 int error;
662
663 if (pmp->pmp_args.pa_fhsize == 0)
664 return EOPNOTSUPP;
665
666 /* if file handles are static length, we can return immediately */
667 if (((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) == 0)
668 && (PUFFS_FROMFHSIZE(*fh_size) < pmp->pmp_args.pa_fhsize)) {
669 *fh_size = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);
670 return E2BIG;
671 }
672
673 argsize = sizeof(struct puffs_vfsreq_nodetofh)
674 + PUFFS_FROMFHSIZE(*fh_size);
675 nodetofh_argp = malloc(argsize, M_PUFFS, M_ZERO | M_WAITOK);
676 nodetofh_argp->pvfsr_fhcookie = VPTOPNC(vp);
677 nodetofh_argp->pvfsr_dsize = PUFFS_FROMFHSIZE(*fh_size);
678
679 error = puffs_vfstouser(pmp, PUFFS_VFS_VPTOFH, nodetofh_argp, argsize);
680 if (error) {
681 if (error == E2BIG)
682 *fh_size = PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize);
683 goto out;
684 }
685
686 if (PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize) > FHANDLE_SIZE_MAX) {
687 /* XXX: wrong direction */
688 error = EINVAL;
689 goto out;
690 }
691
692 if (*fh_size < PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize)) {
693 *fh_size = PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize);
694 error = E2BIG;
695 goto out;
696 }
697 if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) {
698 *fh_size = PUFFS_TOFHSIZE(nodetofh_argp->pvfsr_dsize);
699 } else {
700 *fh_size = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);
701 }
702
703 if (fhp) {
704 fhp->fid_len = *fh_size;
705 memcpy(fhp->fid_data,
706 nodetofh_argp->pvfsr_data, nodetofh_argp->pvfsr_dsize);
707 }
708
709 out:
710 free(nodetofh_argp, M_PUFFS);
711 return error;
712 }
713
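/* Module init/done: attach the malloc type, the pnode pool and the transport. */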
714 void
715 puffs_init(void)
716 {
717
718 malloc_type_attach(M_PUFFS);
719
720 pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
721 "puffpnpl", &pool_allocator_nointr, IPL_NONE);
722 puffs_transport_init();
723 puffs_msgif_init();
724 }
725
726 void
727 puffs_done(void)
728 {
729
730 puffs_msgif_destroy();
731 puffs_transport_destroy();
732 pool_destroy(&puffs_pnpool);
733
734 malloc_type_detach(M_PUFFS);
735 }
736
737 int
738 puffs_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
739 {
740
741 return EOPNOTSUPP;
742 }
743
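/*
 * Suspend/resume for file system snapshots: flush data with pageflush()
 * and keep the file server informed of each state transition.
 */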
744 int
745 puffs_suspendctl(struct mount *mp, int cmd)
746 {
747 struct puffs_mount *pmp;
748 int error;
749
750 pmp = MPTOPUFFSMP(mp);
751 switch (cmd) {
752 case SUSPEND_SUSPEND:
753 DPRINTF(("puffs_suspendctl: suspending\n"));
754 if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
755 break;
756 puffs_suspendtouser(pmp, PUFFS_SUSPEND_START);
757
758 error = pageflush(mp, FSCRED, 0, 1, curlwp);
759 if (error == 0)
760 error = fstrans_setstate(mp, FSTRANS_SUSPENDED);
761
762 if (error != 0) {
763 puffs_suspendtouser(pmp, PUFFS_SUSPEND_ERROR);
764 (void) fstrans_setstate(mp, FSTRANS_NORMAL);
765 break;
766 }
767
768 puffs_suspendtouser(pmp, PUFFS_SUSPEND_SUSPENDED);
769
770 break;
771
772 case SUSPEND_RESUME:
773 DPRINTF(("puffs_suspendctl: resume\n"));
774 error = 0;
775 (void) fstrans_setstate(mp, FSTRANS_NORMAL);
776 puffs_suspendtouser(pmp, PUFFS_SUSPEND_RESUME);
777 break;
778
779 default:
780 error = EINVAL;
781 break;
782 }
783
784 DPRINTF(("puffs_suspendctl: return %d\n", error));
785 return error;
786 }
787
788 const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
789 &puffs_vnodeop_opv_desc,
790 &puffs_specop_opv_desc,
791 &puffs_fifoop_opv_desc,
792 &puffs_msgop_opv_desc,
793 NULL,
794 };
795
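/* The puffs vfsops vector, hooked into the kernel with VFS_ATTACH() below. */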
796 struct vfsops puffs_vfsops = {
797 MOUNT_PUFFS,
798 puffs_mount, /* mount */
799 puffs_start, /* start */
800 puffs_unmount, /* unmount */
801 puffs_root, /* root */
802 (void *)eopnotsupp, /* quotactl */
803 puffs_statvfs, /* statvfs */
804 puffs_sync, /* sync */
805 (void *)eopnotsupp, /* vget */
806 puffs_fhtovp, /* fhtovp */
807 puffs_vptofh, /* vptofh */
808 puffs_init, /* init */
809 NULL, /* reinit */
810 puffs_done, /* done */
811 NULL, /* mountroot */
812 puffs_snapshot, /* snapshot */
813 vfs_stdextattrctl, /* extattrctl */
814 puffs_suspendctl, /* suspendctl */
815 puffs_vnodeopv_descs, /* vnodeops */
816 0, /* refcount */
817 { NULL, NULL }
818 };
819 VFS_ATTACH(puffs_vfsops);
820