/* $NetBSD: puffs_vfsops.c,v 1.112 2014/05/25 17:43:47 hannken Exp $ */

/*
 * Copyright (c) 2005, 2006  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.112 2014/05/25 17:43:47 hannken Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/extattr.h>
#include <sys/queue.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/kauth.h>
#include <sys/proc.h>
#include <sys/module.h>
#include <sys/kthread.h>

#include <uvm/uvm.h>

#include <dev/putter/putter_sys.h>

#include <miscfs/genfs/genfs.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <lib/libkern/libkern.h>

#include <nfs/nfsproto.h> /* for fh sizes */

MODULE(MODULE_CLASS_VFS, puffs, "putter");

VFS_PROTOS(puffs_vfsop);

#ifndef PUFFS_PNODEBUCKETS
#define PUFFS_PNODEBUCKETS 256
#endif
#ifndef PUFFS_MAXPNODEBUCKETS
#define PUFFS_MAXPNODEBUCKETS 8192
#endif
int puffs_pnodebuckets_default = PUFFS_PNODEBUCKETS;
int puffs_maxpnodebuckets = PUFFS_MAXPNODEBUCKETS;

#define BUCKETALLOC(a) (sizeof(struct puffs_pnode_hashlist *) * (a))

static struct putter_ops puffs_putter = {
        .pop_getout     = puffs_msgif_getout,
        .pop_releaseout = puffs_msgif_releaseout,
        .pop_waitcount  = puffs_msgif_waitcount,
        .pop_dispatch   = puffs_msgif_dispatch,
        .pop_close      = puffs_msgif_close,
};

/*
 * Try to ensure data structures used by the puffs protocol
 * do not unexpectedly change.
 */
#if defined(__i386__) && defined(__ELF__)
CTASSERT(sizeof(struct puffs_kargs) == 3928);
CTASSERT(sizeof(struct vattr) == 136);
CTASSERT(sizeof(struct puffs_req) == 44);
#endif

int
puffs_vfsop_mount(struct mount *mp, const char *path, void *data,
    size_t *data_len)
{
        struct puffs_mount *pmp = NULL;
        struct puffs_kargs *args;
        char fstype[_VFS_NAMELEN];
        char *p;
        int error = 0, i;
        pid_t mntpid = curlwp->l_proc->p_pid;

        if (data == NULL)
                return EINVAL;
        if (*data_len < sizeof *args)
                return EINVAL;

        if (mp->mnt_flag & MNT_GETARGS) {
                pmp = MPTOPUFFSMP(mp);
                *(struct puffs_kargs *)data = pmp->pmp_args;
                *data_len = sizeof *args;
                return 0;
        }

        /* update is not supported currently */
        if (mp->mnt_flag & MNT_UPDATE)
                return EOPNOTSUPP;

        args = (struct puffs_kargs *)data;

        if (args->pa_vers != PUFFSVERSION) {
                printf("puffs_mount: development version mismatch: "
                    "kernel %d, lib %d\n", PUFFSVERSION, args->pa_vers);
                error = EINVAL;
                goto out;
        }

        if ((args->pa_flags & ~PUFFS_KFLAG_MASK) != 0) {
                printf("puffs_mount: invalid KFLAGs 0x%x\n", args->pa_flags);
                error = EINVAL;
                goto out;
        }
        if ((args->pa_fhflags & ~PUFFS_FHFLAG_MASK) != 0) {
                printf("puffs_mount: invalid FHFLAGs 0x%x\n", args->pa_fhflags);
                error = EINVAL;
                goto out;
        }

        for (i = 0; i < __arraycount(args->pa_spare); i++) {
                if (args->pa_spare[i] != 0) {
                        printf("puffs_mount: pa_spare[%d] = 0x%x\n",
                            i, args->pa_spare[i]);
                        error = EINVAL;
                        goto out;
                }
        }

        /* use dummy value for passthrough */
        if (args->pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
                args->pa_fhsize = sizeof(struct fid);

        /* sanitize file handle length */
        if (PUFFS_TOFHSIZE(args->pa_fhsize) > FHANDLE_SIZE_MAX) {
                printf("puffs_mount: handle size %zu too large\n",
                    args->pa_fhsize);
                error = EINVAL;
                goto out;
        }
        /* sanity check file handle max sizes */
        if (args->pa_fhsize && args->pa_fhflags & PUFFS_FHFLAG_PROTOMASK) {
                size_t kfhsize = PUFFS_TOFHSIZE(args->pa_fhsize);

                if (args->pa_fhflags & PUFFS_FHFLAG_NFSV2) {
                        if (NFSX_FHTOOBIG_P(kfhsize, 0)) {
                                printf("puffs_mount: fhsize larger than "
                                    "NFSv2 max %d\n",
                                    PUFFS_FROMFHSIZE(NFSX_V2FH));
                                error = EINVAL;
                                goto out;
                        }
                }

                if (args->pa_fhflags & PUFFS_FHFLAG_NFSV3) {
                        if (NFSX_FHTOOBIG_P(kfhsize, 1)) {
                                printf("puffs_mount: fhsize larger than "
                                    "NFSv3 max %d\n",
                                    PUFFS_FROMFHSIZE(NFSX_V3FHMAX));
                                error = EINVAL;
                                goto out;
                        }
                }
        }

        /* don't allow non-printing characters (like my sweet umlauts.. snif) */
        args->pa_typename[sizeof(args->pa_typename)-1] = '\0';
        for (p = args->pa_typename; *p; p++)
                if (*p < ' ' || *p > '~')
                        *p = '.';

        args->pa_mntfromname[sizeof(args->pa_mntfromname)-1] = '\0';
        for (p = args->pa_mntfromname; *p; p++)
                if (*p < ' ' || *p > '~')
                        *p = '.';

        /* build real name */
        (void)strlcpy(fstype, PUFFS_TYPEPREFIX, sizeof(fstype));
        (void)strlcat(fstype, args->pa_typename, sizeof(fstype));

        /* inform user server if it got the max request size it wanted */
        if (args->pa_maxmsglen == 0 || args->pa_maxmsglen > PUFFS_MSG_MAXSIZE)
                args->pa_maxmsglen = PUFFS_MSG_MAXSIZE;
        else if (args->pa_maxmsglen < 2*PUFFS_MSGSTRUCT_MAX)
                args->pa_maxmsglen = 2*PUFFS_MSGSTRUCT_MAX;

        (void)strlcpy(args->pa_typename, fstype, sizeof(args->pa_typename));

        if (args->pa_nhashbuckets == 0)
                args->pa_nhashbuckets = puffs_pnodebuckets_default;
        if (args->pa_nhashbuckets < 1)
                args->pa_nhashbuckets = 1;
        if (args->pa_nhashbuckets > PUFFS_MAXPNODEBUCKETS) {
                args->pa_nhashbuckets = puffs_maxpnodebuckets;
                printf("puffs_mount: using %d hash buckets. "
                    "adjust puffs_maxpnodebuckets for more\n",
                    puffs_maxpnodebuckets);
        }

        error = set_statvfs_info(path, UIO_USERSPACE, args->pa_mntfromname,
            UIO_SYSSPACE, fstype, mp, curlwp);
        if (error)
                goto out;
        mp->mnt_stat.f_iosize = DEV_BSIZE;
        mp->mnt_stat.f_namemax = args->pa_svfsb.f_namemax;

        /*
         * We can't handle the VFS_STATVFS() mount_domount() does
         * after VFS_MOUNT() because we'd deadlock, so handle it
         * here already.
         */
        copy_statvfs_info(&args->pa_svfsb, mp);
        (void)memcpy(&mp->mnt_stat, &args->pa_svfsb, sizeof(mp->mnt_stat));

        KASSERT(curlwp != uvm.pagedaemon_lwp);
        pmp = kmem_zalloc(sizeof(struct puffs_mount), KM_SLEEP);

        mp->mnt_fs_bshift = DEV_BSHIFT;
        mp->mnt_dev_bshift = DEV_BSHIFT;
        mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
        mp->mnt_data = pmp;

#if 0
        /*
         * XXX: puffs code is MPSAFE.  However, VFS really isn't.
         * Currently, there is nothing which protects an inode from
         * reclaim while there are threads inside the file system.
         * This means that in the event of a server crash, an MPSAFE
         * mount is likely to end up accessing invalid memory.  For the
         * non-mpsafe case, the kernel lock, general structure of
         * puffs and pmp_refcount protect the threads during escape.
         *
         * Fixing this will require:
         *   a) fixing vfs
         * OR
         *   b) adding a small sleep to puffs_msgif_close() between
         *      userdead() and dounmount().
         *      (well, this isn't really a fix, but would solve
         *      99.999% of the race conditions).
         *
         * Also, in the event of "b", unmount -f should be used,
         * like with any other file system, sparingly and only when
         * it is "known" to be safe.
         */
        mp->mnt_iflags |= IMNT_MPSAFE;
#endif

        pmp->pmp_status = PUFFSTAT_MOUNTING;
        pmp->pmp_mp = mp;
        pmp->pmp_msg_maxsize = args->pa_maxmsglen;
        pmp->pmp_args = *args;

        pmp->pmp_npnodehash = args->pa_nhashbuckets;
        pmp->pmp_pnodehash = kmem_alloc(BUCKETALLOC(pmp->pmp_npnodehash),
            KM_SLEEP);
        for (i = 0; i < pmp->pmp_npnodehash; i++)
                LIST_INIT(&pmp->pmp_pnodehash[i]);
        LIST_INIT(&pmp->pmp_newcookie);

        /*
         * Inform the fileops processing code that we have a mountpoint.
         * If it doesn't know about anyone with our pid/fd having the
         * device open, punt.
         */
        if ((pmp->pmp_pi
            = putter_attach(mntpid, args->pa_fd, pmp, &puffs_putter)) == NULL) {
                error = ENOENT;
                goto out;
        }

        /* XXX: check parameters */
        pmp->pmp_root_cookie = args->pa_root_cookie;
        pmp->pmp_root_vtype = args->pa_root_vtype;
        pmp->pmp_root_vsize = args->pa_root_vsize;
        pmp->pmp_root_rdev = args->pa_root_rdev;
        pmp->pmp_docompat = args->pa_time32;

        mutex_init(&pmp->pmp_lock, MUTEX_DEFAULT, IPL_NONE);
        mutex_init(&pmp->pmp_sopmtx, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&pmp->pmp_msg_waiter_cv, "puffsget");
        cv_init(&pmp->pmp_refcount_cv, "puffsref");
        cv_init(&pmp->pmp_unmounting_cv, "puffsum");
        cv_init(&pmp->pmp_sopcv, "puffsop");
        TAILQ_INIT(&pmp->pmp_msg_touser);
        TAILQ_INIT(&pmp->pmp_msg_replywait);
        TAILQ_INIT(&pmp->pmp_sopfastreqs);
        TAILQ_INIT(&pmp->pmp_sopnodereqs);

        if ((error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
            puffs_sop_thread, pmp, NULL, "puffsop")) != 0)
                goto out;
        pmp->pmp_sopthrcount = 1;

        DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
            mp, MPTOPUFFSMP(mp)));

        vfs_getnewfsid(mp);

 out:
        if (error && pmp && pmp->pmp_pi)
                putter_detach(pmp->pmp_pi);
        if (error && pmp && pmp->pmp_pnodehash)
                kmem_free(pmp->pmp_pnodehash, BUCKETALLOC(pmp->pmp_npnodehash));
        if (error && pmp)
                kmem_free(pmp, sizeof(struct puffs_mount));
        return error;
}

int
puffs_vfsop_start(struct mount *mp, int flags)
{
        struct puffs_mount *pmp = MPTOPUFFSMP(mp);

        KASSERT(pmp->pmp_status == PUFFSTAT_MOUNTING);
        pmp->pmp_status = PUFFSTAT_RUNNING;

        return 0;
}

int
puffs_vfsop_unmount(struct mount *mp, int mntflags)
{
        PUFFS_MSG_VARS(vfs, unmount);
        struct puffs_mount *pmp;
        int error, force;

        error = 0;
        force = mntflags & MNT_FORCE;
        pmp = MPTOPUFFSMP(mp);

        DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
            "status 0x%x\n", pmp->pmp_status));

        /*
         * flush all the vnodes.  VOP_RECLAIM() takes care that the
         * root vnode does not get flushed until unmount.  The
         * userspace root node cookie is stored in the mount
         * structure, so we can always re-instantiate a root vnode,
         * should userspace unmount decide it doesn't want to
         * cooperate.
         */
        error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
        if (error)
                goto out;

        /*
         * If we are not DYING, we should ask userspace's opinion
         * about the situation
         */
        mutex_enter(&pmp->pmp_lock);
        if (pmp->pmp_status != PUFFSTAT_DYING) {
                pmp->pmp_unmounting = 1;
                mutex_exit(&pmp->pmp_lock);

                PUFFS_MSG_ALLOC(vfs, unmount);
                puffs_msg_setinfo(park_unmount,
                    PUFFSOP_VFS, PUFFS_VFS_UNMOUNT, NULL);
                unmount_msg->pvfsr_flags = mntflags;

                PUFFS_MSG_ENQUEUEWAIT(pmp, park_unmount, error);
                PUFFS_MSG_RELEASE(unmount);

                error = checkerr(pmp, error, __func__);
                DPRINTF(("puffs_unmount: error %d force %d\n", error, force));

                mutex_enter(&pmp->pmp_lock);
                pmp->pmp_unmounting = 0;
                cv_broadcast(&pmp->pmp_unmounting_cv);
        }

        /*
         * if userspace cooperated or we really need to die,
         * screw what userland thinks and just die.
         */
        if (error == 0 || force) {
                struct puffs_sopreq *psopr;

                /* tell waiters & other resources to go unwait themselves */
                puffs_userdead(pmp);
                putter_detach(pmp->pmp_pi);

                /*
                 * Wait until there are no more users for the mount resource.
                 * Notice that this is hooked against transport_close
                 * and return from touser.  In an ideal world, it would
                 * be hooked against final return from all operations.
                 * But currently it works well enough, since nobody
                 * does weird blocking voodoo after return from touser().
                 */
                while (pmp->pmp_refcount != 0)
                        cv_wait(&pmp->pmp_refcount_cv, &pmp->pmp_lock);
                mutex_exit(&pmp->pmp_lock);

                /*
                 * Release kernel thread now that there is nothing
                 * it would be wanting to lock.
                 */
                KASSERT(curlwp != uvm.pagedaemon_lwp);
                psopr = kmem_alloc(sizeof(*psopr), KM_SLEEP);
                psopr->psopr_sopreq = PUFFS_SOPREQSYS_EXIT;
                mutex_enter(&pmp->pmp_sopmtx);
                if (pmp->pmp_sopthrcount == 0) {
                        mutex_exit(&pmp->pmp_sopmtx);
                        kmem_free(psopr, sizeof(*psopr));
                        mutex_enter(&pmp->pmp_sopmtx);
                        KASSERT(pmp->pmp_sopthrcount == 0);
                } else {
                        TAILQ_INSERT_TAIL(&pmp->pmp_sopfastreqs,
                            psopr, psopr_entries);
                        cv_signal(&pmp->pmp_sopcv);
                }
                while (pmp->pmp_sopthrcount > 0)
                        cv_wait(&pmp->pmp_sopcv, &pmp->pmp_sopmtx);
                mutex_exit(&pmp->pmp_sopmtx);

                /* free resources now that we hopefully have no waiters left */
                cv_destroy(&pmp->pmp_unmounting_cv);
                cv_destroy(&pmp->pmp_refcount_cv);
                cv_destroy(&pmp->pmp_msg_waiter_cv);
                cv_destroy(&pmp->pmp_sopcv);
                mutex_destroy(&pmp->pmp_lock);
                mutex_destroy(&pmp->pmp_sopmtx);

                kmem_free(pmp->pmp_pnodehash, BUCKETALLOC(pmp->pmp_npnodehash));
                kmem_free(pmp, sizeof(struct puffs_mount));
                error = 0;
        } else {
                mutex_exit(&pmp->pmp_lock);
        }

 out:
        DPRINTF(("puffs_unmount: return %d\n", error));
        return error;
}

/*
 * This doesn't need to travel to userspace
 */
int
puffs_vfsop_root(struct mount *mp, struct vnode **vpp)
{
        struct puffs_mount *pmp = MPTOPUFFSMP(mp);
        int rv;

        rv = puffs_cookie2vnode(pmp, pmp->pmp_root_cookie, 1, 1, vpp);
        KASSERT(rv != PUFFS_NOSUCHCOOKIE);
        return rv;
}

int
puffs_vfsop_statvfs(struct mount *mp, struct statvfs *sbp)
{
        PUFFS_MSG_VARS(vfs, statvfs);
        struct puffs_mount *pmp;
        int error = 0;

        pmp = MPTOPUFFSMP(mp);

        /*
         * If we are mounting, it means that the userspace counterpart
         * is calling mount(2), but mount(2) also calls statvfs.  So
         * requesting statvfs from userspace would mean a deadlock.
         * Compensate.
         */
        if (__predict_false(pmp->pmp_status == PUFFSTAT_MOUNTING))
                return EINPROGRESS;

        PUFFS_MSG_ALLOC(vfs, statvfs);
        puffs_msg_setinfo(park_statvfs, PUFFSOP_VFS, PUFFS_VFS_STATVFS, NULL);

        PUFFS_MSG_ENQUEUEWAIT(pmp, park_statvfs, error);
        error = checkerr(pmp, error, __func__);
        statvfs_msg->pvfsr_sb.f_iosize = DEV_BSIZE;

        /*
         * Try to produce a sensible result even in the event
         * of userspace error.
         *
         * XXX: cache the copy in non-error case
         */
        if (!error) {
                copy_statvfs_info(&statvfs_msg->pvfsr_sb, mp);
                (void)memcpy(sbp, &statvfs_msg->pvfsr_sb,
                    sizeof(struct statvfs));
        } else {
                copy_statvfs_info(sbp, mp);
        }

        PUFFS_MSG_RELEASE(statvfs);
        return error;
}

static bool
pageflush_selector(void *cl, struct vnode *vp)
{

        return vp->v_type == VREG && !UVM_OBJ_IS_CLEAN(&vp->v_uobj);
}

static int
pageflush(struct mount *mp, kauth_cred_t cred, int waitfor)
{
        struct puffs_node *pn;
        struct vnode *vp;
        struct vnode_iterator *marker;
        int error, rv, fsyncwait;

        error = 0;
        fsyncwait = (waitfor == MNT_WAIT) ? FSYNC_WAIT : 0;

        /*
         * Sync all cached data from regular vnodes (which are not
         * currently locked, see below).  After this we call VFS_SYNC
         * for the fs server, which should handle data and metadata for
         * all the nodes it knows to exist.
         */
        vfs_vnode_iterator_init(mp, &marker);
        while ((vp = vfs_vnode_iterator_next(marker, pageflush_selector,
            NULL)))
        {
                /*
                 * Here we try to get a reference to the vnode and to
                 * lock it.  This is mostly cargo-culted, but I will
                 * offer an explanation to why I believe this might
                 * actually do the right thing.
                 *
                 * If the vnode is a goner, we quite obviously don't need
                 * to sync it.
                 *
                 * If the vnode was busy, we don't need to sync it because
                 * this is never called with MNT_WAIT except from
                 * dounmount(), when we are wait-flushing all the dirty
                 * vnodes through other routes in any case.  So there,
                 * sync() doesn't actually sync.  Happy now?
                 */
                error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
                if (error) {
                        vrele(vp);
                        continue;
                }
                pn = VPTOPP(vp);
                /* hmm.. is the FAF thing entirely sensible? */
                if (waitfor == MNT_LAZY) {
                        mutex_enter(vp->v_interlock);
                        pn->pn_stat |= PNODE_FAF;
                        mutex_exit(vp->v_interlock);
                }
                rv = VOP_FSYNC(vp, cred, fsyncwait, 0, 0);
                if (waitfor == MNT_LAZY) {
                        mutex_enter(vp->v_interlock);
                        pn->pn_stat &= ~PNODE_FAF;
                        mutex_exit(vp->v_interlock);
                }
                if (rv)
                        error = rv;
                vput(vp);
        }
        vfs_vnode_iterator_destroy(marker);

        return error;
}

int
puffs_vfsop_sync(struct mount *mp, int waitfor, struct kauth_cred *cred)
{
        PUFFS_MSG_VARS(vfs, sync);
        struct puffs_mount *pmp = MPTOPUFFSMP(mp);
        int error, rv;

        error = pageflush(mp, cred, waitfor);

        /* sync fs */
        PUFFS_MSG_ALLOC(vfs, sync);
        sync_msg->pvfsr_waitfor = waitfor;
        puffs_credcvt(&sync_msg->pvfsr_cred, cred);
        puffs_msg_setinfo(park_sync, PUFFSOP_VFS, PUFFS_VFS_SYNC, NULL);

        PUFFS_MSG_ENQUEUEWAIT(pmp, park_sync, rv);
        rv = checkerr(pmp, rv, __func__);
        if (rv)
                error = rv;

        PUFFS_MSG_RELEASE(sync);
        return error;
}

int
puffs_vfsop_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
        PUFFS_MSG_VARS(vfs, fhtonode);
        struct puffs_mount *pmp = MPTOPUFFSMP(mp);
        struct vnode *vp;
        void *fhdata;
        size_t argsize, fhlen;
        int error;

        if (pmp->pmp_args.pa_fhsize == 0)
                return EOPNOTSUPP;

        if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) {
                fhlen = fhp->fid_len;
                fhdata = fhp;
        } else {
                fhlen = PUFFS_FROMFHSIZE(fhp->fid_len);
                fhdata = fhp->fid_data;

                if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) {
                        if (pmp->pmp_args.pa_fhsize < fhlen)
                                return EINVAL;
                } else {
                        if (pmp->pmp_args.pa_fhsize != fhlen)
                                return EINVAL;
                }
        }

        argsize = sizeof(struct puffs_vfsmsg_fhtonode) + fhlen;
        puffs_msgmem_alloc(argsize, &park_fhtonode, (void *)&fhtonode_msg, 1);
        fhtonode_msg->pvfsr_dsize = fhlen;
        memcpy(fhtonode_msg->pvfsr_data, fhdata, fhlen);
        puffs_msg_setinfo(park_fhtonode, PUFFSOP_VFS, PUFFS_VFS_FHTOVP, NULL);

        PUFFS_MSG_ENQUEUEWAIT(pmp, park_fhtonode, error);
        error = checkerr(pmp, error, __func__);
        if (error)
                goto out;

        error = puffs_cookie2vnode(pmp, fhtonode_msg->pvfsr_fhcookie, 1,1,&vp);
        DPRINTF(("puffs_fhtovp: got cookie %p, existing vnode %p\n",
            fhtonode_msg->pvfsr_fhcookie, vp));
        if (error == PUFFS_NOSUCHCOOKIE) {
                error = puffs_getvnode(mp, fhtonode_msg->pvfsr_fhcookie,
                    fhtonode_msg->pvfsr_vtype, fhtonode_msg->pvfsr_size,
                    fhtonode_msg->pvfsr_rdev, &vp);
                if (error)
                        goto out;
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        } else if (error) {
                goto out;
        }

        *vpp = vp;
 out:
        puffs_msgmem_release(park_fhtonode);
        return error;
}

int
puffs_vfsop_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
{
        PUFFS_MSG_VARS(vfs, nodetofh);
        struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
        size_t argsize, fhlen;
        int error;

        if (pmp->pmp_args.pa_fhsize == 0)
                return EOPNOTSUPP;

        /* if file handles are static len, we can test len immediately */
        if (((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) == 0)
            && ((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) == 0)
            && (PUFFS_FROMFHSIZE(*fh_size) < pmp->pmp_args.pa_fhsize)) {
                *fh_size = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);
                return E2BIG;
        }

        if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
                fhlen = *fh_size;
        else
                fhlen = PUFFS_FROMFHSIZE(*fh_size);

        argsize = sizeof(struct puffs_vfsmsg_nodetofh) + fhlen;
        puffs_msgmem_alloc(argsize, &park_nodetofh, (void *)&nodetofh_msg, 1);
        nodetofh_msg->pvfsr_fhcookie = VPTOPNC(vp);
        nodetofh_msg->pvfsr_dsize = fhlen;
        puffs_msg_setinfo(park_nodetofh, PUFFSOP_VFS, PUFFS_VFS_VPTOFH, NULL);

        PUFFS_MSG_ENQUEUEWAIT(pmp, park_nodetofh, error);
        error = checkerr(pmp, error, __func__);

        if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
                fhlen = nodetofh_msg->pvfsr_dsize;
        else if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC)
                fhlen = PUFFS_TOFHSIZE(nodetofh_msg->pvfsr_dsize);
        else
                fhlen = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);

        if (error) {
                if (error == E2BIG)
                        *fh_size = fhlen;
                goto out;
        }

        if (fhlen > FHANDLE_SIZE_MAX) {
                puffs_senderr(pmp, PUFFS_ERR_VPTOFH, E2BIG,
                    "file handle too big", VPTOPNC(vp));
                error = EPROTO;
                goto out;
        }

        if (*fh_size < fhlen) {
                *fh_size = fhlen;
                error = E2BIG;
                goto out;
        }
        *fh_size = fhlen;

        if (fhp) {
                if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) {
                        memcpy(fhp, nodetofh_msg->pvfsr_data, fhlen);
                } else {
                        fhp->fid_len = *fh_size;
                        memcpy(fhp->fid_data, nodetofh_msg->pvfsr_data,
                            nodetofh_msg->pvfsr_dsize);
                }
        }

 out:
        puffs_msgmem_release(park_nodetofh);
        return error;
}

void
puffs_vfsop_init(void)
{

        /* some checks depend on this */
        KASSERT(VNOVAL == VSIZENOTSET);

        pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
            "puffpnpl", &pool_allocator_nointr, IPL_NONE);
        pool_init(&puffs_vapool, sizeof(struct vattr), 0, 0, 0,
            "puffvapl", &pool_allocator_nointr, IPL_NONE);
        puffs_msgif_init();
}

void
puffs_vfsop_done(void)
{

        puffs_msgif_destroy();
        pool_destroy(&puffs_pnpool);
        pool_destroy(&puffs_vapool);
}

int
puffs_vfsop_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
{

        return EOPNOTSUPP;
}

int
puffs_vfsop_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
    int attrnamespace, const char *attrname)
{
        PUFFS_MSG_VARS(vfs, extattrctl);
        struct puffs_mount *pmp = MPTOPUFFSMP(mp);
        struct puffs_node *pnp;
        puffs_cookie_t pnc;
        int error, flags;

        if (vp) {
                /* doesn't make sense for puffs servers */
                if (vp->v_mount != mp)
                        return EXDEV;
                pnp = vp->v_data;
                pnc = pnp->pn_cookie;
                flags = PUFFS_EXTATTRCTL_HASNODE;
        } else {
                pnp = pnc = NULL;
                flags = 0;
        }

        PUFFS_MSG_ALLOC(vfs, extattrctl);
        extattrctl_msg->pvfsr_cmd = cmd;
        extattrctl_msg->pvfsr_attrnamespace = attrnamespace;
        extattrctl_msg->pvfsr_flags = flags;
        if (attrname) {
                strlcpy(extattrctl_msg->pvfsr_attrname, attrname,
                    sizeof(extattrctl_msg->pvfsr_attrname));
                extattrctl_msg->pvfsr_flags |= PUFFS_EXTATTRCTL_HASATTRNAME;
        }
        puffs_msg_setinfo(park_extattrctl,
            PUFFSOP_VFS, PUFFS_VFS_EXTATTRCTL, pnc);

        puffs_msg_enqueue(pmp, park_extattrctl);
        if (vp) {
                mutex_enter(&pnp->pn_mtx);
                puffs_referencenode(pnp);
                mutex_exit(&pnp->pn_mtx);
                VOP_UNLOCK(vp);
        }
        error = puffs_msg_wait2(pmp, park_extattrctl, pnp, NULL);
        PUFFS_MSG_RELEASE(extattrctl);
        if (vp) {
                puffs_releasenode(pnp);
        }

        return checkerr(pmp, error, __func__);
}

const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
        &puffs_vnodeop_opv_desc,
        &puffs_specop_opv_desc,
        &puffs_fifoop_opv_desc,
        &puffs_msgop_opv_desc,
        NULL,
};

struct vfsops puffs_vfsops = {
        .vfs_name = MOUNT_PUFFS,
        .vfs_min_mount_data = sizeof (struct puffs_kargs),
        .vfs_mount = puffs_vfsop_mount,
        .vfs_start = puffs_vfsop_start,
        .vfs_unmount = puffs_vfsop_unmount,
        .vfs_root = puffs_vfsop_root,
        .vfs_quotactl = (void *)eopnotsupp,
        .vfs_statvfs = puffs_vfsop_statvfs,
        .vfs_sync = puffs_vfsop_sync,
        .vfs_vget = (void *)eopnotsupp,
        .vfs_fhtovp = puffs_vfsop_fhtovp,
        .vfs_vptofh = puffs_vfsop_vptofh,
        .vfs_init = puffs_vfsop_init,
        .vfs_done = puffs_vfsop_done,
        .vfs_snapshot = puffs_vfsop_snapshot,
        .vfs_extattrctl = puffs_vfsop_extattrctl,
        .vfs_suspendctl = (void *)eopnotsupp,
        .vfs_renamelock_enter = genfs_renamelock_enter,
        .vfs_renamelock_exit = genfs_renamelock_exit,
        .vfs_fsync = (void *)eopnotsupp,
        .vfs_opv_descs = puffs_vnodeopv_descs
};

static int
puffs_modcmd(modcmd_t cmd, void *arg)
{

        switch (cmd) {
        case MODULE_CMD_INIT:
                return vfs_attach(&puffs_vfsops);
        case MODULE_CMD_FINI:
                return vfs_detach(&puffs_vfsops);
        default:
                return ENOTTY;
        }
}