/*	$NetBSD: puffs_vfsops.c,v 1.124 2020/01/17 20:08:08 ad Exp $	*/

/*
 * Copyright (c) 2005, 2006 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.124 2020/01/17 20:08:08 ad Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/extattr.h>
#include <sys/queue.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/kauth.h>
#include <sys/proc.h>
#include <sys/module.h>
#include <sys/kthread.h>

#include <uvm/uvm.h>

#include <dev/putter/putter_sys.h>

#include <miscfs/genfs/genfs.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <lib/libkern/libkern.h>

#include <nfs/nfsproto.h> /* for fh sizes */

MODULE(MODULE_CLASS_VFS, puffs, "putter");
VFS_PROTOS(puffs_vfsop);

static struct putter_ops puffs_putter = {
	.pop_getout = puffs_msgif_getout,
	.pop_releaseout = puffs_msgif_releaseout,
	.pop_waitcount = puffs_msgif_waitcount,
	.pop_dispatch = puffs_msgif_dispatch,
	.pop_close = puffs_msgif_close,
};

static const struct genfs_ops puffs_genfsops = {
	.gop_size = puffs_gop_size,
	.gop_write = genfs_gop_write,
	.gop_markupdate = puffs_gop_markupdate,
#if 0
	.gop_alloc, should ask userspace
#endif
	.gop_putrange = genfs_gop_putrange,
};

/*
 * Try to ensure data structures used by the puffs protocol
 * do not unexpectedly change.
 */
#if defined(__i386__) && defined(__ELF__)
CTASSERT(sizeof(struct puffs_kargs) == 3928);
CTASSERT(sizeof(struct vattr) == 136);
CTASSERT(sizeof(struct puffs_req) == 44);
#endif

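/*
 * Mount the file system: validate the arguments supplied by the
 * userspace server, attach to the putter instance behind pa_fd,
 * initialize the puffs_mount structure and start the sop worker thread.
 */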
int
puffs_vfsop_mount(struct mount *mp, const char *path, void *data,
    size_t *data_len)
{
	struct puffs_mount *pmp = NULL;
	struct puffs_kargs *args;
	char fstype[_VFS_NAMELEN];
	char *p;
	int error = 0, i;
	pid_t mntpid = curlwp->l_proc->p_pid;

	if (data == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		pmp = MPTOPUFFSMP(mp);
		*(struct puffs_kargs *)data = pmp->pmp_args;
		*data_len = sizeof *args;
		return 0;
	}

	/* update is not supported currently */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	args = (struct puffs_kargs *)data;

	if (args->pa_vers != PUFFSVERSION) {
		printf("puffs_mount: development version mismatch: "
		    "kernel %d, lib %d\n", PUFFSVERSION, args->pa_vers);
		error = EINVAL;
		goto out;
	}

	if ((args->pa_flags & ~PUFFS_KFLAG_MASK) != 0) {
		printf("puffs_mount: invalid KFLAGs 0x%x\n", args->pa_flags);
		error = EINVAL;
		goto out;
	}
	if ((args->pa_fhflags & ~PUFFS_FHFLAG_MASK) != 0) {
		printf("puffs_mount: invalid FHFLAGs 0x%x\n", args->pa_fhflags);
		error = EINVAL;
		goto out;
	}

	for (i = 0; i < __arraycount(args->pa_spare); i++) {
		if (args->pa_spare[i] != 0) {
			printf("puffs_mount: pa_spare[%d] = 0x%x\n",
			    i, args->pa_spare[i]);
			error = EINVAL;
			goto out;
		}
	}

	/* use dummy value for passthrough */
	if (args->pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
		args->pa_fhsize = sizeof(struct fid);

	/* sanitize file handle length */
	if (PUFFS_TOFHSIZE(args->pa_fhsize) > FHANDLE_SIZE_MAX) {
		printf("puffs_mount: handle size %zu too large\n",
		    args->pa_fhsize);
		error = EINVAL;
		goto out;
	}
	/* sanity check file handle max sizes */
	if (args->pa_fhsize && args->pa_fhflags & PUFFS_FHFLAG_PROTOMASK) {
		size_t kfhsize = PUFFS_TOFHSIZE(args->pa_fhsize);

		if (args->pa_fhflags & PUFFS_FHFLAG_NFSV2) {
			if (NFSX_FHTOOBIG_P(kfhsize, 0)) {
				printf("puffs_mount: fhsize larger than "
				    "NFSv2 max %d\n",
				    PUFFS_FROMFHSIZE(NFSX_V2FH));
				error = EINVAL;
				goto out;
			}
		}

		if (args->pa_fhflags & PUFFS_FHFLAG_NFSV3) {
			if (NFSX_FHTOOBIG_P(kfhsize, 1)) {
				printf("puffs_mount: fhsize larger than "
				    "NFSv3 max %d\n",
				    PUFFS_FROMFHSIZE(NFSX_V3FHMAX));
				error = EINVAL;
				goto out;
			}
		}
	}

	/* don't allow non-printing characters (like my sweet umlauts.. snif) */
	args->pa_typename[sizeof(args->pa_typename)-1] = '\0';
	for (p = args->pa_typename; *p; p++)
		if (*p < ' ' || *p > '~')
			*p = '.';

	args->pa_mntfromname[sizeof(args->pa_mntfromname)-1] = '\0';
	for (p = args->pa_mntfromname; *p; p++)
		if (*p < ' ' || *p > '~')
			*p = '.';

	/* build real name */
	(void)strlcpy(fstype, PUFFS_TYPEPREFIX, sizeof(fstype));
	(void)strlcat(fstype, args->pa_typename, sizeof(fstype));

	/* inform user server if it got the max request size it wanted */
	if (args->pa_maxmsglen == 0 || args->pa_maxmsglen > PUFFS_MSG_MAXSIZE)
		args->pa_maxmsglen = PUFFS_MSG_MAXSIZE;
	else if (args->pa_maxmsglen < 2*PUFFS_MSGSTRUCT_MAX)
		args->pa_maxmsglen = 2*PUFFS_MSGSTRUCT_MAX;

	(void)strlcpy(args->pa_typename, fstype, sizeof(args->pa_typename));

	error = set_statvfs_info(path, UIO_USERSPACE, args->pa_mntfromname,
	    UIO_SYSSPACE, fstype, mp, curlwp);
	if (error)
		goto out;
	mp->mnt_stat.f_iosize = DEV_BSIZE;
	mp->mnt_stat.f_namemax = args->pa_svfsb.f_namemax;

	/*
	 * We can't handle the VFS_STATVFS() mount_domount() does
	 * after VFS_MOUNT() because we'd deadlock, so handle it
	 * here already.
	 */
	struct statvfs *sb = STATVFSBUF_GET();
	puffs_statvfs_to_statvfs(&args->pa_svfsb, sb);
	copy_statvfs_info(sb, mp);
	STATVFSBUF_PUT(sb);

	statvfs_to_puffs_statvfs(&mp->mnt_stat, &args->pa_svfsb);

	KASSERT(curlwp != uvm.pagedaemon_lwp);
	pmp = kmem_zalloc(sizeof(struct puffs_mount), KM_SLEEP);

	mp->mnt_fs_bshift = DEV_BSHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
	mp->mnt_data = pmp;

#if 0
	/*
	 * XXX: puffs code is MPSAFE. However, VFS really isn't.
	 * Currently, there is nothing which protects an inode from
	 * reclaim while there are threads inside the file system.
	 * This means that in the event of a server crash, an MPSAFE
	 * mount is likely to end up accessing invalid memory. For the
	 * non-mpsafe case, the kernel lock, general structure of
	 * puffs and pmp_refcount protect the threads during escape.
	 *
	 * Fixing this will require:
	 *   a) fixing vfs
	 * OR
	 *   b) adding a small sleep to puffs_msgif_close() between
	 *      userdead() and dounmount().
	 *      (well, this isn't really a fix, but would solve
	 *      99.999% of the race conditions).
	 *
	 * Also, in the event of "b", unmount -f should be used,
	 * like with any other file system, sparingly and only when
	 * it is "known" to be safe.
	 */
	mp->mnt_iflags |= IMNT_MPSAFE;
#endif

	pmp->pmp_status = PUFFSTAT_MOUNTING;
	pmp->pmp_mp = mp;
	pmp->pmp_msg_maxsize = args->pa_maxmsglen;
	pmp->pmp_args = *args;

	/*
	 * Inform the fileops processing code that we have a mountpoint.
	 * If it doesn't know about anyone with our pid/fd having the
	 * device open, punt
	 */
	if ((pmp->pmp_pi
	    = putter_attach(mntpid, args->pa_fd, pmp, &puffs_putter)) == NULL) {
		error = ENOENT;
		goto out;
	}

	/* XXX: check parameters */
	pmp->pmp_root_cookie = args->pa_root_cookie;
	switch (args->pa_root_vtype) {
	case VNON: case VREG: case VDIR: case VBLK:
	case VCHR: case VLNK: case VSOCK: case VFIFO:
		break;
	default:
		error = EINVAL;
		goto out;
	}
	pmp->pmp_root_vtype = args->pa_root_vtype;

	if (args->pa_root_vsize < 0) {
		error = EINVAL;
		goto out;
	}
	pmp->pmp_root_vsize = args->pa_root_vsize;

	pmp->pmp_root_rdev = args->pa_root_rdev;
	pmp->pmp_docompat = args->pa_time32;

	mutex_init(&pmp->pmp_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pmp->pmp_sopmtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pmp->pmp_msg_waiter_cv, "puffsget");
	cv_init(&pmp->pmp_refcount_cv, "puffsref");
	cv_init(&pmp->pmp_unmounting_cv, "puffsum");
	cv_init(&pmp->pmp_sopcv, "puffsop");
	TAILQ_INIT(&pmp->pmp_msg_touser);
	TAILQ_INIT(&pmp->pmp_msg_replywait);
	TAILQ_INIT(&pmp->pmp_sopfastreqs);
	TAILQ_INIT(&pmp->pmp_sopnodereqs);

	if ((error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    puffs_sop_thread, pmp, NULL, "puffsop")) != 0)
		goto out;
	pmp->pmp_sopthrcount = 1;

	DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
	    mp, MPTOPUFFSMP(mp)));

	vfs_getnewfsid(mp);

 out:
	if (error && pmp && pmp->pmp_pi)
		putter_detach(pmp->pmp_pi);
	if (error && pmp)
		kmem_free(pmp, sizeof(struct puffs_mount));
	return error;
}

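/*
 * Mounting has completed; move the mount from MOUNTING to RUNNING state.
 */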
int
puffs_vfsop_start(struct mount *mp, int flags)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);

	KASSERT(pmp->pmp_status == PUFFSTAT_MOUNTING);
	pmp->pmp_status = PUFFSTAT_RUNNING;

	return 0;
}

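/*
 * Unmount the file system: flush vnodes, consult the userspace server
 * unless it is already dead, and if it agrees (or MNT_FORCE was given)
 * tear down the transport, the sop thread and the mount structure.
 */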
int
puffs_vfsop_unmount(struct mount *mp, int mntflags)
{
	PUFFS_MSG_VARS(vfs, unmount);
	struct puffs_mount *pmp;
	int error, force;

	error = 0;
	force = mntflags & MNT_FORCE;
	pmp = MPTOPUFFSMP(mp);

	DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
	    "status 0x%x\n", pmp->pmp_status));

	/*
	 * flush all the vnodes. VOP_RECLAIM() takes care that the
	 * root vnode does not get flushed until unmount. The
	 * userspace root node cookie is stored in the mount
	 * structure, so we can always re-instantiate a root vnode,
	 * should userspace unmount decide it doesn't want to
	 * cooperate.
	 */
	error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
	if (error)
		goto out;

	/*
	 * If we are not DYING, we should ask userspace's opinion
	 * about the situation
	 */
	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_status != PUFFSTAT_DYING) {
		pmp->pmp_unmounting = 1;
		mutex_exit(&pmp->pmp_lock);

		PUFFS_MSG_ALLOC(vfs, unmount);
		puffs_msg_setinfo(park_unmount,
		    PUFFSOP_VFS, PUFFS_VFS_UNMOUNT, NULL);
		unmount_msg->pvfsr_flags = mntflags;

		PUFFS_MSG_ENQUEUEWAIT(pmp, park_unmount, error);
		PUFFS_MSG_RELEASE(unmount);

		error = checkerr(pmp, error, __func__);
		DPRINTF(("puffs_unmount: error %d force %d\n", error, force));

		mutex_enter(&pmp->pmp_lock);
		pmp->pmp_unmounting = 0;
		cv_broadcast(&pmp->pmp_unmounting_cv);
	}

	/*
	 * if userspace cooperated or we really need to die,
	 * screw what userland thinks and just die.
	 */
	if (error == 0 || force) {
		struct puffs_sopreq *psopr;

		/* tell waiters & other resources to go unwait themselves */
		puffs_userdead(pmp);
		putter_detach(pmp->pmp_pi);

		/*
		 * Wait until there are no more users for the mount resource.
		 * Notice that this is hooked against transport_close
		 * and return from touser. In an ideal world, it would
		 * be hooked against final return from all operations.
		 * But currently it works well enough, since nobody
		 * does weird blocking voodoo after return from touser().
		 */
		while (pmp->pmp_refcount != 0)
			cv_wait(&pmp->pmp_refcount_cv, &pmp->pmp_lock);
		mutex_exit(&pmp->pmp_lock);

		/*
		 * Release kernel thread now that there is nothing
		 * it would be wanting to lock.
		 */
		KASSERT(curlwp != uvm.pagedaemon_lwp);
		psopr = kmem_alloc(sizeof(*psopr), KM_SLEEP);
		psopr->psopr_sopreq = PUFFS_SOPREQSYS_EXIT;
		mutex_enter(&pmp->pmp_sopmtx);
		if (pmp->pmp_sopthrcount == 0) {
			mutex_exit(&pmp->pmp_sopmtx);
			kmem_free(psopr, sizeof(*psopr));
			mutex_enter(&pmp->pmp_sopmtx);
			KASSERT(pmp->pmp_sopthrcount == 0);
		} else {
			TAILQ_INSERT_TAIL(&pmp->pmp_sopfastreqs,
			    psopr, psopr_entries);
			cv_signal(&pmp->pmp_sopcv);
		}
		while (pmp->pmp_sopthrcount > 0)
			cv_wait(&pmp->pmp_sopcv, &pmp->pmp_sopmtx);
		mutex_exit(&pmp->pmp_sopmtx);

		/* free resources now that we hopefully have no waiters left */
		cv_destroy(&pmp->pmp_unmounting_cv);
		cv_destroy(&pmp->pmp_refcount_cv);
		cv_destroy(&pmp->pmp_msg_waiter_cv);
		cv_destroy(&pmp->pmp_sopcv);
		mutex_destroy(&pmp->pmp_lock);
		mutex_destroy(&pmp->pmp_sopmtx);

		kmem_free(pmp, sizeof(struct puffs_mount));
		error = 0;
	} else {
		mutex_exit(&pmp->pmp_lock);
	}

 out:
	DPRINTF(("puffs_unmount: return %d\n", error));
	return error;
}

/*
 * This doesn't need to travel to userspace
 */
int
puffs_vfsop_root(struct mount *mp, int lktype, struct vnode **vpp)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	int rv;

	rv = puffs_cookie2vnode(pmp, pmp->pmp_root_cookie, vpp);
	KASSERT(rv != PUFFS_NOSUCHCOOKIE);
	if (rv != 0)
		return rv;
	rv = vn_lock(*vpp, lktype);
	if (rv != 0) {
		vrele(*vpp);
		*vpp = NULL;
		return rv;
	}
	return 0;
}

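/*
 * Fetch file system statistics from the userspace server. While the
 * mount is still in progress, return EINPROGRESS instead of asking
 * userspace, to avoid deadlocking against mount(2).
 */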
int
puffs_vfsop_statvfs(struct mount *mp, struct statvfs *sbp)
{
	PUFFS_MSG_VARS(vfs, statvfs);
	struct puffs_mount *pmp;
	int error = 0;

	pmp = MPTOPUFFSMP(mp);

	/*
	 * If we are mounting, it means that the userspace counterpart
	 * is calling mount(2), but mount(2) also calls statvfs. So
	 * requesting statvfs from userspace would mean a deadlock.
	 * Compensate.
	 */
	if (__predict_false(pmp->pmp_status == PUFFSTAT_MOUNTING))
		return EINPROGRESS;

	PUFFS_MSG_ALLOC(vfs, statvfs);
	puffs_msg_setinfo(park_statvfs, PUFFSOP_VFS, PUFFS_VFS_STATVFS, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_statvfs, error);
	error = checkerr(pmp, error, __func__);
	statvfs_msg->pvfsr_sb.f_iosize = DEV_BSIZE;

	/*
	 * Try to produce a sensible result even in the event
	 * of userspace error.
	 *
	 * XXX: cache the copy in non-error case
	 */
	if (!error) {
		puffs_statvfs_to_statvfs(&statvfs_msg->pvfsr_sb, sbp);
	}
	copy_statvfs_info(sbp, mp);

	PUFFS_MSG_RELEASE(statvfs);
	return error;
}

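/*
 * Helpers for puffs_vfsop_sync(): flush cached pages and dirty buffers
 * of regular vnodes before the sync request is sent to the file server.
 */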
static bool
pageflush_selector(void *cl, struct vnode *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	return vp->v_type == VREG &&
	    !(LIST_EMPTY(&vp->v_dirtyblkhd) && UVM_OBJ_IS_CLEAN(&vp->v_uobj));
}

static int
pageflush(struct mount *mp, kauth_cred_t cred, int waitfor)
{
	struct puffs_node *pn;
	struct vnode *vp;
	struct vnode_iterator *marker;
	int error, rv, fsyncwait;

	error = 0;
	fsyncwait = (waitfor == MNT_WAIT) ? FSYNC_WAIT : 0;

	/*
	 * Sync all cached data from regular vnodes (which are not
	 * currently locked, see below). After this we call VFS_SYNC
	 * for the fs server, which should handle data and metadata for
	 * all the nodes it knows to exist.
	 */
	vfs_vnode_iterator_init(mp, &marker);
	while ((vp = vfs_vnode_iterator_next(marker, pageflush_selector,
	    NULL)))
	{
		/*
		 * Here we try to get a reference to the vnode and to
		 * lock it. This is mostly cargo-culted, but I will
		 * offer an explanation to why I believe this might
		 * actually do the right thing.
		 *
		 * If the vnode is a goner, we quite obviously don't need
		 * to sync it.
		 *
		 * If the vnode was busy, we don't need to sync it because
		 * this is never called with MNT_WAIT except from
		 * dounmount(), when we are wait-flushing all the dirty
		 * vnodes through other routes in any case. So there,
		 * sync() doesn't actually sync. Happy now?
		 */
		error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
		if (error) {
			vrele(vp);
			continue;
		}
		pn = VPTOPP(vp);
		/* hmm.. is the FAF thing entirely sensible? */
		if (waitfor == MNT_LAZY) {
			mutex_enter(vp->v_interlock);
			pn->pn_stat |= PNODE_FAF;
			mutex_exit(vp->v_interlock);
		}
		rv = VOP_FSYNC(vp, cred, fsyncwait, 0, 0);
		if (waitfor == MNT_LAZY) {
			mutex_enter(vp->v_interlock);
			pn->pn_stat &= ~PNODE_FAF;
			mutex_exit(vp->v_interlock);
		}
		if (rv)
			error = rv;
		vput(vp);
	}
	vfs_vnode_iterator_destroy(marker);

	return error;
}

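/*
 * Sync the file system: flush locally cached data first, then send
 * a PUFFS_VFS_SYNC request to the userspace server.
 */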
int
puffs_vfsop_sync(struct mount *mp, int waitfor, struct kauth_cred *cred)
{
	PUFFS_MSG_VARS(vfs, sync);
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	int error, rv;

	error = pageflush(mp, cred, waitfor);

	/* sync fs */
	PUFFS_MSG_ALLOC(vfs, sync);
	sync_msg->pvfsr_waitfor = waitfor;
	puffs_credcvt(&sync_msg->pvfsr_cred, cred);
	puffs_msg_setinfo(park_sync, PUFFSOP_VFS, PUFFS_VFS_SYNC, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_sync, rv);
	rv = checkerr(pmp, rv, __func__);
	if (rv)
		error = rv;

	PUFFS_MSG_RELEASE(sync);
	return error;
}

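/*
 * Convert a file handle into a vnode by asking the userspace server
 * for the node cookie matching the handle data.
 */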
int
puffs_vfsop_fhtovp(struct mount *mp, struct fid *fhp, int lktype,
    struct vnode **vpp)
{
	PUFFS_MSG_VARS(vfs, fhtonode);
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct vnode *vp;
	void *fhdata;
	size_t argsize, fhlen;
	int error;

	if (pmp->pmp_args.pa_fhsize == 0)
		return EOPNOTSUPP;

	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) {
		fhlen = fhp->fid_len;
		fhdata = fhp;
	} else {
		fhlen = PUFFS_FROMFHSIZE(fhp->fid_len);
		fhdata = fhp->fid_data;

		if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) {
			if (pmp->pmp_args.pa_fhsize < fhlen)
				return EINVAL;
		} else {
			if (pmp->pmp_args.pa_fhsize != fhlen)
				return EINVAL;
		}
	}

	argsize = sizeof(struct puffs_vfsmsg_fhtonode) + fhlen;
	puffs_msgmem_alloc(argsize, &park_fhtonode, (void *)&fhtonode_msg, 1);
	fhtonode_msg->pvfsr_dsize = fhlen;
	memcpy(fhtonode_msg->pvfsr_data, fhdata, fhlen);
	puffs_msg_setinfo(park_fhtonode, PUFFSOP_VFS, PUFFS_VFS_FHTOVP, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_fhtonode, error);
	error = checkerr(pmp, error, __func__);
	if (error)
		goto out;

	error = puffs_getvnode(mp, fhtonode_msg->pvfsr_fhcookie,
	    fhtonode_msg->pvfsr_vtype, fhtonode_msg->pvfsr_size,
	    fhtonode_msg->pvfsr_rdev, &vp);
	if (error)
		goto out;
	vn_lock(vp, lktype | LK_RETRY);

	*vpp = vp;
 out:
	puffs_msgmem_release(park_fhtonode);
	return error;
}

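/*
 * Convert a vnode into a file handle provided by the userspace server,
 * checking that the result fits into the caller's buffer.
 */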
int
puffs_vfsop_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
{
	PUFFS_MSG_VARS(vfs, nodetofh);
	struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
	size_t argsize, fhlen;
	int error;

	if (pmp->pmp_args.pa_fhsize == 0)
		return EOPNOTSUPP;

	/* if file handles are static len, we can test len immediately */
	if (((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) == 0)
	    && ((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) == 0)
	    && (PUFFS_FROMFHSIZE(*fh_size) < pmp->pmp_args.pa_fhsize)) {
		*fh_size = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);
		return E2BIG;
	}

	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
		fhlen = *fh_size;
	else
		fhlen = PUFFS_FROMFHSIZE(*fh_size);

	argsize = sizeof(struct puffs_vfsmsg_nodetofh) + fhlen;
	puffs_msgmem_alloc(argsize, &park_nodetofh, (void *)&nodetofh_msg, 1);
	nodetofh_msg->pvfsr_fhcookie = VPTOPNC(vp);
	nodetofh_msg->pvfsr_dsize = fhlen;
	puffs_msg_setinfo(park_nodetofh, PUFFSOP_VFS, PUFFS_VFS_VPTOFH, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_nodetofh, error);
	error = checkerr(pmp, error, __func__);

	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
		fhlen = nodetofh_msg->pvfsr_dsize;
	else if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC)
		fhlen = PUFFS_TOFHSIZE(nodetofh_msg->pvfsr_dsize);
	else
		fhlen = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);

	if (error) {
		if (error == E2BIG)
			*fh_size = fhlen;
		goto out;
	}

	if (fhlen > FHANDLE_SIZE_MAX) {
		puffs_senderr(pmp, PUFFS_ERR_VPTOFH, E2BIG,
		    "file handle too big", VPTOPNC(vp));
		error = EPROTO;
		goto out;
	}

	if (*fh_size < fhlen) {
		*fh_size = fhlen;
		error = E2BIG;
		goto out;
	}
	*fh_size = fhlen;

	if (fhp) {
		if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) {
			memcpy(fhp, nodetofh_msg->pvfsr_data, fhlen);
		} else {
			fhp->fid_len = *fh_size;
			memcpy(fhp->fid_data, nodetofh_msg->pvfsr_data,
			    nodetofh_msg->pvfsr_dsize);
		}
	}

 out:
	puffs_msgmem_release(park_nodetofh);
	return error;
}

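/*
 * Initialize a newly allocated vnode and its puffs_node for the
 * userspace cookie given as key.
 */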
int
puffs_vfsop_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	struct puffs_mount *pmp;
	struct puffs_node *pnode;

	KASSERT(key_len == sizeof(puffs_cookie_t));

	pmp = MPTOPUFFSMP(mp);

	/* Allocate and initialize the pnode. */
	pnode = pool_get(&puffs_pnpool, PR_WAITOK);
	memset(pnode, 0, sizeof(struct puffs_node));

	pnode->pn_vp = vp;
	memcpy(&pnode->pn_cookie, key, key_len);
	pnode->pn_refcount = 1;
	mutex_init(&pnode->pn_mtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pnode->pn_sizemtx, MUTEX_DEFAULT, IPL_NONE);
	selinit(&pnode->pn_sel);
	vp->v_tag = VT_PUFFS;
	vp->v_type = VNON;
	vp->v_op = puffs_vnodeop_p;
	if (pnode->pn_cookie == pmp->pmp_root_cookie)
		vp->v_vflag |= VV_ROOT;
	vp->v_data = pnode;

	genfs_node_init(vp, &puffs_genfsops);
	uvm_vnp_setsize(vp, 0);

	*new_key = &pnode->pn_cookie;
	return 0;
}

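/*
 * Global initialization: create the puffs node and vattr pools and
 * set up the message interface.
 */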
void
puffs_vfsop_init(void)
{

	/* some checks depend on this */
	KASSERT(VNOVAL == VSIZENOTSET);

	pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
	    "puffpnpl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&puffs_vapool, sizeof(struct vattr), 0, 0, 0,
	    "puffvapl", &pool_allocator_nointr, IPL_NONE);
	puffs_msgif_init();
}

void
puffs_vfsop_done(void)
{

	puffs_msgif_destroy();
	pool_destroy(&puffs_pnpool);
	pool_destroy(&puffs_vapool);
}

int
puffs_vfsop_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
{

	return EOPNOTSUPP;
}

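/*
 * Relay an extattrctl request to the userspace server. If a vnode is
 * given, its lock is dropped (while a node reference is held) for the
 * duration of the wait.
 */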
int
puffs_vfsop_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
    int attrnamespace, const char *attrname)
{
	PUFFS_MSG_VARS(vfs, extattrctl);
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct puffs_node *pnp;
	puffs_cookie_t pnc;
	int error, flags;

	if (vp) {
		/* doesn't make sense for puffs servers */
		if (vp->v_mount != mp)
			return EXDEV;
		pnp = vp->v_data;
		pnc = pnp->pn_cookie;
		flags = PUFFS_EXTATTRCTL_HASNODE;
	} else {
		pnp = pnc = NULL;
		flags = 0;
	}

	PUFFS_MSG_ALLOC(vfs, extattrctl);
	extattrctl_msg->pvfsr_cmd = cmd;
	extattrctl_msg->pvfsr_attrnamespace = attrnamespace;
	extattrctl_msg->pvfsr_flags = flags;
	if (attrname) {
		strlcpy(extattrctl_msg->pvfsr_attrname, attrname,
		    sizeof(extattrctl_msg->pvfsr_attrname));
		extattrctl_msg->pvfsr_flags |= PUFFS_EXTATTRCTL_HASATTRNAME;
	}
	puffs_msg_setinfo(park_extattrctl,
	    PUFFSOP_VFS, PUFFS_VFS_EXTATTRCTL, pnc);

	puffs_msg_enqueue(pmp, park_extattrctl);
	if (vp) {
		mutex_enter(&pnp->pn_mtx);
		puffs_referencenode(pnp);
		mutex_exit(&pnp->pn_mtx);
		VOP_UNLOCK(vp);
	}
	error = puffs_msg_wait2(pmp, park_extattrctl, pnp, NULL);
	PUFFS_MSG_RELEASE(extattrctl);
	if (vp) {
		puffs_releasenode(pnp);
	}

	return checkerr(pmp, error, __func__);
}

const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
	&puffs_vnodeop_opv_desc,
	&puffs_specop_opv_desc,
	&puffs_fifoop_opv_desc,
	&puffs_msgop_opv_desc,
	NULL,
};

struct vfsops puffs_vfsops = {
	.vfs_name = MOUNT_PUFFS,
	.vfs_min_mount_data = sizeof (struct puffs_kargs),
	.vfs_mount = puffs_vfsop_mount,
	.vfs_start = puffs_vfsop_start,
	.vfs_unmount = puffs_vfsop_unmount,
	.vfs_root = puffs_vfsop_root,
	.vfs_quotactl = (void *)eopnotsupp,
	.vfs_statvfs = puffs_vfsop_statvfs,
	.vfs_sync = puffs_vfsop_sync,
	.vfs_vget = (void *)eopnotsupp,
	.vfs_loadvnode = puffs_vfsop_loadvnode,
	.vfs_fhtovp = puffs_vfsop_fhtovp,
	.vfs_vptofh = puffs_vfsop_vptofh,
	.vfs_init = puffs_vfsop_init,
	.vfs_done = puffs_vfsop_done,
	.vfs_snapshot = puffs_vfsop_snapshot,
	.vfs_extattrctl = puffs_vfsop_extattrctl,
	.vfs_suspendctl = genfs_suspendctl,
	.vfs_renamelock_enter = genfs_renamelock_enter,
	.vfs_renamelock_exit = genfs_renamelock_exit,
	.vfs_fsync = (void *)eopnotsupp,
	.vfs_opv_descs = puffs_vnodeopv_descs
};

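/*
 * Module control: attach/detach the puffs vfsops when the module is
 * loaded or unloaded.
 */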
static int
puffs_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		return vfs_attach(&puffs_vfsops);
	case MODULE_CMD_FINI:
		return vfs_detach(&puffs_vfsops);
	default:
		return ENOTTY;
	}
}