/*	$NetBSD: puffs_vfsops.c,v 1.27 2007/01/29 15:42:50 hannken Exp $	*/

/*
 * Copyright (c) 2005, 2006  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.27 2007/01/29 15:42:50 hannken Exp $");

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/extattr.h>
#include <sys/queue.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>

#include <lib/libkern/libkern.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

VFS_PROTOS(puffs);

MALLOC_DEFINE(M_PUFFS, "puffs", "pass-to-userspace file system structures");

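/*
 * Default and upper limit for the number of puffs_node hash buckets.
 * puffs_pnodebuckets below is the value consulted for new mounts;
 * puffs_mount() clamps the per-mount count to [1, PUFFS_MAXPNODEBUCKETS].
 */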
#ifndef PUFFS_PNODEBUCKETS
#define PUFFS_PNODEBUCKETS 256
#endif
#ifndef PUFFS_MAXPNODEBUCKETS
#define PUFFS_MAXPNODEBUCKETS 65536
#endif
int puffs_pnodebuckets = PUFFS_PNODEBUCKETS;

int
puffs_mount(struct mount *mp, const char *path, void *data,
	    struct nameidata *ndp, struct lwp *l)
{
	struct puffs_mount *pmp = NULL;
	struct puffs_args *args;
	char namebuf[PUFFSNAMESIZE+sizeof(PUFFS_NAMEPREFIX)+1]; /* spooky */
	int error = 0, i;

	if (mp->mnt_flag & MNT_GETARGS) {
		pmp = MPTOPUFFSMP(mp);
		return copyout(&pmp->pmp_args, data, sizeof(struct puffs_args));
	}

	/* update is not supported currently */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	/*
	 * We need the file system name.
	 */
	if (!data)
		return EINVAL;

	MALLOC(args, struct puffs_args *, sizeof(struct puffs_args),
	    M_PUFFS, M_WAITOK);

	error = copyin(data, args, sizeof(struct puffs_args));
	if (error)
		goto out;

	/* devel phase */
	if (args->pa_vers != (PUFFSVERSION | PUFFSDEVELVERS)) {
		printf("puffs_mount: development version mismatch\n");
		error = EINVAL;
		goto out;
	}

	/* nuke spy bits */
	args->pa_flags &= PUFFS_KFLAG_MASK;

	/* build real name */
	(void)strlcpy(namebuf, PUFFS_NAMEPREFIX, sizeof(namebuf));
	(void)strlcat(namebuf, args->pa_name, sizeof(namebuf));

	/* inform the user server whether it got the max request size it wanted */
	if (args->pa_maxreqlen == 0 || args->pa_maxreqlen > PUFFS_REQ_MAXSIZE)
		args->pa_maxreqlen = PUFFS_REQ_MAXSIZE;
	else if (args->pa_maxreqlen < PUFFS_REQSTRUCT_MAX)
		args->pa_maxreqlen = PUFFS_REQSTRUCT_MAX;
	(void)strlcpy(args->pa_name, namebuf, sizeof(args->pa_name));

	error = copyout(args, data, sizeof(struct puffs_args));
	if (error)
		goto out;

	error = set_statvfs_info(path, UIO_USERSPACE, namebuf,
	    UIO_SYSSPACE, mp, l);
	if (error)
		goto out;
	mp->mnt_stat.f_iosize = DEV_BSIZE;

	MALLOC(pmp, struct puffs_mount *, sizeof(struct puffs_mount),
	    M_PUFFS, M_WAITOK | M_ZERO);

	mp->mnt_fs_bshift = DEV_BSHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
	mp->mnt_data = pmp;
#ifdef NEWVNGATE
	mp->mnt_iflag |= IMNT_HAS_TRANS;
#endif

	pmp->pmp_status = PUFFSTAT_MOUNTING;
	pmp->pmp_nextreq = 0;
	pmp->pmp_mp = mp;
	pmp->pmp_req_maxsize = args->pa_maxreqlen;
	pmp->pmp_args = *args;

	/* puffs_node hash buckets */
	pmp->pmp_npnodehash = puffs_pnodebuckets;
	if (pmp->pmp_npnodehash < 1)
		pmp->pmp_npnodehash = 1;
	if (pmp->pmp_npnodehash > PUFFS_MAXPNODEBUCKETS)
		pmp->pmp_npnodehash = PUFFS_MAXPNODEBUCKETS;
	pmp->pmp_pnodehash = malloc
	    (sizeof(struct puffs_pnode_hashlist *) * pmp->pmp_npnodehash,
	    M_PUFFS, M_WAITOK);
	for (i = 0; i < pmp->pmp_npnodehash; i++)
		LIST_INIT(&pmp->pmp_pnodehash[i]);

	/*
	 * Inform the fileops processing code that we have a mountpoint.
	 * If it doesn't know about anyone with our pid/fd having the
	 * device open, punt.
	 */
	if (puffs_setpmp(l->l_proc->p_pid, args->pa_fd, pmp)) {
		error = ENOENT;
		goto out;
	}

	simple_lock_init(&pmp->pmp_lock);
	TAILQ_INIT(&pmp->pmp_req_touser);
	TAILQ_INIT(&pmp->pmp_req_replywait);
	TAILQ_INIT(&pmp->pmp_req_sizepark);

	DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
	    mp, MPTOPUFFSMP(mp)));

	vfs_getnewfsid(mp);

 out:
	if (error && pmp && pmp->pmp_pnodehash)
		free(pmp->pmp_pnodehash, M_PUFFS);
	if (error && pmp)
		FREE(pmp, M_PUFFS);
	FREE(args, M_PUFFS);
	return error;
}

/*
 * This is called from the first "Hello, I'm alive" ioctl
 * from userspace.
 */
int
puffs_start2(struct puffs_mount *pmp, struct puffs_startreq *sreq)
{
	struct puffs_node *pn;
	struct mount *mp;

	mp = PMPTOMP(pmp);

	simple_lock(&pmp->pmp_lock);

	/*
	 * if someone has issued a VFS_ROOT() already, fill in the
	 * vnode cookie.
	 */
	pn = NULL;
	if (pmp->pmp_root) {
		pn = VPTOPP(pmp->pmp_root);
		pn->pn_cookie = sreq->psr_cookie;
	}

	/* We're good to fly */
	pmp->pmp_rootcookie = sreq->psr_cookie;
	pmp->pmp_status = PUFFSTAT_RUNNING;
	simple_unlock(&pmp->pmp_lock);

	/* do the VFS_STATVFS() we missed out on in sys_mount() */
	copy_statvfs_info(&sreq->psr_sb, mp);
	(void)memcpy(&mp->mnt_stat, &sreq->psr_sb, sizeof(mp->mnt_stat));
	mp->mnt_stat.f_iosize = DEV_BSIZE;

	DPRINTF(("puffs_start2: root vp %p, cur root pnode %p, cookie %p\n",
	    pmp->pmp_root, pn, sreq->psr_cookie));

	return 0;
}

int
puffs_start(struct mount *mp, int flags, struct lwp *l)
{

	/*
	 * This cannot travel to userspace, as this is called from
	 * the kernel context of the process doing mount(2).  But
	 * it's probably a safe bet that the process doing mount(2)
	 * realizes it needs to start the filesystem also...
	 */
	return 0;
}

int
puffs_unmount(struct mount *mp, int mntflags, struct lwp *l)
{
	struct puffs_mount *pmp;
	int error, force;

	PUFFS_VFSREQ(unmount);

	error = 0;
	force = mntflags & MNT_FORCE;
	pmp = MPTOPUFFSMP(mp);

	DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
	    "status 0x%x\n", pmp->pmp_status));

	/*
	 * flush all the vnodes.  VOP_RECLAIM() takes care that the
	 * root vnode does not get flushed until unmount.  The
	 * userspace root node cookie is stored in the mount
	 * structure, so we can always re-instantiate a root vnode,
	 * should userspace unmount decide it doesn't want to
	 * cooperate.
	 */
	error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
	if (error)
		goto out;

	/*
	 * If we are not DYING, we should ask userspace's opinion
	 * about the situation.
	 */
	simple_lock(&pmp->pmp_lock);
	if (pmp->pmp_status != PUFFSTAT_DYING) {
		pmp->pmp_unmounting = 1;
		simple_unlock(&pmp->pmp_lock);

		unmount_arg.pvfsr_flags = mntflags;
		unmount_arg.pvfsr_pid = puffs_lwp2pid(l);

		error = puffs_vfstouser(pmp, PUFFS_VFS_UNMOUNT,
		     &unmount_arg, sizeof(unmount_arg));
		DPRINTF(("puffs_unmount: error %d force %d\n", error, force));

		simple_lock(&pmp->pmp_lock);
		pmp->pmp_unmounting = 0;
		wakeup(&pmp->pmp_unmounting);
	}

	/*
	 * if userspace cooperated or we really need to die,
	 * screw what userland thinks and just die.
	 */
	if (error == 0 || force) {
		/* tell waiters & other resources to go unwait themselves */
		puffs_userdead(pmp);
		puffs_nukebypmp(pmp);

		/*
		 * Sink waiters.  This is still not perfect, since the
		 * draining is done after userret, not when they really
		 * exit the file system.  It will probably work, as almost
		 * no call will block and therefore cause a context switch,
		 * and will therefore be protected by the biglock after
		 * exiting userspace.  But ... it's an imperfect world.
		 */
		while (pmp->pmp_req_touser_waiters != 0)
			ltsleep(&pmp->pmp_req_touser_waiters, PVFS,
			    "puffsink", 0, &pmp->pmp_lock);
		simple_unlock(&pmp->pmp_lock);

		/* free resources now that we hopefully have no waiters left */
		free(pmp->pmp_pnodehash, M_PUFFS);
		FREE(pmp, M_PUFFS);
		error = 0;
	} else {
		simple_unlock(&pmp->pmp_lock);
	}

 out:
	DPRINTF(("puffs_unmount: return %d\n", error));
	return error;
}

/*
 * This doesn't need to travel to userspace
 */
int
puffs_root(struct mount *mp, struct vnode **vpp)
{
	struct puffs_mount *pmp;
	struct puffs_node *pn;
	struct vnode *vp;

	pmp = MPTOPUFFSMP(mp);

	/*
	 * pmp_lock must be held if vref()'ing or vrele()'ing the
	 * root vnode.  The latter is controlled by puffs_inactive().
	 */
	simple_lock(&pmp->pmp_lock);
	vp = pmp->pmp_root;
	if (vp) {
		simple_lock(&vp->v_interlock);
		simple_unlock(&pmp->pmp_lock);
		pn = VPTOPP(vp);
		if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK))
			goto grabnew;
		*vpp = vp;
		return 0;
	} else
		simple_unlock(&pmp->pmp_lock);

	/* XXX: this is wrong, so FIXME */
 grabnew:

	/*
	 * So, didn't have the magic root vnode available.
	 * No matter, grab another and stuff it with the cookie.
	 */
	if (puffs_getvnode(mp, pmp->pmp_rootcookie, VDIR, 0, 0, &vp))
		panic("sloppy programming");

	simple_lock(&pmp->pmp_lock);
	/*
	 * check if by mysterious force someone else created a root
	 * vnode while we were executing.
	 */
	if (pmp->pmp_root) {
		vref(pmp->pmp_root);
		simple_unlock(&pmp->pmp_lock);
		puffs_putvnode(vp);
		vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);
		*vpp = pmp->pmp_root;
		return 0;
	}

	/* store cache */
	vp->v_flag = VROOT;
	pmp->pmp_root = vp;
	simple_unlock(&pmp->pmp_lock);

	vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);

	*vpp = vp;
	return 0;
}

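/*
 * Quotas are not supported, so this is a stub.
 */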
int
puffs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg, struct lwp *l)
{

	return EOPNOTSUPP;
}

int
puffs_statvfs(struct mount *mp, struct statvfs *sbp, struct lwp *l)
{
	struct puffs_vfsreq_statvfs *statvfs_arg; /* too big for stack */
	struct puffs_mount *pmp;
	int error = 0;

	pmp = MPTOPUFFSMP(mp);

	/*
	 * If we are mounting, it means that the userspace counterpart
	 * is calling mount(2), but mount(2) also calls statvfs.  So
	 * requesting statvfs from userspace would mean a deadlock.
	 * Compensate.
	 */
	if (pmp->pmp_status == PUFFSTAT_MOUNTING)
		return EINPROGRESS;

	/* too big for stack */
	MALLOC(statvfs_arg, struct puffs_vfsreq_statvfs *,
	    sizeof(struct puffs_vfsreq_statvfs), M_PUFFS, M_WAITOK | M_ZERO);
	statvfs_arg->pvfsr_pid = puffs_lwp2pid(l);

	error = puffs_vfstouser(pmp, PUFFS_VFS_STATVFS,
	    statvfs_arg, sizeof(*statvfs_arg));
	statvfs_arg->pvfsr_sb.f_iosize = DEV_BSIZE;

	/*
	 * Try to produce a sensible result even in the event
	 * of userspace error.
	 *
	 * XXX: cache the copy in non-error case
	 */
	if (!error) {
		copy_statvfs_info(&statvfs_arg->pvfsr_sb, mp);
		(void)memcpy(sbp, &statvfs_arg->pvfsr_sb,
		    sizeof(struct statvfs));
	} else {
		copy_statvfs_info(sbp, mp);
	}

	FREE(statvfs_arg, M_PUFFS);
	return error;
}

static int
pageflush(struct mount *mp, int waitfor, int suspending)
{
	struct puffs_node *pn;
	struct vnode *vp, *nvp;
	int error, rv, ppflags;

	KASSERT(((waitfor == MNT_WAIT) && suspending) == 0);
	KASSERT((suspending == 0)
	    || (fstrans_is_owner(mp)
	      && fstrans_getstate(mp) == FSTRANS_SUSPENDING));

	error = 0;
	ppflags = PGO_CLEANIT | PGO_ALLPAGES;
	if (waitfor == MNT_WAIT)
		ppflags |= PGO_SYNCIO;

	/*
	 * Sync all cached data from regular vnodes (which are not
	 * currently locked, see below).  After this we call VFS_SYNC
	 * for the fs server, which should handle data and metadata for
	 * all the nodes it knows to exist.
	 */
	simple_lock(&mntvnode_slock);
 loop:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		/* check if we're on the right list */
		if (vp->v_mount != mp)
			goto loop;

		simple_lock(&vp->v_interlock);
		pn = VPTOPP(vp);
		nvp = TAILQ_NEXT(vp, v_mntvnodes);

		if (vp->v_type != VREG || UVM_OBJ_IS_CLEAN(&vp->v_uobj)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}

		simple_unlock(&mntvnode_slock);

		/*
		 * Here we try to get a reference to the vnode and to
		 * lock it.  This is mostly cargo-culted, but I will
		 * offer an explanation as to why I believe this might
		 * actually do the right thing.
		 *
		 * If the vnode is a goner, we quite obviously don't need
		 * to sync it.
		 *
		 * If the vnode was busy, we don't need to sync it because
		 * this is never called with MNT_WAIT except from
		 * dounmount(), when we are wait-flushing all the dirty
		 * vnodes through other routes in any case.  So there,
		 * sync() doesn't actually sync.  Happy now?
		 *
		 * NOTE: if we're suspending, vget() does NOT lock.
		 * See puffs_lock() for details.
		 */
		rv = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (rv) {
			simple_lock(&mntvnode_slock);
			if (rv == ENOENT)
				goto loop;
			continue;
		}

		/*
		 * Thread information to puffs_strategy() through the
		 * pnode flags: we want to issue the putpages operations
		 * as FAF if we're suspending, since it's very probable
		 * that our execution context is that of the userspace
		 * daemon.  We can do this because:
		 *   + we send the "going to suspend" notification prior
		 *     to this part
		 *   + if any of the writes fail in userspace, it's the
		 *     file system server's problem to decide if this was a
		 *     failed snapshot when it gets the "snapshot complete"
		 *     notification.
		 *   + if any of the writes fail in the kernel already, we
		 *     immediately fail *and* notify the user server of
		 *     failure.
		 *
		 * We also do FAFs if we're called from the syncer.  This
		 * is just general optimization for trickle sync: no need
		 * to really guarantee that the stuff ended on backing
		 * storage.
		 * TODO: Maybe also hint the user server about this twist?
		 */
		simple_lock(&vp->v_interlock);
		if (suspending || waitfor == MNT_LAZY)
			pn->pn_stat |= PNODE_SUSPEND;
		rv = VOP_PUTPAGES(vp, 0, 0, ppflags);
		if (suspending || waitfor == MNT_LAZY) {
			simple_lock(&vp->v_interlock);
			pn->pn_stat &= ~PNODE_SUSPEND;
			simple_unlock(&vp->v_interlock);
		}
		if (rv)
			error = rv;
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);

	return error;
}

int
puffs_sync(struct mount *mp, int waitfor, struct kauth_cred *cred,
	struct lwp *l)
{
	int error, rv;

	PUFFS_VFSREQ(sync);

	error = pageflush(mp, waitfor, 0);

	/* sync fs */
	sync_arg.pvfsr_waitfor = waitfor;
	puffs_credcvt(&sync_arg.pvfsr_cred, cred);
	sync_arg.pvfsr_pid = puffs_lwp2pid(l);

	rv = puffs_vfstouser(MPTOPUFFSMP(mp), PUFFS_VFS_SYNC,
	    &sync_arg, sizeof(sync_arg));
	if (rv)
		error = rv;

	return error;
}

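/*
 * puffs addresses file server nodes with cookies rather than inode
 * numbers, so looking up a vnode by inode number is not supported.
 */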
int
puffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{

	return EOPNOTSUPP;
}

#if 0
/*ARGSUSED*/
int
puffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{

	return EOPNOTSUPP;
}

/*ARGSUSED*/
int
puffs_vptofh(struct vnode *vp, struct fid *fhp)
{

	return EOPNOTSUPP;
}
#endif

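/*
 * File system attach/detach hooks.  The only work needed here is for
 * the LKM case: attach the malloc type and create the puffs_node pool
 * at load time, and tear them down again at unload in puffs_done().
 */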
void
puffs_init()
{

#ifdef _LKM
	malloc_type_attach(M_PUFFS);
	pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
	    "puffspnpl", &pool_allocator_nointr);
#endif

	return;
}

void
puffs_done()
{

#ifdef _LKM
	pool_destroy(&puffs_pnpool);
	malloc_type_detach(M_PUFFS);
#endif

	return;
}

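/*
 * File system snapshots are not supported.
 */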
int
puffs_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
{

	return EOPNOTSUPP;
}

int
puffs_suspendctl(struct mount *mp, int cmd)
{
	struct puffs_mount *pmp;
	int error;

	pmp = MPTOPUFFSMP(mp);
	switch (cmd) {
	case SUSPEND_SUSPEND:
		DPRINTF(("puffs_suspendctl: suspending\n"));
		if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
			break;
		puffs_suspendtouser(pmp, PUFFS_SUSPEND_START);

		error = pageflush(mp, 0, 1);
		if (error == 0)
			error = fstrans_setstate(mp, FSTRANS_SUSPENDED);

		if (error != 0) {
			puffs_suspendtouser(pmp, PUFFS_SUSPEND_ERROR);
			(void) fstrans_setstate(mp, FSTRANS_NORMAL);
			break;
		}

		puffs_suspendtouser(pmp, PUFFS_SUSPEND_SUSPENDED);

		break;

	case SUSPEND_RESUME:
		DPRINTF(("puffs_suspendctl: resume\n"));
		error = 0;
		(void) fstrans_setstate(mp, FSTRANS_NORMAL);
		puffs_suspendtouser(pmp, PUFFS_SUSPEND_RESUME);
		break;

	default:
		error = EINVAL;
		break;
	}

	DPRINTF(("puffs_suspendctl: return %d\n", error));
	return error;
}

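/*
 * Vnode operation vector descriptions and the vfsops structure which
 * hooks puffs into the VFS layer.  fhtovp and vptofh are routed
 * straight to eopnotsupp (see also the #if 0 stubs above).
 */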
const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
	&puffs_vnodeop_opv_desc,
	&puffs_specop_opv_desc,
	&puffs_fifoop_opv_desc,
	&puffs_msgop_opv_desc,
	NULL,
};

struct vfsops puffs_vfsops = {
	MOUNT_PUFFS,
	puffs_mount,		/* mount	*/
	puffs_start,		/* start	*/
	puffs_unmount,		/* unmount	*/
	puffs_root,		/* root		*/
	puffs_quotactl,		/* quotactl	*/
	puffs_statvfs,		/* statvfs	*/
	puffs_sync,		/* sync		*/
	puffs_vget,		/* vget		*/
	(void *)eopnotsupp,	/* fhtovp	*/
	(void *)eopnotsupp,	/* vptofh	*/
	puffs_init,		/* init		*/
	NULL,			/* reinit	*/
	puffs_done,		/* done		*/
	NULL,			/* mountroot	*/
	puffs_snapshot,		/* snapshot	*/
	vfs_stdextattrctl,	/* extattrctl	*/
	puffs_suspendctl,	/* suspendctl	*/
	puffs_vnodeopv_descs,	/* vnodeops	*/
	0,			/* refcount	*/
	{ NULL, NULL }
};
VFS_ATTACH(puffs_vfsops);