      1 /*	$NetBSD: puffs_vfsops.c,v 1.28.6.3 2007/04/05 21:57:48 ad Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2005, 2006  Antti Kantee.  All Rights Reserved.
      5  *
      6  * Development of this software was supported by the
      7  * Google Summer of Code program and the Ulla Tuominen Foundation.
      8  * The Google SoC project was mentored by Bill Studenmund.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. The name of the company nor the name of the author may be used to
     19  *    endorse or promote products derived from this software without specific
     20  *    prior written permission.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
     23  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     24  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     25  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     28  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     32  * SUCH DAMAGE.
     33  */
     34 
     35 #include <sys/cdefs.h>
     36 __KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.28.6.3 2007/04/05 21:57:48 ad Exp $");
     37 
     38 #include <sys/param.h>
     39 #include <sys/mount.h>
     40 #include <sys/malloc.h>
     41 #include <sys/extattr.h>
     42 #include <sys/queue.h>
     43 #include <sys/vnode.h>
     44 #include <sys/dirent.h>
     45 #include <sys/kauth.h>
     46 #include <sys/fstrans.h>
     47 #include <sys/proc.h>
     48 
     49 #include <lib/libkern/libkern.h>
     50 
     51 #include <fs/puffs/puffs_msgif.h>
     52 #include <fs/puffs/puffs_sys.h>
     53 
     54 VFS_PROTOS(puffs);
     55 
     56 MALLOC_DEFINE(M_PUFFS, "puffs", "pass-to-userspace file system structures");
     57 
     58 #ifndef PUFFS_PNODEBUCKETS
     59 #define PUFFS_PNODEBUCKETS 256
     60 #endif
     61 #ifndef PUFFS_MAXPNODEBUCKETS
     62 #define PUFFS_MAXPNODEBUCKETS 65536
     63 #endif
     64 int puffs_pnodebuckets = PUFFS_PNODEBUCKETS;
     65 
     66 int
     67 puffs_mount(struct mount *mp, const char *path, void *data,
     68 	    struct nameidata *ndp, struct lwp *l)
     69 {
     70 	struct puffs_mount *pmp = NULL;
     71 	struct puffs_args *args;
     72 	char namebuf[PUFFSNAMESIZE+sizeof(PUFFS_NAMEPREFIX)+1]; /* spooky */
     73 	int error = 0, i;
     74 
     75 	if (mp->mnt_flag & MNT_GETARGS) {
     76 		pmp = MPTOPUFFSMP(mp);
     77 		return copyout(&pmp->pmp_args, data, sizeof(struct puffs_args));
     78 	}
     79 
     80 	/* update is not supported currently */
     81 	if (mp->mnt_flag & MNT_UPDATE)
     82 		return EOPNOTSUPP;
     83 
     84 	/*
     85 	 * We need the file system name
     86 	 */
     87 	if (!data)
     88 		return EINVAL;
     89 
     90 	MALLOC(args, struct puffs_args *, sizeof(struct puffs_args),
     91 	    M_PUFFS, M_WAITOK);
     92 
     93 	error = copyin(data, args, sizeof(struct puffs_args));
     94 	if (error)
     95 		goto out;
     96 
     97 	/* devel phase */
     98 	if (args->pa_vers != (PUFFSVERSION | PUFFSDEVELVERS)) {
     99 		printf("puffs_mount: development version mismatch\n");
    100 		error = EINVAL;
    101 		goto out;
    102 	}
    103 
    104 	/* nuke spy bits */
    105 	args->pa_flags &= PUFFS_KFLAG_MASK;
    106 
    107 	/* build real name */
    108 	(void)strlcpy(namebuf, PUFFS_NAMEPREFIX, sizeof(namebuf));
    109 	(void)strlcat(namebuf, args->pa_name, sizeof(namebuf));
    110 
    111 	/* inform the user server whether it got the max request size it wanted */
    112 	if (args->pa_maxreqlen == 0 || args->pa_maxreqlen > PUFFS_REQ_MAXSIZE)
    113 		args->pa_maxreqlen = PUFFS_REQ_MAXSIZE;
    114 	else if (args->pa_maxreqlen < PUFFS_REQSTRUCT_MAX)
    115 		args->pa_maxreqlen = PUFFS_REQSTRUCT_MAX;
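        	/* pass the prefixed name back to the server via the copyout below */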
    116 	(void)strlcpy(args->pa_name, namebuf, sizeof(args->pa_name));
    117 
    118 	error = copyout(args, data, sizeof(struct puffs_args));
    119 	if (error)
    120 		goto out;
    121 
    122 	error = set_statvfs_info(path, UIO_USERSPACE, namebuf,
    123 	    UIO_SYSSPACE, mp, l);
    124 	if (error)
    125 		goto out;
    126 	mp->mnt_stat.f_iosize = DEV_BSIZE;
    127 
    128 	MALLOC(pmp, struct puffs_mount *, sizeof(struct puffs_mount),
    129 	    M_PUFFS, M_WAITOK | M_ZERO);
    130 
    131 	mp->mnt_fs_bshift = DEV_BSHIFT;
    132 	mp->mnt_dev_bshift = DEV_BSHIFT;
    133 	mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
    134 	mp->mnt_data = pmp;
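        	/* note that we participate in fstrans suspension, see puffs_suspendctl() */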
    135 	mp->mnt_iflag |= IMNT_HAS_TRANS;
    136 
    137 	pmp->pmp_status = PUFFSTAT_MOUNTING;
    138 	pmp->pmp_nextreq = 0;
    139 	pmp->pmp_mp = mp;
    140 	pmp->pmp_req_maxsize = args->pa_maxreqlen;
    141 	pmp->pmp_args = *args;
    142 
    143 	/* puffs_node hash buckets */
    144 	pmp->pmp_npnodehash = puffs_pnodebuckets;
    145 	if (pmp->pmp_npnodehash < 1)
    146 		pmp->pmp_npnodehash = 1;
    147 	if (pmp->pmp_npnodehash > PUFFS_MAXPNODEBUCKETS)
    148 		pmp->pmp_npnodehash = PUFFS_MAXPNODEBUCKETS;
    149 	pmp->pmp_pnodehash = malloc
    150 	    (sizeof(struct puffs_pnode_hashlist *) * pmp->pmp_npnodehash,
    151 	    M_PUFFS, M_WAITOK);
    152 	for (i = 0; i < pmp->pmp_npnodehash; i++)
    153 		LIST_INIT(&pmp->pmp_pnodehash[i]);
    154 
    155 	/*
    156 	 * Inform the fileops processing code that we have a mountpoint.
    157 	 * If it doesn't know about anyone with our pid/fd having the
    158 	 * device open, punt
    159 	 */
    160 	if (puffs_setpmp(l->l_proc->p_pid, args->pa_fd, pmp)) {
    161 		error = ENOENT;
    162 		goto out;
    163 	}
    164 
    165 	simple_lock_init(&pmp->pmp_lock);
    166 	TAILQ_INIT(&pmp->pmp_req_touser);
    167 	TAILQ_INIT(&pmp->pmp_req_replywait);
    168 	TAILQ_INIT(&pmp->pmp_req_sizepark);
    169 
    170 	DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
    171 	    mp, MPTOPUFFSMP(mp)));
    172 
    173 	vfs_getnewfsid(mp);
    174 
    175  out:
    176 	if (error && pmp && pmp->pmp_pnodehash)
    177 		free(pmp->pmp_pnodehash, M_PUFFS);
    178 	if (error && pmp)
    179 		FREE(pmp, M_PUFFS);
    180 	FREE(args, M_PUFFS);
    181 	return error;
    182 }
    183 
    184 /*
    185  * This is called from the first "Hello, I'm alive" ioctl
    186  * from userspace.
    187  */
    188 int
    189 puffs_start2(struct puffs_mount *pmp, struct puffs_startreq *sreq)
    190 {
    191 	struct puffs_node *pn;
    192 	struct mount *mp;
    193 
    194 	mp = PMPTOMP(pmp);
    195 
    196 	simple_lock(&pmp->pmp_lock);
    197 
    198 	/*
    199 	 * if someone has issued a VFS_ROOT() already, fill in the
    200 	 * vnode cookie.
    201 	 */
    202 	pn = NULL;
    203 	if (pmp->pmp_root) {
    204 		pn = VPTOPP(pmp->pmp_root);
    205 		pn->pn_cookie = sreq->psr_cookie;
    206 	}
    207 
    208 	/* We're good to fly */
    209 	pmp->pmp_rootcookie = sreq->psr_cookie;
    210 	pmp->pmp_status = PUFFSTAT_RUNNING;
    211 	simple_unlock(&pmp->pmp_lock);
    212 
    213 	/* do the VFS_STATVFS() we missed out on in sys_mount() */
    214 	copy_statvfs_info(&sreq->psr_sb, mp);
    215 	(void)memcpy(&mp->mnt_stat, &sreq->psr_sb, sizeof(mp->mnt_stat));
    216 	mp->mnt_stat.f_iosize = DEV_BSIZE;
    217 
    218 	DPRINTF(("puffs_start2: root vp %p, cur root pnode %p, cookie %p\n",
    219 	    pmp->pmp_root, pn, sreq->psr_cookie));
    220 
    221 	return 0;
    222 }
    223 
    224 int
    225 puffs_start(struct mount *mp, int flags, struct lwp *l)
    226 {
    227 
    228 	/*
    229 	 * This cannot travel to userspace, as this is called from
    230 	 * the kernel context of the process doing mount(2).  But
    231 	 * it's probably a safe bet that the process doing mount(2)
    232 	 * realizes it needs to start the filesystem also...
    233 	 */
    234 	return 0;
    235 }
    236 
    237 int
    238 puffs_unmount(struct mount *mp, int mntflags, struct lwp *l)
    239 {
    240 	struct puffs_mount *pmp;
    241 	int error, force;
    242 
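        	/* PUFFS_VFSREQ() declares unmount_arg, which may be sent to the file server below */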
    243 	PUFFS_VFSREQ(unmount);
    244 
    245 	error = 0;
    246 	force = mntflags & MNT_FORCE;
    247 	pmp = MPTOPUFFSMP(mp);
    248 
    249 	DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
    250 	    "status 0x%x\n", pmp->pmp_status));
    251 
    252 	/*
    253 	 * flush all the vnodes.  VOP_RECLAIM() takes care that the
    254 	 * root vnode does not get flushed until unmount.  The
    255 	 * userspace root node cookie is stored in the mount
    256 	 * structure, so we can always re-instantiate a root vnode,
    257 	 * should userspace unmount decide it doesn't want to
    258 	 * cooperate.
    259 	 */
    260 	error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
    261 	if (error)
    262 		goto out;
    263 
    264 	/*
    265 	 * If we are not DYING, we should ask userspace's opinion
    266 	 * about the situation
    267 	 */
    268 	simple_lock(&pmp->pmp_lock);
    269 	if (pmp->pmp_status != PUFFSTAT_DYING) {
    270 		pmp->pmp_unmounting = 1;
    271 		simple_unlock(&pmp->pmp_lock);
    272 
    273 		unmount_arg.pvfsr_flags = mntflags;
    274 		unmount_arg.pvfsr_pid = puffs_lwp2pid(l);
    275 
    276 		error = puffs_vfstouser(pmp, PUFFS_VFS_UNMOUNT,
    277 		     &unmount_arg, sizeof(unmount_arg));
    278 		DPRINTF(("puffs_unmount: error %d force %d\n", error, force));
    279 
    280 		simple_lock(&pmp->pmp_lock);
    281 		pmp->pmp_unmounting = 0;
    282 		wakeup(&pmp->pmp_unmounting);
    283 	}
    284 
    285 	/*
    286 	 * if userspace cooperated or we really need to die,
    287 	 * screw what userland thinks and just die.
    288 	 */
    289 	if (error == 0 || force) {
    290 		/* tell waiters & other resources to go unwait themselves */
    291 		puffs_userdead(pmp);
    292 		puffs_nukebypmp(pmp);
    293 
    294 		/*
    295 		 * Sink waiters.  This is still not perfect, since the
    296 		 * draining is done after userret, not when they really
    297 		 * exit the file system.  It will probably work, as almost
    298 		 * no call will block and cause a context switch, so the
    299 		 * thread will still be protected by the biglock after
    300 		 * exiting userspace.  But ... it's an imperfect world.
    301 		 */
    302 		while (pmp->pmp_req_touser_waiters != 0)
    303 			ltsleep(&pmp->pmp_req_touser_waiters, PVFS,
    304 			    "puffsink", 0, &pmp->pmp_lock);
    305 		simple_unlock(&pmp->pmp_lock);
    306 
    307 		/* free resources now that we hopefully have no waiters left */
    308 		free(pmp->pmp_pnodehash, M_PUFFS);
    309 		FREE(pmp, M_PUFFS);
    310 		error = 0;
    311 	} else {
    312 		simple_unlock(&pmp->pmp_lock);
    313 	}
    314 
    315  out:
    316 	DPRINTF(("puffs_unmount: return %d\n", error));
    317 	return error;
    318 }
    319 
    320 /*
    321  * This doesn't need to travel to userspace
    322  */
    323 int
    324 puffs_root(struct mount *mp, struct vnode **vpp)
    325 {
    326 	struct puffs_mount *pmp;
    327 	struct puffs_node *pn;
    328 	struct vnode *vp;
    329 
    330 	pmp = MPTOPUFFSMP(mp);
    331 
    332 	/*
    333 	 * pmp_lock must be held if vref()'ing or vrele()'ing the
    334 	 * root vnode.  The latter is controlled by puffs_inactive().
    335 	 */
    336 	simple_lock(&pmp->pmp_lock);
    337 	vp = pmp->pmp_root;
    338 	if (vp) {
    339 		mutex_enter(&vp->v_interlock);
    340 		simple_unlock(&pmp->pmp_lock);
    341 		pn = VPTOPP(vp);
    342 		if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK))
    343 			goto grabnew;
    344 		*vpp = vp;
    345 		return 0;
    346 	} else
    347 		simple_unlock(&pmp->pmp_lock);
    348 
    349 	/* XXX: this is wrong, so FIXME */
    350  grabnew:
    351 
    352 	/*
    353 	 * So, we didn't have the magic root vnode available.
    354 	 * No matter, grab another and stuff it with the cookie.
    355 	 */
    356 	if (puffs_getvnode(mp, pmp->pmp_rootcookie, VDIR, 0, 0, &vp))
    357 		panic("sloppy programming");
    358 
    359 	simple_lock(&pmp->pmp_lock);
    360 	/*
    361 	 * check if, by some mysterious force, someone else created a root
    362 	 * vnode while we were executing.
    363 	 */
    364 	if (pmp->pmp_root) {
    365 		vref(pmp->pmp_root);
    366 		simple_unlock(&pmp->pmp_lock);
    367 		puffs_putvnode(vp);
    368 		vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);
    369 		*vpp = pmp->pmp_root;
    370 		return 0;
    371 	}
    372 
    373 	/* mark it as the root and cache it in the mount structure */
    374 	vp->v_flag = VROOT;
    375 	pmp->pmp_root = vp;
    376 	simple_unlock(&pmp->pmp_lock);
    377 
    378 	vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);
    379 
    380 	*vpp = vp;
    381 	return 0;
    382 }
    383 
    384 int
    385 puffs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg, struct lwp *l)
    386 {
    387 
    388 	return EOPNOTSUPP;
    389 }
    390 
    391 int
    392 puffs_statvfs(struct mount *mp, struct statvfs *sbp, struct lwp *l)
    393 {
    394 	struct puffs_vfsreq_statvfs *statvfs_arg; /* too big for stack */
    395 	struct puffs_mount *pmp;
    396 	int error = 0;
    397 
    398 	pmp = MPTOPUFFSMP(mp);
    399 
    400 	/*
    401 	 * If we are mounting, it means that the userspace counterpart
    402 	 * is calling mount(2), but mount(2) also calls statvfs.  So
    403 	 * requesting statvfs from userspace would mean a deadlock.
    404 	 * Compensate.
    405 	 */
    406 	if (pmp->pmp_status == PUFFSTAT_MOUNTING)
    407 		return EINPROGRESS;
    408 
    409 	/* too big for stack */
    410 	MALLOC(statvfs_arg, struct puffs_vfsreq_statvfs *,
    411 	    sizeof(struct puffs_vfsreq_statvfs), M_PUFFS, M_WAITOK | M_ZERO);
    412 	statvfs_arg->pvfsr_pid = puffs_lwp2pid(l);
    413 
    414 	error = puffs_vfstouser(pmp, PUFFS_VFS_STATVFS,
    415 	    statvfs_arg, sizeof(*statvfs_arg));
    416 	statvfs_arg->pvfsr_sb.f_iosize = DEV_BSIZE;
    417 
    418 	/*
    419 	 * Try to produce a sensible result even in the event
    420 	 * of userspace error.
    421 	 *
    422 	 * XXX: cache the copy in non-error case
    423 	 */
    424 	if (!error) {
    425 		copy_statvfs_info(&statvfs_arg->pvfsr_sb, mp);
    426 		(void)memcpy(sbp, &statvfs_arg->pvfsr_sb,
    427 		    sizeof(struct statvfs));
    428 	} else {
    429 		copy_statvfs_info(sbp, mp);
    430 	}
    431 
    432 	FREE(statvfs_arg, M_PUFFS);
    433 	return error;
    434 }
    435 
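        /*
         * Flush dirty pages from all regular vnodes on this mount.
         * Called from puffs_sync() with suspending == 0 and from
         * puffs_suspendctl() with suspending != 0 while the file
         * system is being suspended.
         */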
    436 static int
    437 pageflush(struct mount *mp, int waitfor, int suspending)
    438 {
    439 	struct puffs_node *pn;
    440 	struct vnode *vp, *nvp;
    441 	int error, rv, ppflags;
    442 
    443 	KASSERT(((waitfor == MNT_WAIT) && suspending) == 0);
    444 	KASSERT((suspending == 0)
    445 	    || (fstrans_is_owner(mp)
    446 	      && fstrans_getstate(mp) == FSTRANS_SUSPENDING));
    447 
    448 	error = 0;
    449 	ppflags = PGO_CLEANIT | PGO_ALLPAGES;
    450 	if (waitfor == MNT_WAIT)
    451 		ppflags |= PGO_SYNCIO;
    452 
    453 	/*
    454 	 * Sync all cached data from regular vnodes (which are not
    455 	 * currently locked, see below).  After this we call VFS_SYNC
    456 	 * for the fs server, which should handle data and metadata for
    457 	 * all the nodes it knows to exist.
    458 	 */
    459 	mutex_enter(&mntvnode_lock);
    460  loop:
    461 	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
    462 		/* check if we're on the right list */
    463 		if (vp->v_mount != mp)
    464 			goto loop;
    465 
    466 		mutex_enter(&vp->v_interlock);
    467 		pn = VPTOPP(vp);
    468 		nvp = TAILQ_NEXT(vp, v_mntvnodes);
    469 
    470 		if (vp->v_type != VREG || UVM_OBJ_IS_CLEAN(&vp->v_uobj)) {
    471 			mutex_exit(&vp->v_interlock);
    472 			continue;
    473 		}
    474 
    475 		mutex_exit(&mntvnode_lock);
    476 
    477 		/*
    478 		 * Here we try to get a reference to the vnode and to
    479 		 * lock it.  This is mostly cargo-culted, but I will
    480 		 * offer an explanation of why I believe this might
    481 		 * actually do the right thing.
    482 		 *
    483 		 * If the vnode is a goner, we quite obviously don't need
    484 		 * to sync it.
    485 		 *
    486 		 * If the vnode was busy, we don't need to sync it because
    487 		 * this is never called with MNT_WAIT except from
    488 		 * dounmount(), when we are wait-flushing all the dirty
    489 		 * vnodes through other routes in any case.  So there,
    490 		 * sync() doesn't actually sync.  Happy now?
    491 		 *
    492 		 * NOTE: if we're suspending, vget() does NOT lock.
    493 		 * See puffs_lock() for details.
    494 		 */
    495 		rv = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
    496 		if (rv) {
    497 			mutex_enter(&mntvnode_lock);
    498 			if (rv == ENOENT)
    499 				goto loop;
    500 			continue;
    501 		}
    502 
    503 		/*
    504 		 * Thread information to puffs_strategy() through the
    505 		 * pnode flags: we want to issue the putpages operations
    506 		 * as FAF if we're suspending, since it's very probable
    507 		 * that our execution context is that of the userspace
    508 		 * daemon.  We can do this because:
    509 		 *   + we send the "going to suspend" prior to this part
    510 		 *   + if any of the writes fails in userspace, it's the
    511 		 *     file system server's problem to decide if this was a
    512 		 *     failed snapshot when it gets the "snapshot complete"
    513 		 *     notification.
    514 		 *   + if any of the writes fail in the kernel already, we
    515 		 *     immediately fail *and* notify the user server of
    516 		 *     failure.
    517 		 *
    518 		 * We also do FAFs if we're called from the syncer.  This
    519 		 * is just a general optimization for trickle sync: no need
    520 		 * to really guarantee that the data ended up on backing
    521 		 * storage.
    522 		 * TODO: Maybe also hint the user server of this twist?
    523 		 */
    524 		mutex_enter(&vp->v_interlock);
    525 		if (suspending || waitfor == MNT_LAZY)
    526 			pn->pn_stat |= PNODE_SUSPEND;
    527 		rv = VOP_PUTPAGES(vp, 0, 0, ppflags);
    528 		if (suspending || waitfor == MNT_LAZY) {
    529 			mutex_enter(&vp->v_interlock);
    530 			pn->pn_stat &= ~PNODE_SUSPEND;
    531 			mutex_exit(&vp->v_interlock);
    532 		}
    533 		if (rv)
    534 			error = rv;
    535 		vput(vp);
    536 		mutex_enter(&mntvnode_lock);
    537 	}
    538 	mutex_exit(&mntvnode_lock);
    539 
    540 	return error;
    541 }
    542 
    543 int
    544 puffs_sync(struct mount *mp, int waitfor, struct kauth_cred *cred,
    545 	struct lwp *l)
    546 {
    547 	int error, rv;
    548 
    549 	PUFFS_VFSREQ(sync);
    550 
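        	/* flush our cached pages first; the file server handles its own state below */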
    551 	error = pageflush(mp, waitfor, 0);
    552 
    553 	/* sync fs */
    554 	sync_arg.pvfsr_waitfor = waitfor;
    555 	puffs_credcvt(&sync_arg.pvfsr_cred, cred);
    556 	sync_arg.pvfsr_pid = puffs_lwp2pid(l);
    557 
    558 	rv = puffs_vfstouser(MPTOPUFFSMP(mp), PUFFS_VFS_SYNC,
    559 	    &sync_arg, sizeof(sync_arg));
    560 	if (rv)
    561 		error = rv;
    562 
    563 	return error;
    564 }
    565 
    566 int
    567 puffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
    568 {
    569 
    570 	return EOPNOTSUPP;
    571 }
    572 
    573 #if 0
    574 /*ARGSUSED*/
    575 int
    576 puffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
    577 {
    578 
    579 	return EOPNOTSUPP;
    580 }
    581 
    582 /*ARGSUSED*/
    583 int
    584 puffs_vptofh(struct vnode *vp, struct fid *fhp)
    585 {
    586 
    587 	return EOPNOTSUPP;
    588 }
    589 #endif
    590 
    591 void
    592 puffs_init(void)
    593 {
    594 
    595 #ifdef _LKM
    596 	malloc_type_attach(M_PUFFS);
    597 	pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
    598 	    "puffspnpl", &pool_allocator_nointr, IPL_NONE);
    599 #endif
    600 
    601 	return;
    602 }
    603 
    604 void
    605 puffs_done(void)
    606 {
    607 
    608 #ifdef _LKM
    609 	pool_destroy(&puffs_pnpool);
    610 	malloc_type_detach(M_PUFFS);
    611 #endif
    612 
    613 	return;
    614 }
    615 
    616 int
    617 puffs_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
    618 {
    619 
    620 	return EOPNOTSUPP;
    621 }
    622 
    623 int
    624 puffs_suspendctl(struct mount *mp, int cmd)
    625 {
    626 	struct puffs_mount *pmp;
    627 	int error;
    628 
    629 	pmp = MPTOPUFFSMP(mp);
    630 	switch (cmd) {
    631 	case SUSPEND_SUSPEND:
    632 		DPRINTF(("puffs_suspendctl: suspending\n"));
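        		/* enter SUSPENDING before flushing; pageflush() asserts this state when suspending */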
    633 		if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
    634 			break;
    635 		puffs_suspendtouser(pmp, PUFFS_SUSPEND_START);
    636 
    637 		error = pageflush(mp, 0, 1);
    638 		if (error == 0)
    639 			error = fstrans_setstate(mp, FSTRANS_SUSPENDED);
    640 
    641 		if (error != 0) {
    642 			puffs_suspendtouser(pmp, PUFFS_SUSPEND_ERROR);
    643 			(void) fstrans_setstate(mp, FSTRANS_NORMAL);
    644 			break;
    645 		}
    646 
    647 		puffs_suspendtouser(pmp, PUFFS_SUSPEND_SUSPENDED);
    648 
    649 		break;
    650 
    651 	case SUSPEND_RESUME:
    652 		DPRINTF(("puffs_suspendctl: resume\n"));
    653 		error = 0;
    654 		(void) fstrans_setstate(mp, FSTRANS_NORMAL);
    655 		puffs_suspendtouser(pmp, PUFFS_SUSPEND_RESUME);
    656 		break;
    657 
    658 	default:
    659 		error = EINVAL;
    660 		break;
    661 	}
    662 
    663 	DPRINTF(("puffs_suspendctl: return %d\n", error));
    664 	return error;
    665 }
    666 
    667 const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
    668 	&puffs_vnodeop_opv_desc,
    669 	&puffs_specop_opv_desc,
    670 	&puffs_fifoop_opv_desc,
    671 	&puffs_msgop_opv_desc,
    672 	NULL,
    673 };
    674 
    675 struct vfsops puffs_vfsops = {
    676 	MOUNT_PUFFS,
    677 	puffs_mount,		/* mount	*/
    678 	puffs_start,		/* start	*/
    679 	puffs_unmount,		/* unmount	*/
    680 	puffs_root,		/* root		*/
    681 	puffs_quotactl,		/* quotactl	*/
    682 	puffs_statvfs,		/* statvfs	*/
    683 	puffs_sync,		/* sync		*/
    684 	puffs_vget,		/* vget		*/
    685 	(void *)eopnotsupp,	/* fhtovp	*/
    686 	(void *)eopnotsupp,	/* vptofh	*/
    687 	puffs_init,		/* init		*/
    688 	NULL,			/* reinit	*/
    689 	puffs_done,		/* done		*/
    690 	NULL,			/* mountroot	*/
    691 	puffs_snapshot,		/* snapshot	*/
    692 	vfs_stdextattrctl,	/* extattrctl	*/
    693 	puffs_suspendctl,	/* suspendctl	*/
    694 	puffs_vnodeopv_descs,	/* vnodeops	*/
    695 	0,			/* refcount	*/
    696 	{ NULL, NULL }
    697 };
    698 VFS_ATTACH(puffs_vfsops);
    699