/* puffs_vfsops.c, revision 1.31 (scraped listing; viewer chrome removed) */
      1 /*	$NetBSD: puffs_vfsops.c,v 1.31 2007/03/29 16:04:26 pooka Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2005, 2006  Antti Kantee.  All Rights Reserved.
      5  *
      6  * Development of this software was supported by the
      7  * Google Summer of Code program and the Ulla Tuominen Foundation.
      8  * The Google SoC project was mentored by Bill Studenmund.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. The name of the company nor the name of the author may be used to
     19  *    endorse or promote products derived from this software without specific
     20  *    prior written permission.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
     23  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     24  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     25  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     28  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     32  * SUCH DAMAGE.
     33  */
     34 
     35 #include <sys/cdefs.h>
     36 __KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.31 2007/03/29 16:04:26 pooka Exp $");
     37 
     38 #include <sys/param.h>
     39 #include <sys/mount.h>
     40 #include <sys/malloc.h>
     41 #include <sys/extattr.h>
     42 #include <sys/queue.h>
     43 #include <sys/vnode.h>
     44 #include <sys/dirent.h>
     45 #include <sys/kauth.h>
     46 #include <sys/fstrans.h>
     47 
     48 #include <lib/libkern/libkern.h>
     49 
     50 #include <fs/puffs/puffs_msgif.h>
     51 #include <fs/puffs/puffs_sys.h>
     52 
/* Generate prototypes for the puffs_* VFS methods used in puffs_vfsops below. */
VFS_PROTOS(puffs);

/* Malloc type for all allocations made by this module. */
MALLOC_DEFINE(M_PUFFS, "puffs", "pass-to-userspace file system structures");

/* Default and upper bound for the per-mount puffs_node hash bucket count. */
#ifndef PUFFS_PNODEBUCKETS
#define PUFFS_PNODEBUCKETS 256
#endif
#ifndef PUFFS_MAXPNODEBUCKETS
#define PUFFS_MAXPNODEBUCKETS 65536
#endif
/* Patchable tunable; clamped to [1, PUFFS_MAXPNODEBUCKETS] in puffs_mount(). */
int puffs_pnodebuckets = PUFFS_PNODEBUCKETS;
     64 
/*
 * Mount a puffs file system: validate the arguments copied in from
 * userspace, allocate and initialize the in-kernel puffs_mount,
 * build the pnode hash, and register the mount with the fileops
 * layer (puffs_setpmp) so the server's open device fd is tied to it.
 *
 * MNT_GETARGS just copies the saved args back out; MNT_UPDATE is
 * not supported.  Returns 0 or an errno.
 */
int
puffs_mount(struct mount *mp, const char *path, void *data,
	    struct nameidata *ndp, struct lwp *l)
{
	struct puffs_mount *pmp = NULL;
	struct puffs_args *args;
	char namebuf[PUFFSNAMESIZE+sizeof(PUFFS_NAMEPREFIX)+1]; /* spooky */
	int error = 0, i;

	if (mp->mnt_flag & MNT_GETARGS) {
		pmp = MPTOPUFFSMP(mp);
		return copyout(&pmp->pmp_args, data, sizeof(struct puffs_args));
	}

	/* update is not supported currently */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	/*
	 * We need the file system name
	 */
	if (!data)
		return EINVAL;

	MALLOC(args, struct puffs_args *, sizeof(struct puffs_args),
	    M_PUFFS, M_WAITOK);

	error = copyin(data, args, sizeof(struct puffs_args));
	if (error)
		goto out;

	/* devel phase: require an exact kernel/userland version match */
	if (args->pa_vers != (PUFFSVERSION | PUFFSDEVELVERS)) {
		printf("puffs_mount: development version mismatch\n");
		error = EINVAL;
		goto out;
	}

	/* nuke spy bits: only kernel-sanctioned flags survive */
	args->pa_flags &= PUFFS_KFLAG_MASK;

	/* build real name: PUFFS_NAMEPREFIX + server-supplied fs name */
	(void)strlcpy(namebuf, PUFFS_NAMEPREFIX, sizeof(namebuf));
	(void)strlcat(namebuf, args->pa_name, sizeof(namebuf));

	/* inform user server if it got the max request size it wanted */
	if (args->pa_maxreqlen == 0 || args->pa_maxreqlen > PUFFS_REQ_MAXSIZE)
		args->pa_maxreqlen = PUFFS_REQ_MAXSIZE;
	else if (args->pa_maxreqlen < PUFFS_REQSTRUCT_MAX)
		args->pa_maxreqlen = PUFFS_REQSTRUCT_MAX;
	(void)strlcpy(args->pa_name, namebuf, sizeof(args->pa_name));

	/* copy the possibly-clamped args back so the server sees them */
	error = copyout(args, data, sizeof(struct puffs_args));
	if (error)
		goto out;

	error = set_statvfs_info(path, UIO_USERSPACE, namebuf,
	    UIO_SYSSPACE, mp, l);
	if (error)
		goto out;
	mp->mnt_stat.f_iosize = DEV_BSIZE;

	MALLOC(pmp, struct puffs_mount *, sizeof(struct puffs_mount),
	    M_PUFFS, M_WAITOK | M_ZERO);

	mp->mnt_fs_bshift = DEV_BSHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
	mp->mnt_data = pmp;
	mp->mnt_iflag |= IMNT_HAS_TRANS;

	pmp->pmp_status = PUFFSTAT_MOUNTING;
	pmp->pmp_nextreq = 0;
	pmp->pmp_mp = mp;
	pmp->pmp_req_maxsize = args->pa_maxreqlen;
	pmp->pmp_args = *args;

	/*
	 * puffs_node hash buckets; clamp the tunable in case someone
	 * patched puffs_pnodebuckets to a silly value.
	 */
	pmp->pmp_npnodehash = puffs_pnodebuckets;
	if (pmp->pmp_npnodehash < 1)
		pmp->pmp_npnodehash = 1;
	if (pmp->pmp_npnodehash > PUFFS_MAXPNODEBUCKETS)
		pmp->pmp_npnodehash = PUFFS_MAXPNODEBUCKETS;
	pmp->pmp_pnodehash = malloc
	    (sizeof(struct puffs_pnode_hashlist *) * pmp->pmp_npnodehash,
	    M_PUFFS, M_WAITOK);
	for (i = 0; i < pmp->pmp_npnodehash; i++)
		LIST_INIT(&pmp->pmp_pnodehash[i]);

	/*
	 * Inform the fileops processing code that we have a mountpoint.
	 * If it doesn't know about anyone with our pid/fd having the
	 * device open, punt
	 */
	if (puffs_setpmp(l->l_proc->p_pid, args->pa_fd, pmp)) {
		error = ENOENT;
		goto out;
	}

	/*
	 * NOTE(review): the lock and cvs are initialized only after
	 * puffs_setpmp() has published pmp -- presumably nothing can
	 * reach them until the mount handshake finishes; confirm
	 * against the transport code.
	 */
	mutex_init(&pmp->pmp_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pmp->pmp_req_waiter_cv, "puffsget");
	cv_init(&pmp->pmp_req_waitersink_cv, "puffsink");
	cv_init(&pmp->pmp_unmounting_cv, "puffsum");
	cv_init(&pmp->pmp_suspend_cv, "pufsusum");
	TAILQ_INIT(&pmp->pmp_req_touser);
	TAILQ_INIT(&pmp->pmp_req_replywait);
	TAILQ_INIT(&pmp->pmp_req_sizepark);

	DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
	    mp, MPTOPUFFSMP(mp)));

	vfs_getnewfsid(mp);

 out:
	/* error unwind: args is always freed, pmp only on failure */
	if (error && pmp && pmp->pmp_pnodehash)
		free(pmp->pmp_pnodehash, M_PUFFS);
	if (error && pmp)
		FREE(pmp, M_PUFFS);
	FREE(args, M_PUFFS);
	return error;
}
    186 
/*
 * This is called from the first "Hello, I'm alive" ioctl
 * from userspace.
 *
 * Completes the mount handshake: records the root cookie the server
 * handed us, flips the mount from MOUNTING to RUNNING, and fills in
 * the statvfs data that could not be fetched while the server was
 * still inside mount(2) (see puffs_statvfs).
 */
int
puffs_start2(struct puffs_mount *pmp, struct puffs_startreq *sreq)
{
	struct puffs_node *pn;
	struct mount *mp;

	mp = PMPTOMP(pmp);

	mutex_enter(&pmp->pmp_lock);

	/*
	 * if someone has issued a VFS_ROOT() already, fill in the
	 * vnode cookie.
	 */
	pn = NULL;
	if (pmp->pmp_root) {
		pn = VPTOPP(pmp->pmp_root);
		pn->pn_cookie = sreq->psr_cookie;
	}

	/* We're good to fly */
	pmp->pmp_rootcookie = sreq->psr_cookie;
	pmp->pmp_status = PUFFSTAT_RUNNING;
	mutex_exit(&pmp->pmp_lock);

	/* do the VFS_STATVFS() we missed out on in sys_mount() */
	copy_statvfs_info(&sreq->psr_sb, mp);
	(void)memcpy(&mp->mnt_stat, &sreq->psr_sb, sizeof(mp->mnt_stat));
	mp->mnt_stat.f_iosize = DEV_BSIZE;

	DPRINTF(("puffs_start2: root vp %p, cur root pnode %p, cookie %p\n",
	    pmp->pmp_root, pn, sreq->psr_cookie));

	return 0;
}
    226 
/*
 * VFS_START() hook.  Intentionally a no-op, always returning 0.
 */
int
puffs_start(struct mount *mp, int flags, struct lwp *l)
{

	/*
	 * There is nothing to forward to the file server here: we run
	 * in the kernel context of the very process doing mount(2),
	 * so a round trip to userspace is impossible.  Trust that the
	 * mounting process realizes it must start serving requests.
	 */
	return 0;
}
    239 
/*
 * Unmount: flush all vnodes, ask the userspace server for its
 * opinion unless it is already dying, and tear the puffs mount down
 * if the server consented or MNT_FORCE was given.
 */
int
puffs_unmount(struct mount *mp, int mntflags, struct lwp *l)
{
	struct puffs_mount *pmp;
	int error, force;

	PUFFS_VFSREQ(unmount);

	error = 0;
	force = mntflags & MNT_FORCE;
	pmp = MPTOPUFFSMP(mp);

	DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
	    "status 0x%x\n", pmp->pmp_status));

	/*
	 * flush all the vnodes.  VOP_RECLAIM() takes care that the
	 * root vnode does not get flushed until unmount.  The
	 * userspace root node cookie is stored in the mount
	 * structure, so we can always re-instantiate a root vnode,
	 * should userspace unmount decide it doesn't want to
	 * cooperate.
	 */
	error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
	if (error)
		goto out;

	/*
	 * If we are not DYING, we should ask userspace's opinion
	 * about the situation
	 */
	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_status != PUFFSTAT_DYING) {
		/* pmp_unmounting flags the transport while we are out */
		pmp->pmp_unmounting = 1;
		mutex_exit(&pmp->pmp_lock);

		unmount_arg.pvfsr_flags = mntflags;
		unmount_arg.pvfsr_pid = puffs_lwp2pid(l);

		error = puffs_vfstouser(pmp, PUFFS_VFS_UNMOUNT,
		     &unmount_arg, sizeof(unmount_arg));
		DPRINTF(("puffs_unmount: error %d force %d\n", error, force));

		mutex_enter(&pmp->pmp_lock);
		pmp->pmp_unmounting = 0;
		cv_broadcast(&pmp->pmp_unmounting_cv);
	}
	/* NB: pmp_lock is held here on both paths through the if above */

	/*
	 * if userspace cooperated or we really need to die,
	 * screw what userland thinks and just die.
	 */
	if (error == 0 || force) {
		/* tell waiters & other resources to go unwait themselves */
		puffs_userdead(pmp);
		puffs_nukebypmp(pmp);

		/*
		 * Sink waiters.  This is still not perfect, since the
		 * draining is done after userret, not when they really
		 * exit the file system.  It will probably work as almost
		 * no call will block and therefore cause a context switch
		 * and therefore will protected by the biglock after
		 * exiting userspace.  But ... it's an imperfect world.
		 */
		while (pmp->pmp_req_waiters != 0)
			cv_wait(&pmp->pmp_req_waitersink_cv, &pmp->pmp_lock);
		mutex_exit(&pmp->pmp_lock);

		/* free resources now that we hopefully have no waiters left */
		cv_destroy(&pmp->pmp_req_waiter_cv);
		cv_destroy(&pmp->pmp_req_waitersink_cv);
		cv_destroy(&pmp->pmp_unmounting_cv);
		cv_destroy(&pmp->pmp_suspend_cv);
		mutex_destroy(&pmp->pmp_lock);

		free(pmp->pmp_pnodehash, M_PUFFS);
		FREE(pmp, M_PUFFS);
		error = 0;
	} else {
		mutex_exit(&pmp->pmp_lock);
	}

 out:
	DPRINTF(("puffs_unmount: return %d\n", error));
	return error;
}
    327 
    328 /*
    329  * This doesn't need to travel to userspace
    330  */
    331 int
    332 puffs_root(struct mount *mp, struct vnode **vpp)
    333 {
    334 	struct puffs_mount *pmp;
    335 	struct puffs_node *pn;
    336 	struct vnode *vp;
    337 
    338 	pmp = MPTOPUFFSMP(mp);
    339 
    340 	/*
    341 	 * pmp_lock must be held if vref()'ing or vrele()'ing the
    342 	 * root vnode.  the latter is controlled by puffs_inactive().
    343 	 */
    344 	mutex_enter(&pmp->pmp_lock);
    345 	vp = pmp->pmp_root;
    346 	if (vp) {
    347 		simple_lock(&vp->v_interlock);
    348 		mutex_exit(&pmp->pmp_lock);
    349 		pn = VPTOPP(vp);
    350 		if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK))
    351 			goto grabnew;
    352 		*vpp = vp;
    353 		return 0;
    354 	} else
    355 		mutex_exit(&pmp->pmp_lock);
    356 
    357 	/* XXX: this is wrong, so FIXME */
    358  grabnew:
    359 
    360 	/*
    361 	 * So, didn't have the magic root vnode available.
    362 	 * No matter, grab another an stuff it with the cookie.
    363 	 */
    364 	if (puffs_getvnode(mp, pmp->pmp_rootcookie, VDIR, 0, 0, &vp))
    365 		panic("sloppy programming");
    366 
    367 	mutex_enter(&pmp->pmp_lock);
    368 	/*
    369 	 * check if by mysterious force someone else created a root
    370 	 * vnode while we were executing.
    371 	 */
    372 	if (pmp->pmp_root) {
    373 		vref(pmp->pmp_root);
    374 		mutex_exit(&pmp->pmp_lock);
    375 		puffs_putvnode(vp);
    376 		vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);
    377 		*vpp = pmp->pmp_root;
    378 		return 0;
    379 	}
    380 
    381 	/* store cache */
    382 	vp->v_flag = VROOT;
    383 	pmp->pmp_root = vp;
    384 	mutex_exit(&pmp->pmp_lock);
    385 
    386 	vn_lock(pmp->pmp_root, LK_EXCLUSIVE | LK_RETRY);
    387 
    388 	*vpp = vp;
    389 	return 0;
    390 }
    391 
    392 int
    393 puffs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg, struct lwp *l)
    394 {
    395 
    396 	return EOPNOTSUPP;
    397 }
    398 
/*
 * Fetch file system statistics from the userspace server.  While the
 * mount handshake is still in progress the call returns EINPROGRESS
 * instead of deadlocking (the server is itself inside mount(2)).
 * On server error, falls back to the kernel's cached statvfs info so
 * the caller still gets something sensible.
 */
int
puffs_statvfs(struct mount *mp, struct statvfs *sbp, struct lwp *l)
{
	struct puffs_vfsreq_statvfs *statvfs_arg; /* too big for stack */
	struct puffs_mount *pmp;
	int error = 0;

	pmp = MPTOPUFFSMP(mp);

	/*
	 * If we are mounting, it means that the userspace counterpart
	 * is calling mount(2), but mount(2) also calls statvfs.  So
	 * requesting statvfs from userspace would mean a deadlock.
	 * Compensate.
	 */
	if (pmp->pmp_status == PUFFSTAT_MOUNTING)
		return EINPROGRESS;

	/* too big for stack */
	MALLOC(statvfs_arg, struct puffs_vfsreq_statvfs *,
	    sizeof(struct puffs_vfsreq_statvfs), M_PUFFS, M_WAITOK | M_ZERO);
	statvfs_arg->pvfsr_pid = puffs_lwp2pid(l);

	error = puffs_vfstouser(pmp, PUFFS_VFS_STATVFS,
	    statvfs_arg, sizeof(*statvfs_arg));
	statvfs_arg->pvfsr_sb.f_iosize = DEV_BSIZE;

	/*
	 * Try to produce a sensible result even in the event
	 * of userspace error.
	 *
	 * XXX: cache the copy in non-error case
	 */
	if (!error) {
		copy_statvfs_info(&statvfs_arg->pvfsr_sb, mp);
		(void)memcpy(sbp, &statvfs_arg->pvfsr_sb,
		    sizeof(struct statvfs));
	} else {
		copy_statvfs_info(sbp, mp);
	}

	FREE(statvfs_arg, M_PUFFS);
	return error;
}
    443 
/*
 * Flush cached pages of every dirty regular-file vnode on the mount.
 *
 * waitfor:    MNT_WAIT/MNT_LAZY/etc. semantics passed to VOP_FSYNC.
 * suspending: nonzero when called from puffs_suspendctl(); the caller
 *             must then own the fstrans state machine in state
 *             SUSPENDING, and waitfor must not be MNT_WAIT (asserted).
 *
 * Returns 0 or the last VOP_FSYNC error; the loop keeps going on
 * error so as much as possible gets flushed.
 */
static int
pageflush(struct mount *mp, kauth_cred_t cred,
	int waitfor, int suspending, struct lwp *l)
{
	struct puffs_node *pn;
	struct vnode *vp, *nvp;
	int error, rv;

	KASSERT(((waitfor == MNT_WAIT) && suspending) == 0);
	KASSERT((suspending == 0)
	    || (fstrans_is_owner(mp)
	      && fstrans_getstate(mp) == FSTRANS_SUSPENDING));

	error = 0;

	/*
	 * Sync all cached data from regular vnodes (which are not
	 * currently locked, see below).  After this we call VFS_SYNC
	 * for the fs server, which should handle data and metadata for
	 * all the nodes it knows to exist.
	 */
	simple_lock(&mntvnode_slock);
 loop:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		/* check if we're on the right list; restart if not */
		if (vp->v_mount != mp)
			goto loop;

		simple_lock(&vp->v_interlock);
		pn = VPTOPP(vp);
		nvp = TAILQ_NEXT(vp, v_mntvnodes);

		/* only regular files with dirty pages are of interest */
		if (vp->v_type != VREG || UVM_OBJ_IS_CLEAN(&vp->v_uobj)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}

		simple_unlock(&mntvnode_slock);

		/*
		 * Here we try to get a reference to the vnode and to
		 * lock it.  This is mostly cargo-culted, but I will
		 * offer an explanation to why I believe this might
		 * actually do the right thing.
		 *
		 * If the vnode is a goner, we quite obviously don't need
		 * to sync it.
		 *
		 * If the vnode was busy, we don't need to sync it because
		 * this is never called with MNT_WAIT except from
		 * dounmount(), when we are wait-flushing all the dirty
		 * vnodes through other routes in any case.  So there,
		 * sync() doesn't actually sync.  Happy now?
		 *
		 * NOTE: if we're suspending, vget() does NOT lock.
		 * See puffs_lock() for details.
		 */
		rv = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (rv) {
			simple_lock(&mntvnode_slock);
			if (rv == ENOENT)
				goto loop;
			continue;
		}

		/*
		 * Thread information to puffs_strategy() through the
		 * pnode flags: we want to issue the putpages operations
		 * as FAF if we're suspending, since it's very probable
		 * that our execution context is that of the userspace
		 * daemon.  We can do this because:
		 *   + we send the "going to suspend" prior to this part
		 *   + if any of the writes fails in userspace, it's the
		 *     file system server's problem to decide if this was a
		 *     failed snapshot when it gets the "snapshot complete"
		 *     notification.
		 *   + if any of the writes fail in the kernel already, we
		 *     immediately fail *and* notify the user server of
		 *     failure.
		 *
		 * We also do FAFs if we're called from the syncer.  This
		 * is just general optimization for trickle sync: no need
		 * to really guarantee that the stuff ended on backing
		 * storage.
		 * TODO: Maybe also hint the user server of this twist?
		 */
		if (suspending || waitfor == MNT_LAZY) {
			simple_lock(&vp->v_interlock);
			pn->pn_stat |= PNODE_SUSPEND;
			simple_unlock(&vp->v_interlock);
		}
		rv = VOP_FSYNC(vp, cred, waitfor, 0, 0, l);
		if (suspending || waitfor == MNT_LAZY) {
			simple_lock(&vp->v_interlock);
			pn->pn_stat &= ~PNODE_SUSPEND;
			simple_unlock(&vp->v_interlock);
		}
		if (rv)
			error = rv;
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);

	return error;
}
    550 
/*
 * Sync the file system: flush cached pages of all vnodes locally via
 * pageflush(), then send PUFFS_VFS_SYNC so the server can flush its
 * own state.  Both phases always run; a server-trip error takes
 * precedence over a pageflush error in the return value.
 */
int
puffs_sync(struct mount *mp, int waitfor, struct kauth_cred *cred,
	struct lwp *l)
{
	int error, rv;

	PUFFS_VFSREQ(sync);

	error = pageflush(mp, cred, waitfor, 0, l);

	/* sync fs */
	sync_arg.pvfsr_waitfor = waitfor;
	puffs_credcvt(&sync_arg.pvfsr_cred, cred);
	sync_arg.pvfsr_pid = puffs_lwp2pid(l);

	rv = puffs_vfstouser(MPTOPUFFSMP(mp), PUFFS_VFS_SYNC,
	    &sync_arg, sizeof(sync_arg));
	if (rv)
		error = rv;

	return error;
}
    573 
    574 int
    575 puffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
    576 {
    577 
    578 	return EOPNOTSUPP;
    579 }
    580 
/*
 * File handle operations are not implemented; the vfsops table below
 * points fhtovp/vptofh at eopnotsupp directly, so these stubs are
 * compiled out.
 */
#if 0
/*ARGSUSED*/
int
puffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{

	return EOPNOTSUPP;
}

/*ARGSUSED*/
int
puffs_vptofh(struct vnode *vp, struct fid *fhp)
{

	return EOPNOTSUPP;
}
#endif
    598 
/*
 * One-time module initialization: attach the malloc type (only when
 * built as an LKM; a static kernel links it in), create the
 * puffs_node pool, and bring up the transport and message interface
 * layers.  Torn down in reverse order by puffs_done().
 */
void
puffs_init()
{

#ifdef _LKM
	malloc_type_attach(M_PUFFS);
#endif

	pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
	    "puffpnpl", &pool_allocator_nointr, IPL_NONE);
	puffs_transport_init();
	puffs_msgif_init();
}
    612 
/*
 * Module teardown: undo puffs_init() in reverse order -- message
 * interface, transport, pnode pool, and (LKM only) the malloc type.
 */
void
puffs_done()
{

	puffs_msgif_destroy();
	puffs_transport_destroy();
	pool_destroy(&puffs_pnpool);

#ifdef _LKM
	malloc_type_detach(M_PUFFS);
#endif
}
    625 
    626 int
    627 puffs_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
    628 {
    629 
    630 	return EOPNOTSUPP;
    631 }
    632 
/*
 * File system suspension control (snapshot support).
 *
 * SUSPEND_SUSPEND walks the fstrans state machine NORMAL ->
 * SUSPENDING -> SUSPENDED, flushing dirty pages in between and
 * notifying the userspace server at each step; on any failure the
 * server gets PUFFS_SUSPEND_ERROR and the state reverts to NORMAL.
 * SUSPEND_RESUME unconditionally returns to NORMAL and tells the
 * server to resume.
 */
int
puffs_suspendctl(struct mount *mp, int cmd)
{
	struct puffs_mount *pmp;
	int error;

	pmp = MPTOPUFFSMP(mp);
	switch (cmd) {
	case SUSPEND_SUSPEND:
		DPRINTF(("puffs_suspendctl: suspending\n"));
		if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
			break;
		puffs_suspendtouser(pmp, PUFFS_SUSPEND_START);

		/* FAF pageflush; see the big comment in pageflush() */
		error = pageflush(mp, FSCRED, 0, 1, curlwp);
		if (error == 0)
			error = fstrans_setstate(mp, FSTRANS_SUSPENDED);

		if (error != 0) {
			puffs_suspendtouser(pmp, PUFFS_SUSPEND_ERROR);
			(void) fstrans_setstate(mp, FSTRANS_NORMAL);
			break;
		}

		puffs_suspendtouser(pmp, PUFFS_SUSPEND_SUSPENDED);

		break;

	case SUSPEND_RESUME:
		DPRINTF(("puffs_suspendctl: resume\n"));
		error = 0;
		(void) fstrans_setstate(mp, FSTRANS_NORMAL);
		puffs_suspendtouser(pmp, PUFFS_SUSPEND_RESUME);
		break;

	default:
		error = EINVAL;
		break;
	}

	DPRINTF(("puffs_suspendctl: return %d\n", error));
	return error;
}
    676 
/* Vnode operation vector descriptors registered for puffs mounts. */
const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
	&puffs_vnodeop_opv_desc,
	&puffs_specop_opv_desc,
	&puffs_fifoop_opv_desc,
	&puffs_msgop_opv_desc,
	NULL,
};

/* VFS operations table; hooked into the kernel's vfs list below. */
struct vfsops puffs_vfsops = {
	MOUNT_PUFFS,
	puffs_mount,		/* mount	*/
	puffs_start,		/* start	*/
	puffs_unmount,		/* unmount	*/
	puffs_root,		/* root		*/
	puffs_quotactl,		/* quotactl	*/
	puffs_statvfs,		/* statvfs	*/
	puffs_sync,		/* sync		*/
	puffs_vget,		/* vget		*/
	(void *)eopnotsupp,	/* fhtovp	*/
	(void *)eopnotsupp,	/* vptofh	*/
	puffs_init,		/* init		*/
	NULL,			/* reinit	*/
	puffs_done,		/* done		*/
	NULL,			/* mountroot	*/
	puffs_snapshot,		/* snapshot	*/
	vfs_stdextattrctl,	/* extattrctl	*/
	puffs_suspendctl,	/* suspendctl	*/
	puffs_vnodeopv_descs,	/* vnodeops	*/
	0,			/* refcount	*/
	{ NULL, NULL }
};
VFS_ATTACH(puffs_vfsops);
    709