/* puffs_msgif.c, revision 1.43 (NetBSD) */
      1 /*	$NetBSD: puffs_msgif.c,v 1.43 2007/10/02 01:17:17 pooka Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2005, 2006, 2007  Antti Kantee.  All Rights Reserved.
      5  *
      6  * Development of this software was supported by the
      7  * Google Summer of Code program and the Ulla Tuominen Foundation.
      8  * The Google SoC project was mentored by Bill Studenmund.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
     20  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     21  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     22  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     25  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.43 2007/10/02 01:17:17 pooka Exp $");
     34 
     35 #include <sys/param.h>
     36 #include <sys/fstrans.h>
     37 #include <sys/malloc.h>
     38 #include <sys/mount.h>
     39 #include <sys/vnode.h>
     40 #include <sys/lock.h>
     41 #include <sys/proc.h>
     42 
     43 #include <fs/puffs/puffs_msgif.h>
     44 #include <fs/puffs/puffs_sys.h>
     45 
     46 /*
     47  * waitq data structures
     48  */
     49 
     50 /*
     51  * While a request is going to userspace, park the caller within the
     52  * kernel.  This is the kernel counterpart of "struct puffs_req".
     53  */
struct puffs_park {
	struct puffs_req	*park_preq;	/* req followed by buf	*/
	uint64_t		park_id;	/* duplicate of preq_id */

	size_t			park_copylen;	/* userspace copylength	*/
	size_t			park_maxlen;	/* max size in comeback */

	parkdone_fn		park_done;	/* called on reply (PARKFLAG_CALL) */
	void			*park_donearg;	/* opaque argument for park_done */

	int			park_flags;	/* PARKFLAG_*, below */
	int			park_refcount;	/* park freed when this hits 0 */

	kcondvar_t		park_cv;	/* caller sleeps here for reply */
	kmutex_t		park_mtx;	/* protects flags/refcount/cv */

	TAILQ_ENTRY(puffs_park) park_entries;	/* touser/replywait queue link */
};
#define PARKFLAG_WAITERGONE	0x01	/* waiting thread gave up (signal) */
#define PARKFLAG_DONE		0x02	/* reply has been delivered */
#define PARKFLAG_ONQUEUE1	0x04	/* on pmp_req_touser queue */
#define PARKFLAG_ONQUEUE2	0x08	/* on pmp_req_replywait queue */
#define PARKFLAG_CALL		0x10	/* async: invoke park_done on reply */
#define PARKFLAG_WANTREPLY	0x20	/* non-FAF: a reply is expected */
     78 
/* Backing pool + cache for park structures (ctor/dtor: makepark/nukepark). */
static struct pool_cache parkpc;
static struct pool parkpool;
     81 
     82 static int
     83 makepark(void *arg, void *obj, int flags)
     84 {
     85 	struct puffs_park *park = obj;
     86 
     87 	mutex_init(&park->park_mtx, MUTEX_DEFAULT, IPL_NONE);
     88 	cv_init(&park->park_cv, "puffsrpl");
     89 
     90 	return 0;
     91 }
     92 
     93 static void
     94 nukepark(void *arg, void *obj)
     95 {
     96 	struct puffs_park *park = obj;
     97 
     98 	cv_destroy(&park->park_cv);
     99 	mutex_destroy(&park->park_mtx);
    100 }
    101 
/* Module initialization: create the pool and pool cache for parks. */
void
puffs_msgif_init()
{

	pool_init(&parkpool, sizeof(struct puffs_park), 0, 0, 0,
	    "puffprkl", &pool_allocator_nointr, IPL_NONE);
	pool_cache_init(&parkpc, &parkpool, makepark, nukepark, NULL);
}
    110 
/* Module teardown: destroy the park pool cache and its backing pool. */
void
puffs_msgif_destroy()
{

	pool_cache_destroy(&parkpc);
	pool_destroy(&parkpool);
}
    118 
    119 void *
    120 puffs_park_alloc(int waitok)
    121 {
    122 	struct puffs_park *park;
    123 
    124 	park = pool_cache_get(&parkpc, waitok ? PR_WAITOK : PR_NOWAIT);
    125 	if (park) {
    126 		park->park_refcount = 1;
    127 		mutex_enter(&park->park_mtx);
    128 	}
    129 
    130 	return park;
    131 }
    132 
/* Take an additional reference to a park; returns with park_mtx held. */
static void
puffs_park_reference(struct puffs_park *park)
{

	mutex_enter(&park->park_mtx);
	park->park_refcount++;
}
    140 
    141 void
    142 puffs_park_release(void *arg, int fullnuke)
    143 {
    144 	struct puffs_park *park = arg;
    145 
    146 	KASSERT(mutex_owned(&park->park_mtx));
    147 	--park->park_refcount;
    148 
    149 	mutex_exit(&park->park_mtx);
    150 	if (park->park_refcount == 0 || fullnuke)
    151 		pool_cache_put(&parkpc, park);
    152 }
    153 
    154 #ifdef PUFFSDEBUG
/* Dump the contents of a single park to the debug log. */
static void
parkdump(struct puffs_park *park)
{

	DPRINTF(("park %p, preq %p, id %" PRIu64 "\n"
	    "\tcopy %zu, max %zu - done: %p/%p\n"
	    "\tflags 0x%08x, refcount %d, cv/mtx: %p/%p\n",
	    park, park->park_preq, park->park_id,
	    park->park_copylen, park->park_maxlen,
	    park->park_done, park->park_donearg,
	    park->park_flags, park->park_refcount,
	    &park->park_cv, &park->park_mtx));
}
    168 
/*
 * Walk a park waitqueue, optionally dumping each entry, and log the
 * total number of parks found on it.
 */
static void
parkqdump(struct puffs_wq *q, int dumpall)
{
	struct puffs_park *park;
	int total = 0;

	TAILQ_FOREACH(park, q, park_entries) {
		if (dumpall)
			parkdump(park);
		total++;
	}
	DPRINTF(("puffs waitqueue at %p dumped, %d total\n", q, total));

}
    183 #endif /* PUFFSDEBUG */
    184 
    185 /*
    186  * Converts a non-FAF op to a FAF.  This simply involves making copies
    187  * of the park and request structures and tagging the request as a FAF.
    188  * It is safe to block here, since the original op is not a FAF.
    189  */
static void
puffs_reqtofaf(struct puffs_park *park)
{
	struct puffs_req *newpreq;

	KASSERT((park->park_preq->preq_opclass & PUFFSOPFLAG_FAF) == 0);

	/* safe to sleep: the original op is not a FAF (see KASSERT above) */
	MALLOC(newpreq, struct puffs_req *, park->park_copylen,
	    M_PUFFS, M_ZERO | M_WAITOK);

	memcpy(newpreq, park->park_preq, park->park_copylen);

	/*
	 * NOTE(review): the original park_preq is replaced but not freed
	 * here; presumably ownership of the old buffer stays with the
	 * caller of touser() -- confirm against the callers.
	 */
	park->park_preq = newpreq;
	park->park_preq->preq_opclass |= PUFFSOPFLAG_FAF;
	park->park_flags &= ~PARKFLAG_WANTREPLY;
}
    206 
    207 
    208 /*
    209  * kernel-user-kernel waitqueues
    210  */
    211 
    212 static int touser(struct puffs_mount *, struct puffs_park *, uint64_t);
    213 
    214 uint64_t
    215 puffs_getreqid(struct puffs_mount *pmp)
    216 {
    217 	uint64_t rv;
    218 
    219 	mutex_enter(&pmp->pmp_lock);
    220 	rv = pmp->pmp_nextreq++;
    221 	mutex_exit(&pmp->pmp_lock);
    222 
    223 	return rv;
    224 }
    225 
    226 /* vfs request */
    227 int
    228 puffs_vfstouser(struct puffs_mount *pmp, int optype, void *kbuf, size_t buflen)
    229 {
    230 	struct puffs_park *park;
    231 
    232 	park = puffs_park_alloc(1);
    233 	park->park_preq = kbuf;
    234 
    235 	park->park_preq->preq_opclass = PUFFSOP_VFS;
    236 	park->park_preq->preq_optype = optype;
    237 
    238 	park->park_maxlen = park->park_copylen = buflen;
    239 	park->park_flags = 0;
    240 
    241 	return touser(pmp, park, puffs_getreqid(pmp));
    242 }
    243 
    244 void
    245 puffs_suspendtouser(struct puffs_mount *pmp, int status)
    246 {
    247 	struct puffs_vfsreq_suspend *pvfsr_susp;
    248 	struct puffs_park *park;
    249 
    250 	pvfsr_susp = malloc(sizeof(struct puffs_vfsreq_suspend),
    251 	    M_PUFFS, M_WAITOK | M_ZERO);
    252 	park = puffs_park_alloc(1);
    253 
    254 	pvfsr_susp->pvfsr_status = status;
    255 	park->park_preq = (struct puffs_req *)pvfsr_susp;
    256 
    257 	park->park_preq->preq_opclass = PUFFSOP_VFS | PUFFSOPFLAG_FAF;
    258 	park->park_preq->preq_optype = PUFFS_VFS_SUSPEND;
    259 
    260 	park->park_maxlen = park->park_copylen
    261 	    = sizeof(struct puffs_vfsreq_suspend);
    262 	park->park_flags = 0;
    263 
    264 	(void)touser(pmp, park, 0);
    265 }
    266 
/*
 * vnode level request: send to the file server, wait for the reply,
 * then honor any "setback" requests the server piggybacked on it.
 */
int
puffs_vntouser(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, size_t maxdelta,
	struct vnode *vp_opc, struct vnode *vp_aux)
{
	struct puffs_park *park;
	struct puffs_req *preq;
	void *cookie = VPTOPNC(vp_opc);
	struct puffs_node *pnode;
	int rv;

	park = puffs_park_alloc(1);
	park->park_preq = kbuf;

	park->park_preq->preq_opclass = PUFFSOP_VN;
	park->park_preq->preq_optype = optype;
	park->park_preq->preq_cookie = cookie;

	/* the reply may grow the buffer by at most maxdelta bytes */
	park->park_copylen = buflen;
	park->park_maxlen = buflen + maxdelta;
	park->park_flags = 0;

	rv = touser(pmp, park, puffs_getreqid(pmp));

	/*
	 * Check if the user server requests that inactive be called
	 * when the time is right.
	 *
	 * NOTE(review): touser() drops the caller's park reference, so
	 * dereferencing park->park_preq here looks like it can touch a
	 * park already returned to the pool cache -- confirm the
	 * reference life cycle.  (In the common case park_preq == kbuf.)
	 */
	preq = park->park_preq;
	if (preq->preq_setbacks & PUFFS_SETBACK_INACT_N1) {
		pnode = vp_opc->v_data;
		pnode->pn_stat |= PNODE_DOINACT;
	}
	if (preq->preq_setbacks & PUFFS_SETBACK_INACT_N2) {
		/* if no vp_aux, just ignore */
		if (vp_aux) {
			pnode = vp_aux->v_data;
			pnode->pn_stat |= PNODE_DOINACT;
		}
	}
	if (preq->preq_setbacks & PUFFS_SETBACK_NOREF_N1) {
		pnode = vp_opc->v_data;
		pnode->pn_stat |= PNODE_NOREFS;
	}
	if (preq->preq_setbacks & PUFFS_SETBACK_NOREF_N2) {
		/* if no vp_aux, just ignore */
		if (vp_aux) {
			pnode = vp_aux->v_data;
			pnode->pn_stat |= PNODE_NOREFS;
		}
	}

	return rv;
}
    324 
    325 int
    326 puffs_cookietouser(struct puffs_mount *pmp, int optype,
    327 	void *kbuf, size_t buflen, void *cookie, int faf)
    328 {
    329 	struct puffs_park *park;
    330 
    331 	park = puffs_park_alloc(1);
    332 	park->park_preq = kbuf;
    333 
    334 	park->park_preq->preq_opclass = PUFFSOP_VN |(faf ? PUFFSOPFLAG_FAF : 0);
    335 	park->park_preq->preq_optype = optype;
    336 	park->park_preq->preq_cookie = cookie;
    337 
    338 	park->park_copylen = park->park_maxlen = buflen;
    339 	park->park_flags = 0;
    340 
    341 	return touser(pmp, park, puffs_getreqid(pmp));
    342 }
    343 
    344 /*
    345  * vnode level request, caller-controller req id
    346  */
    347 int
    348 puffs_vntouser_req(struct puffs_mount *pmp, int optype,
    349 	void *kbuf, size_t buflen, size_t maxdelta,
    350 	uint64_t reqid, struct vnode *vp_opc, struct vnode *vp_aux)
    351 {
    352 	struct puffs_park *park;
    353 	void *cookie = VPTOPNC(vp_opc);
    354 
    355 	park = puffs_park_alloc(1);
    356 	park->park_preq = kbuf;
    357 
    358 	park->park_preq->preq_opclass = PUFFSOP_VN;
    359 	park->park_preq->preq_optype = optype;
    360 	park->park_preq->preq_cookie = cookie;
    361 
    362 	park->park_copylen = buflen;
    363 	park->park_maxlen = buflen + maxdelta;
    364 	park->park_flags = 0;
    365 
    366 	return touser(pmp, park, reqid);
    367 }
    368 
    369 void
    370 puffs_vntouser_call(struct puffs_mount *pmp, int optype,
    371 	void *kbuf, size_t buflen, size_t maxdelta,
    372 	parkdone_fn donefn, void *donearg,
    373 	struct vnode *vp_opc, struct vnode *vp_aux)
    374 {
    375 	struct puffs_park *park;
    376 	void *cookie = VPTOPNC(vp_opc);
    377 
    378 	park = puffs_park_alloc(1);
    379 	park->park_preq = kbuf;
    380 
    381 	park->park_preq->preq_opclass = PUFFSOP_VN;
    382 	park->park_preq->preq_optype = optype;
    383 	park->park_preq->preq_cookie = cookie;
    384 
    385 	park->park_copylen = buflen;
    386 	park->park_maxlen = buflen + maxdelta;
    387 	park->park_done = donefn;
    388 	park->park_donearg = donearg;
    389 	park->park_flags = PARKFLAG_CALL;
    390 
    391 	(void) touser(pmp, park, puffs_getreqid(pmp));
    392 }
    393 
    394 /*
    395  * Notice: kbuf will be free'd later.  I must be allocated from the
    396  * kernel heap and it's ownership is shifted to this function from
    397  * now on, i.e. the caller is not allowed to use it anymore!
    398  */
    399 void
    400 puffs_vntouser_faf(struct puffs_mount *pmp, int optype,
    401 	void *kbuf, size_t buflen, struct vnode *vp_opc)
    402 {
    403 	struct puffs_park *park;
    404 	void *cookie = VPTOPNC(vp_opc);
    405 
    406 	/* XXX: is it allowable to sleep here? */
    407 	park = puffs_park_alloc(0);
    408 	if (park == NULL)
    409 		return; /* 2bad */
    410 
    411 	park->park_preq = kbuf;
    412 
    413 	park->park_preq->preq_opclass = PUFFSOP_VN | PUFFSOPFLAG_FAF;
    414 	park->park_preq->preq_optype = optype;
    415 	park->park_preq->preq_cookie = cookie;
    416 
    417 	park->park_maxlen = park->park_copylen = buflen;
    418 	park->park_flags = 0;
    419 
    420 	(void)touser(pmp, park, 0);
    421 }
    422 
    423 void
    424 puffs_cacheop(struct puffs_mount *pmp, struct puffs_park *park,
    425 	struct puffs_cacheinfo *pcinfo, size_t pcilen, void *cookie)
    426 {
    427 
    428 	park->park_preq = (struct puffs_req *)pcinfo;
    429 	park->park_preq->preq_opclass = PUFFSOP_CACHE | PUFFSOPFLAG_FAF;
    430 	park->park_preq->preq_optype = PCACHE_TYPE_WRITE; /* XXX */
    431 	park->park_preq->preq_cookie = cookie;
    432 
    433 	park->park_maxlen = park->park_copylen = pcilen;
    434 	park->park_flags = 0;
    435 
    436 	(void)touser(pmp, park, 0);
    437 }
    438 
    439 void
    440 puffs_errnotify(struct puffs_mount *pmp, uint8_t type, int error,
    441 	const char *str, void *cookie)
    442 {
    443 	struct puffs_park *park;
    444 	struct puffs_error *perr;
    445 
    446 	park = puffs_park_alloc(1);
    447 	MALLOC(perr, struct puffs_error *, sizeof(struct puffs_error),
    448 	    M_PUFFS, M_ZERO | M_WAITOK);
    449 
    450 	perr->perr_error = error;
    451 	strlcpy(perr->perr_str, str, sizeof(perr->perr_str));
    452 
    453 	park->park_preq = (struct puffs_req *)perr;
    454 	park->park_preq->preq_opclass = PUFFSOP_ERROR | PUFFSOPFLAG_FAF;
    455 	park->park_preq->preq_optype = type;
    456 	park->park_preq->preq_cookie = cookie;
    457 
    458 	park->park_maxlen = park->park_copylen = sizeof(struct puffs_error);
    459 	park->park_flags = 0;
    460 
    461 	(void)touser(pmp, park, 0);
    462 }
    463 
    464 /*
    465  * Wait for the userspace ping-pong game in calling process context.
    466  *
    467  * This unlocks vnodes if they are supplied.  vp1 is the vnode
    468  * before in the locking order, i.e. the one which must be locked
    469  * before accessing vp2.  This is done here so that operations are
    470  * already ordered in the queue when vnodes are unlocked (I'm not
    471  * sure if that's really necessary, but it can't hurt).  Okok, maybe
    472  * there's a slight ugly-factor also, but let's not worry about that.
    473  */
/*
 * NOTE: the park arrives here locked and holding one reference
 * (puffs_park_alloc() returns it that way); touser() consumes both
 * before returning.
 */
static int
touser(struct puffs_mount *pmp, struct puffs_park *park, uint64_t reqid)
{
	struct lwp *l = curlwp;
	struct mount *mp;
	struct puffs_req *preq;
	int rv = 0;

	mp = PMPTOMP(pmp);
	preq = park->park_preq;
	preq->preq_id = park->park_id = reqid;
	/* tell userspace how much buffer the reply may occupy */
	preq->preq_buflen = ALIGN(park->park_maxlen);

	if (PUFFSOP_WANTREPLY(preq->preq_opclass))
		park->park_flags |= PARKFLAG_WANTREPLY;

	/*
	 * To support PCATCH, yet another movie: check if there are signals
	 * pending and we are issueing a non-FAF.  If so, return an error
	 * directly UNLESS we are issueing INACTIVE.  In that case, convert
	 * it to a FAF, fire off to the file server and return an error.
	 * Yes, this is bordering disgusting.  Barfbags are on me.
	 */
	if ((park->park_flags & PARKFLAG_WANTREPLY)
	   && (park->park_flags & PARKFLAG_CALL) == 0
	   && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0)) {
		if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
		    && preq->preq_optype == PUFFS_VN_INACTIVE) {
			puffs_reqtofaf(park);
			DPRINTF(("puffs touser: converted to FAF %p\n", park));
			rv = EINTR;
		} else {
			puffs_park_release(park, 0);
			return EINTR;
		}
	}

	/*
	 * test for suspension lock.
	 *
	 * Note that we *DO NOT* keep the lock, since that might block
	 * lock acquiring PLUS it would give userlandia control over
	 * the lock.  The operation queue enforces a strict ordering:
	 * when the fs server gets in the op stream, it knows things
	 * are in order.  The kernel locks can't guarantee that for
	 * userspace, in any case.
	 *
	 * BUT: this presents a problem for ops which have a consistency
	 * clause based on more than one operation.  Unfortunately such
	 * operations (read, write) do not reliably work yet.
	 *
	 * Ya, Ya, it's wrong wong wrong, me be fixink this someday.
	 *
	 * XXX: and there is one more problem.  We sometimes need to
	 * take a lazy lock in case the fs is suspending and we are
	 * executing as the fs server context.  This might happen
	 * e.g. in the case that the user server triggers a reclaim
	 * in the kernel while the fs is suspending.  It's not a very
	 * likely event, but it needs to be fixed some day.
	 */

	/*
	 * MOREXXX: once PUFFS_WCACHEINFO is enabled, we can't take
	 * the mutex here, since getpages() might be called locked.
	 */
	fstrans_start(mp, FSTRANS_NORMAL);
	mutex_enter(&pmp->pmp_lock);
	fstrans_done(mp);

	if (pmp->pmp_status != PUFFSTAT_RUNNING) {
		mutex_exit(&pmp->pmp_lock);
		puffs_park_release(park, 0);
		return ENXIO;
	}

#ifdef PUFFSDEBUG
	parkqdump(&pmp->pmp_req_touser, puffsdebug > 1);
	parkqdump(&pmp->pmp_req_replywait, puffsdebug > 1);
#endif

	/* queue the request for the file server */
	TAILQ_INSERT_TAIL(&pmp->pmp_req_touser, park, park_entries);
	park->park_flags |= PARKFLAG_ONQUEUE1;
	puffs_mp_reference(pmp);
	pmp->pmp_req_touser_count++;
	mutex_exit(&pmp->pmp_lock);

	DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
	    "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, park,
	    preq->preq_opclass, preq->preq_optype, park->park_flags));

	/* wake up getop() sleepers and poll/select listeners */
	cv_broadcast(&pmp->pmp_req_waiter_cv);
	selnotify(pmp->pmp_sel, 0);

	if ((park->park_flags & PARKFLAG_WANTREPLY)
	    && (park->park_flags & PARKFLAG_CALL) == 0) {
		int error;

		/* interruptibly wait for putop() to flag the reply done */
		error = cv_wait_sig(&park->park_cv, &park->park_mtx);
		if (error) {
			park->park_flags |= PARKFLAG_WAITERGONE;
			if (park->park_flags & PARKFLAG_DONE) {
				rv = preq->preq_rv;
				puffs_park_release(park, 0);
			} else {
				/*
				 * ok, we marked it as going away, but
				 * still need to do queue ops.  take locks
				 * in correct order.
				 *
				 * We don't want to release our reference
				 * if it's on replywait queue to avoid error
				 * to file server.  putop() code will DTRT.
				 */
				KASSERT(park->park_flags &
				    (PARKFLAG_ONQUEUE1 | PARKFLAG_ONQUEUE2));
				mutex_exit(&park->park_mtx);

				mutex_enter(&pmp->pmp_lock);
				mutex_enter(&park->park_mtx);
				if (park->park_flags & PARKFLAG_ONQUEUE1) {
					TAILQ_REMOVE(&pmp->pmp_req_touser,
					    park, park_entries);
					pmp->pmp_req_touser_count--;
					park->park_flags &= ~PARKFLAG_ONQUEUE1;
				}
				if ((park->park_flags & PARKFLAG_ONQUEUE2) == 0)
					puffs_park_release(park, 0);
				else
					mutex_exit(&park->park_mtx);
				mutex_exit(&pmp->pmp_lock);

				rv = error;
			}
		} else {
			rv = preq->preq_rv;
			puffs_park_release(park, 0);
		}

		/*
		 * retake the lock and release.  This makes sure (haha,
		 * I'm humorous) that we don't process the same vnode in
		 * multiple threads due to the locks hacks we have in
		 * puffs_lock().  In reality this is well protected by
		 * the biglock, but once that's gone, well, hopefully
		 * this will be fixed for real.  (and when you read this
		 * comment in 2017 and subsequently barf, my condolences ;).
		 */
		if (rv == 0 && !fstrans_is_owner(mp)) {
			fstrans_start(mp, FSTRANS_NORMAL);
			fstrans_done(mp);
		}
	} else {
		/* FAF or callback-style op: nothing to wait for here */
		mutex_exit(&park->park_mtx);
	}

	mutex_enter(&pmp->pmp_lock);
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	return rv;
}
    635 
    636 
    637 /*
    638  * getop: scan through queued requests until:
    639  *  1) max number of requests satisfied
    640  *     OR
    641  *  2) buffer runs out of space
    642  *     OR
    643  *  3) nonblocking is set AND there are no operations available
    644  *     OR
    645  *  4) at least one operation was transferred AND there are no more waiting
    646  */
int
puffs_getop(struct puffs_mount *pmp, struct puffs_reqh_get *phg, int nonblock)
{
	struct puffs_park *park;
	struct puffs_req *preq;
	uint8_t *bufpos;
	int error, donesome;

	donesome = error = 0;
	bufpos = phg->phg_buf;

	mutex_enter(&pmp->pmp_lock);
	/* phg_nops == 0 means "as many as fit" */
	while (phg->phg_nops == 0 || donesome != phg->phg_nops) {
 again:
		if (pmp->pmp_status != PUFFSTAT_RUNNING) {
			/* if we got some, they don't really matter anymore */
			error = ENXIO;
			goto out;
		}
		if (TAILQ_EMPTY(&pmp->pmp_req_touser)) {
			if (donesome)
				goto out;

			if (nonblock) {
				error = EWOULDBLOCK;
				goto out;
			}

			/* block interruptibly until a request shows up */
			error = cv_wait_sig(&pmp->pmp_req_waiter_cv,
			    &pmp->pmp_lock);
			if (error)
				goto out;
			else
				goto again;
		}

		park = TAILQ_FIRST(&pmp->pmp_req_touser);
		puffs_park_reference(park);

		/* If it's a goner, don't process any further */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			puffs_park_release(park, 0);
			continue;
		}

		preq = park->park_preq;
		if (phg->phg_buflen < preq->preq_buflen) {
			if (!donesome)
				error = E2BIG;
			puffs_park_release(park, 0);
			goto out;
		}

		/* dequeue before dropping pmp_lock for the copyout */
		TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		pmp->pmp_req_touser_count--;
		KASSERT(pmp->pmp_req_touser_count >= 0);
		mutex_exit(&pmp->pmp_lock);

		DPRINTF(("puffsgetop: get op %" PRIu64 " (%d.), from %p "
		    "len %zu (buflen %zu), target %p\n", preq->preq_id,
		    donesome, preq, park->park_copylen, preq->preq_buflen,
		    bufpos));

		if ((error = copyout(preq, bufpos, park->park_copylen)) != 0) {
			DPRINTF(("puffs_getop: copyout failed: %d\n", error));
			/*
			 * ok, user server is probably trying to cheat.
			 * stuff op back & return error to user.  We need
			 * to take locks in the correct order.
			 */
			mutex_exit(&park->park_mtx);

			/*
			 * XXX: ONQUEUE1 | ONQUEUE2 invariant doesn't
			 * hold here
			 */

			mutex_enter(&pmp->pmp_lock);
			mutex_enter(&park->park_mtx);
			if ((park->park_flags & PARKFLAG_WAITERGONE) == 0) {
				 TAILQ_INSERT_HEAD(&pmp->pmp_req_touser, park,
				     park_entries);
				 park->park_flags |= PARKFLAG_ONQUEUE1;
				 pmp->pmp_req_touser_count++;
			}

			if (donesome)
				error = 0;
			puffs_park_release(park, 0);
			goto out;
		}
		bufpos += preq->preq_buflen;
		phg->phg_buflen -= preq->preq_buflen;
		donesome++;

		/* XXXfixme: taking this lock in the wrong order */
		mutex_enter(&pmp->pmp_lock);

		if (park->park_flags & PARKFLAG_WANTREPLY) {
			/* a thread waits in touser(); park moves to queue 2 */
			TAILQ_INSERT_TAIL(&pmp->pmp_req_replywait, park,
			    park_entries);
			park->park_flags |= PARKFLAG_ONQUEUE2;
			puffs_park_release(park, 0);
		} else {
			/* FAF: nobody is waiting, destroy req and park now */
			free(preq, M_PUFFS);
			puffs_park_release(park, 1);
		}
	}

 out:
	phg->phg_more = pmp->pmp_req_touser_count;
	mutex_exit(&pmp->pmp_lock);

	phg->phg_nops = donesome;

	return error;
}
    766 
/*
 * putop: process a batch of replies from the file server.  For each
 * op, locate the matching park on the replywait queue by request id,
 * copy the reply in from userspace and either wake up the waiting
 * thread, invoke the park_done callback, or (if the waiter gave up)
 * destroy the request.
 */
int
puffs_putop(struct puffs_mount *pmp, struct puffs_reqh_put *php)
{
	struct puffs_park *park;
	struct puffs_req tmpreq;
	struct puffs_req *nextpreq;
	void *userbuf;
	uint64_t id;
	size_t reqlen;
	int donesome, error, wgone, release;

	donesome = error = wgone = 0;

	id = php->php_id;
	userbuf = php->php_buf;
	reqlen = php->php_buflen;

	mutex_enter(&pmp->pmp_lock);
	while (donesome != php->php_nops) {
		release = 0;
#ifdef PUFFSDEBUG
		DPRINTF(("puffsputop: searching for %" PRIu64 ", ubuf: %p, "
		    "len %zu\n", id, userbuf, reqlen));
#endif
		/* locate the park this reply belongs to */
		TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
			if (park->park_id == id)
				break;
		}

		if (park == NULL) {
			DPRINTF(("puffsputop: no request: %" PRIu64 "\n", id));
			error = EINVAL;
			break;
		}

		puffs_park_reference(park);
		if (reqlen == 0 || reqlen > park->park_maxlen) {
			DPRINTF(("puffsputop: invalid buffer length: "
			    "%zu\n", reqlen));
			error = E2BIG;
			puffs_park_release(park, 0);
			break;
		}
		wgone = park->park_flags & PARKFLAG_WAITERGONE;

		/* check if it's still on the queue after acquiring lock */
		if (park->park_flags & PARKFLAG_ONQUEUE2) {
			TAILQ_REMOVE(&pmp->pmp_req_replywait, park,
			    park_entries);
			park->park_flags &= ~PARKFLAG_ONQUEUE2;
		}

		mutex_exit(&pmp->pmp_lock);

		/*
		 * If the caller has gone south, go to next, collect
		 * $200 and free the structure there instead of wakeup.
		 * We also need to copyin the header info.  Flag structure
		 * release to mode total and utter destruction.
		 */
		if (wgone) {
			DPRINTF(("puffs_putop: bad service - waiter gone for "
			    "park %p\n", park));
			/* copyin only the header so we can chain to next op */
			error = copyin(userbuf, &tmpreq,
			    sizeof(struct puffs_req));
			release = 1;
			if (error)
				goto loopout;
			nextpreq = &tmpreq;
			goto next;
		}

		DPRINTF(("puffsputpop: copyin from %p to %p, len %zu\n",
		    userbuf, park->park_preq, reqlen));
		error = copyin(userbuf, park->park_preq, reqlen);
		if (error)
			goto loopout;
		nextpreq = park->park_preq;

 next:
		/* all's well, prepare for next op */
		id = nextpreq->preq_id;
		reqlen = nextpreq->preq_buflen;
		userbuf = nextpreq->preq_nextbuf;
		donesome++;

 loopout:
		if (error && !wgone)
			park->park_preq->preq_rv = error;

		if (park->park_flags & PARKFLAG_CALL) {
			DPRINTF(("puffsputopt: call for %p, arg %p\n",
			    park->park_preq, park->park_donearg));
			park->park_done(pmp,park->park_preq,park->park_donearg);
			release = 1;
		}

		if (!wgone) {
			DPRINTF(("puffs_putop: flagging done for "
			    "park %p\n", park));

			cv_signal(&park->park_cv);
		}
		park->park_flags |= PARKFLAG_DONE;
		puffs_park_release(park, release);

		mutex_enter(&pmp->pmp_lock);
		if (error)
			break;
		wgone = 0;
	}

	mutex_exit(&pmp->pmp_lock);
	/* report back how many ops were NOT processed */
	php->php_nops -= donesome;

	return error;
}
    884 
    885 /*
    886  * We're dead, kaput, RIP, slightly more than merely pining for the
    887  * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
    888  * our maker, ceased to be, etcetc.  YASD.  It's a dead FS!
    889  *
    890  * Caller must hold puffs mutex.
    891  */
    892 void
    893 puffs_userdead(struct puffs_mount *pmp)
    894 {
    895 	struct puffs_park *park, *park_next;
    896 
    897 	/*
    898 	 * Mark filesystem status as dying so that operations don't
    899 	 * attempt to march to userspace any longer.
    900 	 */
    901 	pmp->pmp_status = PUFFSTAT_DYING;
    902 
    903 	/* signal waiters on REQUEST TO file server queue */
    904 	for (park = TAILQ_FIRST(&pmp->pmp_req_touser); park; park = park_next) {
    905 		uint8_t opclass;
    906 
    907 		puffs_park_reference(park);
    908 		park_next = TAILQ_NEXT(park, park_entries);
    909 
    910 		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
    911 		TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
    912 		park->park_flags &= ~PARKFLAG_ONQUEUE1;
    913 		pmp->pmp_req_touser_count--;
    914 
    915 		/*
    916 		 * If the waiter is gone, we may *NOT* access preq anymore.
    917 		 */
    918 		if (park->park_flags & PARKFLAG_WAITERGONE) {
    919 			KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
    920 			KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
    921 			puffs_park_release(park, 0);
    922 		} else {
    923 			opclass = park->park_preq->preq_opclass;
    924 			park->park_preq->preq_rv = ENXIO;
    925 
    926 			if (park->park_flags & PARKFLAG_CALL) {
    927 				park->park_done(pmp, park->park_preq,
    928 				    park->park_donearg);
    929 				puffs_park_release(park, 1);
    930 			} else if ((park->park_flags & PARKFLAG_WANTREPLY)==0) {
    931 				free(park->park_preq, M_PUFFS);
    932 				puffs_park_release(park, 1);
    933 			} else {
    934 				park->park_preq->preq_rv = ENXIO;
    935 				cv_signal(&park->park_cv);
    936 				puffs_park_release(park, 0);
    937 			}
    938 		}
    939 	}
    940 
    941 	/* signal waiters on RESPONSE FROM file server queue */
    942 	for (park=TAILQ_FIRST(&pmp->pmp_req_replywait); park; park=park_next) {
    943 		puffs_park_reference(park);
    944 		park_next = TAILQ_NEXT(park, park_entries);
    945 
    946 		KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
    947 		KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
    948 
    949 		TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);
    950 		park->park_flags &= ~PARKFLAG_ONQUEUE2;
    951 
    952 		/*
    953 		 * If the waiter is gone, we may *NOT* access preq anymore.
    954 		 */
    955 		if (park->park_flags & PARKFLAG_WAITERGONE) {
    956 			KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
    957 			puffs_park_release(park, 0);
    958 		} else {
    959 			park->park_preq->preq_rv = ENXIO;
    960 			if (park->park_flags & PARKFLAG_CALL) {
    961 				park->park_done(pmp, park->park_preq,
    962 				    park->park_donearg);
    963 				puffs_park_release(park, 1);
    964 			} else {
    965 				cv_signal(&park->park_cv);
    966 				puffs_park_release(park, 0);
    967 			}
    968 		}
    969 	}
    970 }
    971 
/* this is probably going to die away at some point? */
/*
 * XXX: currently bitrotted
 */
#if 0
/*
 * Locate the parked size operation matching the request id supplied
 * by the file server and shuttle data between the userspace buffer
 * and the kernel-side uio or copy buffer.  Returns 0 on success or
 * an errno (EINVAL if no matching park is found).
 */
static int
puffssizeop(struct puffs_mount *pmp, struct puffs_sizeop *psop_user)
{
	struct puffs_sizepark *pspark;
	void *kernbuf;
	size_t copylen;
	int error;

	/* locate correct op */
	mutex_enter(&pmp->pmp_lock);
	TAILQ_FOREACH(pspark, &pmp->pmp_req_sizepark, pkso_entries) {
		if (pspark->pkso_reqid == psop_user->pso_reqid) {
			TAILQ_REMOVE(&pmp->pmp_req_sizepark, pspark,
			    pkso_entries);
			break;
		}
	}
	mutex_exit(&pmp->pmp_lock);

	/* TAILQ_FOREACH leaves pspark NULL if no entry matched */
	if (pspark == NULL)
		return EINVAL;

	error = 0;
	/* never move more than the smaller of the two buffers */
	copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);

	/*
	 * XXX: uvm stuff to avoid bouncy-bouncy copying?
	 */
	if (PUFFS_SIZEOP_UIO(pspark->pkso_reqtype)) {
		kernbuf = malloc(copylen, M_PUFFS, M_WAITOK | M_ZERO);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_IN) {
			error = copyin(psop_user->pso_userbuf,
			    kernbuf, copylen);
			if (error) {
				printf("psop ERROR1 %d\n", error);
				goto escape;
			}
		}
		error = uiomove(kernbuf, copylen, pspark->pkso_uio);
		if (error) {
			printf("uiomove from kernel %p, len %d failed: %d\n",
			    kernbuf, (int)copylen, error);
			goto escape;
		}

		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_OUT) {
			error = copyout(kernbuf,
			    psop_user->pso_userbuf, copylen);
			if (error) {
				printf("psop ERROR2 %d\n", error);
				goto escape;
			}
		}
 escape:
		free(kernbuf, M_PUFFS);
	} else if (PUFFS_SIZEOP_BUF(pspark->pkso_reqtype)) {
		/*
		 * Fix: this previously used MAX(), which would copy past
		 * the end of the smaller buffer.  Bound the copy by the
		 * smaller size, consistent with the UIO path above.
		 */
		copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_BUF_IN) {
			error = copyin(psop_user->pso_userbuf,
			    pspark->pkso_copybuf, copylen);
		} else {
			error = copyout(pspark->pkso_copybuf,
			    psop_user->pso_userbuf, copylen);
		}
	}
#ifdef DIAGNOSTIC
	else
		panic("puffssizeop: invalid reqtype %d\n",
		    pspark->pkso_reqtype);
#endif /* DIAGNOSTIC */

	return error;
}
#endif
   1051