/*	$NetBSD: puffs_msgif.c,v 1.29 2007/04/11 09:43:19 pooka Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.29 2007/04/11 09:43:19 pooka Exp $");

#include <sys/param.h>
#include <sys/fstrans.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/lock.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

/*
 * waitq data structures
 */

/*
 * While a request is going to userspace, park the caller within the
 * kernel.  This is the kernel counterpart of "struct puffs_req".
 */
struct puffs_park {
	struct puffs_req	*park_preq;	/* req followed by buf	*/
	uint64_t		park_id;	/* duplicate of preq_id */

	size_t			park_copylen;	/* userspace copylength	*/
	size_t			park_maxlen;	/* max size in comeback */

	parkdone_fn		park_done;
	void			*park_donearg;

	int			park_flags;
	int			park_refcount;

	kcondvar_t		park_cv;
	kmutex_t		park_mtx;

	TAILQ_ENTRY(puffs_park) park_entries;
};
#define PARKFLAG_WAITERGONE	0x01
#define PARKFLAG_DONE		0x02
#define PARKFLAG_ONQUEUE1	0x04
#define PARKFLAG_ONQUEUE2	0x08
#define PARKFLAG_CALL		0x10

static struct pool_cache parkpc;
static struct pool parkpool;

static int
makepark(void *arg, void *obj, int flags)
{
	struct puffs_park *park = obj;

	mutex_init(&park->park_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&park->park_cv, "puffsrpl");

	return 0;
}

static void
nukepark(void *arg, void *obj)
{
	struct puffs_park *park = obj;

	cv_destroy(&park->park_cv);
	mutex_destroy(&park->park_mtx);
}

void
puffs_msgif_init()
{

	pool_init(&parkpool, sizeof(struct puffs_park), 0, 0, 0,
	    "puffprkl", &pool_allocator_nointr, IPL_NONE);
	pool_cache_init(&parkpc, &parkpool, makepark, nukepark, NULL);
}

void
puffs_msgif_destroy()
{

	pool_cache_destroy(&parkpc);
	pool_destroy(&parkpool);
}

void *
puffs_park_alloc(int waitok)
{
	struct puffs_park *park;

	park = pool_cache_get(&parkpc, waitok ? PR_WAITOK : PR_NOWAIT);
	if (park) {
		park->park_refcount = 1;
		mutex_enter(&park->park_mtx);
	}

	return park;
}

static void
puffs_park_reference(struct puffs_park *park)
{

	mutex_enter(&park->park_mtx);
	park->park_refcount++;
}

void
puffs_park_release(void *arg, int fullnuke)
{
	struct puffs_park *park = arg;

	KASSERT(mutex_owned(&park->park_mtx));
	--park->park_refcount;

	mutex_exit(&park->park_mtx);
	if (park->park_refcount == 0 || fullnuke)
		pool_cache_put(&parkpc, park);
}

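/*
 * Illustrative sketch only (not compiled in): the reference counting
 * protocol implemented by puffs_park_alloc/reference/release, as used
 * by the queue consumers further below.  A park is picked off a queue
 * under pmp_lock, pinned with puffs_park_reference() (which leaves
 * park_mtx held), and later dropped with puffs_park_release(), which
 * expects park_mtx to be held and returns the park to the pool cache
 * once the last reference is gone (or fullnuke is set).  The function
 * name below is hypothetical.
 */
#if 0
static void
example_park_consumer(struct puffs_mount *pmp)
{
	struct puffs_park *park;

	mutex_enter(&pmp->pmp_lock);
	park = TAILQ_FIRST(&pmp->pmp_req_touser);
	if (park == NULL) {
		mutex_exit(&pmp->pmp_lock);
		return;
	}
	puffs_park_reference(park);	/* enters park_mtx, bumps refcount */
	mutex_exit(&pmp->pmp_lock);

	/* ... examine park->park_preq while holding the reference ... */

	puffs_park_release(park, 0);	/* drops park_mtx and our reference */
}
#endif
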
#ifdef PUFFSDEBUG
static void
parkdump(struct puffs_park *park)
{

	DPRINTF(("park %p, preq %p, id %" PRIu64 "\n"
	    "\tcopy %zu, max %zu - done: %p/%p\n"
	    "\tflags 0x%08x, refcount %d, cv/mtx: %p/%p\n",
	    park, park->park_preq, park->park_id,
	    park->park_copylen, park->park_maxlen,
	    park->park_done, park->park_donearg,
	    park->park_flags, park->park_refcount,
	    &park->park_cv, &park->park_mtx));
}

static void
parkqdump(struct puffs_wq *q, int dumpall)
{
	struct puffs_park *park;
	int total = 0;

	TAILQ_FOREACH(park, q, park_entries) {
		if (dumpall)
			parkdump(park);
		total++;
	}
	DPRINTF(("puffs waitqueue at %p dumped, %d total\n", q, total));

}
#endif /* PUFFSDEBUG */

/*
 * Converts a non-FAF op to a FAF.  This simply involves making a copy
 * of the request structure and tagging the request as a FAF.  It is
 * safe to block here, since the original op is not a FAF.
 */
static void
puffs_reqtofaf(struct puffs_park *park)
{
	struct puffs_req *newpreq;

	KASSERT((park->park_preq->preq_opclass & PUFFSOPFLAG_FAF) == 0);

	MALLOC(newpreq, struct puffs_req *, park->park_copylen,
	    M_PUFFS, M_ZERO | M_WAITOK);

	memcpy(newpreq, park->park_preq, park->park_copylen);

	park->park_preq = newpreq;
	park->park_preq->preq_opclass |= PUFFSOPFLAG_FAF;
}


/*
 * kernel-user-kernel waitqueues
 */

static int touser(struct puffs_mount *, struct puffs_park *, uint64_t,
		  struct vnode *, struct vnode *);

uint64_t
puffs_getreqid(struct puffs_mount *pmp)
{
	uint64_t rv;

	mutex_enter(&pmp->pmp_lock);
	rv = pmp->pmp_nextreq++;
	mutex_exit(&pmp->pmp_lock);

	return rv;
}

/* vfs request */
int
puffs_vfstouser(struct puffs_mount *pmp, int optype, void *kbuf, size_t buflen)
{
	struct puffs_park *park;

	park = puffs_park_alloc(1);
	park->park_preq = kbuf;

	park->park_preq->preq_opclass = PUFFSOP_VFS;
	park->park_preq->preq_optype = optype;

	park->park_maxlen = park->park_copylen = buflen;
	park->park_flags = 0;

	return touser(pmp, park, puffs_getreqid(pmp), NULL, NULL);
}

void
puffs_suspendtouser(struct puffs_mount *pmp, int status)
{
	struct puffs_vfsreq_suspend *pvfsr_susp;
	struct puffs_park *park;

	pvfsr_susp = malloc(sizeof(struct puffs_vfsreq_suspend),
	    M_PUFFS, M_WAITOK | M_ZERO);
	park = puffs_park_alloc(1);

	pvfsr_susp->pvfsr_status = status;
	park->park_preq = (struct puffs_req *)pvfsr_susp;

	park->park_preq->preq_opclass = PUFFSOP_VFS | PUFFSOPFLAG_FAF;
	park->park_preq->preq_optype = PUFFS_VFS_SUSPEND;

	park->park_maxlen = park->park_copylen
	    = sizeof(struct puffs_vfsreq_suspend);
	park->park_flags = 0;

	(void)touser(pmp, park, 0, NULL, NULL);
}

/*
 * vnode level request
 */
int
puffs_vntouser(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, size_t maxdelta, void *cookie,
	struct vnode *vp1, struct vnode *vp2)
{
	struct puffs_park *park;

	park = puffs_park_alloc(1);
	park->park_preq = kbuf;

	park->park_preq->preq_opclass = PUFFSOP_VN;
	park->park_preq->preq_optype = optype;
	park->park_preq->preq_cookie = cookie;

	park->park_copylen = buflen;
	park->park_maxlen = buflen + maxdelta;
	park->park_flags = 0;

	return touser(pmp, park, puffs_getreqid(pmp), vp1, vp2);
}

/*
 * vnode level request, caller-controlled req id
 */
int
puffs_vntouser_req(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, size_t maxdelta, void *cookie,
	uint64_t reqid, struct vnode *vp1, struct vnode *vp2)
{
	struct puffs_park *park;

	park = puffs_park_alloc(1);
	park->park_preq = kbuf;

	park->park_preq->preq_opclass = PUFFSOP_VN;
	park->park_preq->preq_optype = optype;
	park->park_preq->preq_cookie = cookie;

	park->park_copylen = buflen;
	park->park_maxlen = buflen + maxdelta;
	park->park_flags = 0;

	return touser(pmp, park, reqid, vp1, vp2);
}

void
puffs_vntouser_call(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, size_t maxdelta, void *cookie,
	parkdone_fn donefn, void *donearg,
	struct vnode *vp1, struct vnode *vp2)
{
	struct puffs_park *park;

	park = puffs_park_alloc(1);
	park->park_preq = kbuf;

	park->park_preq->preq_opclass = PUFFSOP_VN;
	park->park_preq->preq_optype = optype;
	park->park_preq->preq_cookie = cookie;

	park->park_copylen = buflen;
	park->park_maxlen = buflen + maxdelta;
	park->park_done = donefn;
	park->park_donearg = donearg;
	park->park_flags = PARKFLAG_CALL;

	(void) touser(pmp, park, puffs_getreqid(pmp), vp1, vp2);
}

/*
 * Notice: kbuf will be freed later.  It must be allocated from the
 * kernel heap and its ownership is shifted to this function from
 * now on, i.e. the caller is not allowed to use it anymore!
 */
void
puffs_vntouser_faf(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, void *cookie)
{
	struct puffs_park *park;

	/* XXX: is it allowable to sleep here? */
	park = puffs_park_alloc(0);
	if (park == NULL)
		return; /* 2bad */

	park->park_preq = kbuf;

	park->park_preq->preq_opclass = PUFFSOP_VN | PUFFSOPFLAG_FAF;
	park->park_preq->preq_optype = optype;
	park->park_preq->preq_cookie = cookie;

	park->park_maxlen = park->park_copylen = buflen;
	park->park_flags = 0;

	(void)touser(pmp, park, 0, NULL, NULL);
}

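/*
 * Illustrative sketch only (not compiled in): how a caller is expected
 * to hand a buffer to puffs_vntouser_faf().  The buffer must come from
 * the kernel heap (M_PUFFS), must begin with a struct puffs_req, and
 * may not be touched after the call, since it is freed on the other
 * side of the queue.  "struct puffs_vnreq_example", its pvnr_flags
 * field and PUFFS_VN_EXAMPLE are hypothetical stand-ins for a real
 * vnode request type from puffs_msgif.h.
 */
#if 0
struct puffs_vnreq_example {
	struct puffs_req	pvnr_preq;	/* must be the first member */
	int			pvnr_flags;
};

static void
example_send_faf(struct puffs_mount *pmp, void *cookie)
{
	struct puffs_vnreq_example *vnreq;

	vnreq = malloc(sizeof(*vnreq), M_PUFFS, M_WAITOK | M_ZERO);
	vnreq->pvnr_flags = 0;

	/* ownership of vnreq passes here; do not use it after the call */
	puffs_vntouser_faf(pmp, PUFFS_VN_EXAMPLE,
	    vnreq, sizeof(*vnreq), cookie);
}
#endif
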
void
puffs_cacheop(struct puffs_mount *pmp, struct puffs_park *park,
	struct puffs_cacheinfo *pcinfo, size_t pcilen, void *cookie)
{

	park->park_preq = (struct puffs_req *)pcinfo;
	park->park_preq->preq_opclass = PUFFSOP_CACHE | PUFFSOPFLAG_FAF;
	park->park_preq->preq_optype = PCACHE_TYPE_WRITE; /* XXX */
	park->park_preq->preq_cookie = cookie;

	park->park_maxlen = park->park_copylen = pcilen;
	park->park_flags = 0;

	(void)touser(pmp, park, 0, NULL, NULL);
}

/*
 * Wait for the userspace ping-pong game in calling process context.
 *
 * This unlocks vnodes if they are supplied.  vp1 is the vnode
 * before in the locking order, i.e. the one which must be locked
 * before accessing vp2.  This is done here so that operations are
 * already ordered in the queue when vnodes are unlocked (I'm not
 * sure if that's really necessary, but it can't hurt).  Okok, maybe
 * there's a slight ugly-factor also, but let's not worry about that.
 */
static int
touser(struct puffs_mount *pmp, struct puffs_park *park, uint64_t reqid,
	struct vnode *vp1, struct vnode *vp2)
{
	struct lwp *l = curlwp;
	struct mount *mp;
	struct puffs_req *preq;
	int rv = 0;

	mp = PMPTOMP(pmp);
	preq = park->park_preq;
	preq->preq_id = park->park_id = reqid;
	preq->preq_buflen = ALIGN(park->park_maxlen);

	/*
	 * To support PCATCH, yet another movie: check if there are signals
	 * pending and we are issuing a non-FAF.  If so, return an error
	 * directly UNLESS we are issuing INACTIVE.  In that case, convert
	 * it to a FAF, fire off to the file server and return an error.
	 * Yes, this is bordering on disgusting.  Barfbags are on me.
	 */
	if (PUFFSOP_WANTREPLY(preq->preq_opclass)
	   && (park->park_flags & PARKFLAG_CALL) == 0
	   && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0)) {
		if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
		    && preq->preq_optype == PUFFS_VN_INACTIVE) {
			puffs_reqtofaf(park);
			DPRINTF(("puffs touser: converted to FAF %p\n", park));
			rv = EINTR;
		} else {
			puffs_park_release(park, 0);
			return EINTR;
		}
	}

	/*
	 * test for suspension lock.
	 *
	 * Note that we *DO NOT* keep the lock, since that might block
	 * lock acquisition PLUS it would give userlandia control over
	 * the lock.  The operation queue enforces a strict ordering:
	 * when the fs server gets in the op stream, it knows things
	 * are in order.  The kernel locks can't guarantee that for
	 * userspace, in any case.
	 *
	 * BUT: this presents a problem for ops which have a consistency
	 * clause based on more than one operation.  Unfortunately such
	 * operations (read, write) do not reliably work yet.
	 *
	 * Ya, Ya, it's wrong wong wrong, me be fixink this someday.
	 *
	 * XXX: and there is one more problem.  We sometimes need to
	 * take a lazy lock in case the fs is suspending and we are
	 * executing as the fs server context.  This might happen
	 * e.g. in the case that the user server triggers a reclaim
	 * in the kernel while the fs is suspending.  It's not a very
	 * likely event, but it needs to be fixed some day.
	 */

	/*
	 * MOREXXX: once PUFFS_WCACHEINFO is enabled, we can't take
	 * the mutex here, since getpages() might be called locked.
	 */
	fstrans_start(mp, FSTRANS_NORMAL);
	mutex_enter(&pmp->pmp_lock);
	fstrans_done(mp);

	if (pmp->pmp_status != PUFFSTAT_RUNNING) {
		mutex_exit(&pmp->pmp_lock);
		puffs_park_release(park, 0);
		return ENXIO;
	}

#ifdef PUFFSDEBUG
	parkqdump(&pmp->pmp_req_touser, puffsdebug > 1);
	parkqdump(&pmp->pmp_req_replywait, puffsdebug > 1);
#endif

	TAILQ_INSERT_TAIL(&pmp->pmp_req_touser, park, park_entries);
	park->park_flags |= PARKFLAG_ONQUEUE1;
	pmp->pmp_req_waiters++;
	mutex_exit(&pmp->pmp_lock);

#if 0
	/*
	 * Don't do unlock-relock dance yet.  There are a couple of
	 * unsolved issues with it.  If we don't unlock, we can have
	 * processes wanting vn_lock in case userspace hangs.  But
	 * that can be "solved" by killing the userspace process.  It
	 * would of course be nicer to have antilocking in the userspace
	 * interface protocol itself.. your patience will be rewarded.
	 */
	/* unlock */
	if (vp2)
		VOP_UNLOCK(vp2, 0);
	if (vp1)
		VOP_UNLOCK(vp1, 0);
#endif

	DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
	    "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, park,
	    preq->preq_opclass, preq->preq_optype, park->park_flags));

	cv_broadcast(&pmp->pmp_req_waiter_cv);
	selnotify(pmp->pmp_sel, 0);

	if (PUFFSOP_WANTREPLY(preq->preq_opclass)
	    && (park->park_flags & PARKFLAG_CALL) == 0) {
		int error;

		error = cv_wait_sig(&park->park_cv, &park->park_mtx);
		if (error) {
			park->park_flags |= PARKFLAG_WAITERGONE;
			if (park->park_flags & PARKFLAG_DONE) {
				rv = preq->preq_rv;
				puffs_park_release(park, 0);
			} else {
				/*
				 * ok, we marked it as going away, but
				 * still need to do queue ops.  take locks
				 * in correct order.
				 *
				 * We don't want to release our reference
				 * if it's on replywait queue to avoid error
				 * to file server.  putop() code will DTRT.
				 */
				KASSERT(park->park_flags &
				    (PARKFLAG_ONQUEUE1 | PARKFLAG_ONQUEUE2));
				mutex_exit(&park->park_mtx);

				mutex_enter(&pmp->pmp_lock);
				mutex_enter(&park->park_mtx);
				if (park->park_flags & PARKFLAG_ONQUEUE1)
					TAILQ_REMOVE(&pmp->pmp_req_touser,
					    park, park_entries);
				park->park_flags &= ~PARKFLAG_ONQUEUE1;
				if ((park->park_flags & PARKFLAG_ONQUEUE2) == 0)
					puffs_park_release(park, 0);
				else
					mutex_exit(&park->park_mtx);
				mutex_exit(&pmp->pmp_lock);

				rv = error;
			}
		} else {
			rv = preq->preq_rv;
			puffs_park_release(park, 0);
		}

		/*
		 * retake the lock and release.  This makes sure (haha,
		 * I'm humorous) that we don't process the same vnode in
		 * multiple threads due to the lock hacks we have in
		 * puffs_lock().  In reality this is well protected by
		 * the biglock, but once that's gone, well, hopefully
		 * this will be fixed for real.  (and when you read this
		 * comment in 2017 and subsequently barf, my condolences ;).
		 */
		if (rv == 0 && !fstrans_is_owner(mp)) {
			fstrans_start(mp, FSTRANS_NORMAL);
			fstrans_done(mp);
		}
	} else {
		mutex_exit(&park->park_mtx);
	}

#if 0
	/* relock */
	if (vp1)
		KASSERT(vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY) == 0);
	if (vp2)
		KASSERT(vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY) == 0);
#endif

	mutex_enter(&pmp->pmp_lock);
	if (--pmp->pmp_req_waiters == 0) {
		KASSERT(cv_has_waiters(&pmp->pmp_req_waitersink_cv) <= 1);
		cv_signal(&pmp->pmp_req_waitersink_cv);
	}
	mutex_exit(&pmp->pmp_lock);

	return rv;
}


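/*
 * Illustrative sketch only (not compiled in): how a vnode operation
 * typically drives the ping-pong implemented by touser() above.  It
 * reuses the hypothetical struct puffs_vnreq_example / PUFFS_VN_EXAMPLE
 * from the earlier sketch; real request types live in puffs_msgif.h.
 * A nonzero maxdelta would reserve extra room for data the file server
 * appends to the reply.
 */
#if 0
static int
example_vnode_op(struct puffs_mount *pmp, struct vnode *vp, void *cookie)
{
	struct puffs_vnreq_example *vnreq;
	int error;

	vnreq = malloc(sizeof(*vnreq), M_PUFFS, M_WAITOK | M_ZERO);
	vnreq->pvnr_flags = 0;

	/* blocks until the file server replies via puffs_putop() */
	error = puffs_vntouser(pmp, PUFFS_VN_EXAMPLE,
	    vnreq, sizeof(*vnreq), 0, cookie, vp, NULL);
	if (error == 0) {
		/* ... consume reply fields filled in by the server ... */
	}

	free(vnreq, M_PUFFS);
	return error;
}
#endif
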
/*
 * getop: scan through queued requests until:
 *  1) max number of requests satisfied
 *     OR
 *  2) buffer runs out of space
 *     OR
 *  3) nonblocking is set AND there are no operations available
 *     OR
 *  4) at least one operation was transferred AND there are no more waiting
 */
int
puffs_getop(struct puffs_mount *pmp, struct puffs_reqh_get *phg, int nonblock)
{
	struct puffs_park *park;
	struct puffs_req *preq;
	uint8_t *bufpos;
	int error, donesome;

	donesome = error = 0;
	bufpos = phg->phg_buf;

	mutex_enter(&pmp->pmp_lock);
	while (phg->phg_nops == 0 || donesome != phg->phg_nops) {
 again:
		if (pmp->pmp_status != PUFFSTAT_RUNNING) {
			/* if we got some, they don't really matter anymore */
			error = ENXIO;
			goto out;
		}
		if (TAILQ_EMPTY(&pmp->pmp_req_touser)) {
			if (donesome)
				goto out;

			if (nonblock) {
				error = EWOULDBLOCK;
				goto out;
			}

			error = cv_wait_sig(&pmp->pmp_req_waiter_cv,
			    &pmp->pmp_lock);
			if (error)
				goto out;
			else
				goto again;
		}

		park = TAILQ_FIRST(&pmp->pmp_req_touser);
		puffs_park_reference(park);

		/* If it's a goner, don't process any further */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			puffs_park_release(park, 0);
			continue;
		}

		preq = park->park_preq;
		if (phg->phg_buflen < preq->preq_buflen) {
			if (!donesome)
				error = E2BIG;
			puffs_park_release(park, 0);
			goto out;
		}

		TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		mutex_exit(&pmp->pmp_lock);

		DPRINTF(("puffsgetop: get op %" PRIu64 " (%d.), from %p "
		    "len %zu (buflen %zu), target %p\n", preq->preq_id,
		    donesome, preq, park->park_copylen, preq->preq_buflen,
		    bufpos));

		if ((error = copyout(preq, bufpos, park->park_copylen)) != 0) {
			DPRINTF(("puffs_getop: copyout failed: %d\n", error));
			/*
			 * ok, user server is probably trying to cheat.
			 * stuff op back & return error to user.  We need
			 * to take locks in the correct order.
			 */
			mutex_exit(&park->park_mtx);
			mutex_enter(&pmp->pmp_lock);
			mutex_enter(&park->park_mtx);
			if ((park->park_flags & PARKFLAG_WAITERGONE) == 0) {
				TAILQ_INSERT_HEAD(&pmp->pmp_req_touser, park,
				    park_entries);
				park->park_flags |= PARKFLAG_ONQUEUE1;
			}

			if (donesome)
				error = 0;
			puffs_park_release(park, 0);
			goto out;
		}
		bufpos += preq->preq_buflen;
		phg->phg_buflen -= preq->preq_buflen;
		donesome++;

		mutex_enter(&pmp->pmp_lock);
		if (PUFFSOP_WANTREPLY(preq->preq_opclass)) {
			TAILQ_INSERT_TAIL(&pmp->pmp_req_replywait, park,
			    park_entries);
			park->park_flags |= PARKFLAG_ONQUEUE2;
			puffs_park_release(park, 0);
		} else {
			free(preq, M_PUFFS);
			puffs_park_release(park, 1);
		}
	}

 out:
	phg->phg_more = pmp->pmp_req_waiters;
	mutex_exit(&pmp->pmp_lock);

	phg->phg_nops = donesome;

	return error;
}

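/*
 * Illustrative sketch only (not compiled in): the userspace half of the
 * getop/putop ping-pong.  The file server fetches a batch of requests
 * with a struct puffs_reqh_get, handles them, and feeds the replies
 * back with a struct puffs_reqh_put.  The device ioctl names
 * (PUFFSGETOP/PUFFSPUTOP) and the single-request handling are
 * assumptions for illustration; in practice libpuffs wraps this
 * exchange, and being userspace code it would need the usual headers
 * (<sys/ioctl.h>, <string.h>, puffs_msgif.h).  The reply length passed
 * back must not exceed the space the kernel reserved for the op
 * (park_maxlen above).
 */
#if 0
static int
example_server_serve_one(int devfd, void *buf, size_t buflen)
{
	struct puffs_reqh_get phg;
	struct puffs_reqh_put php;
	struct puffs_req *preq;

	memset(&phg, 0, sizeof(phg));
	phg.phg_buf = buf;
	phg.phg_buflen = buflen;
	phg.phg_nops = 0;		/* 0 == as many as fit in buf */

	if (ioctl(devfd, PUFFSGETOP, &phg) == -1)
		return -1;
	if (phg.phg_nops == 0)
		return 0;

	/* handle the first fetched request and reply to it */
	preq = buf;
	preq->preq_rv = 0;		/* result of servicing the op */

	memset(&php, 0, sizeof(php));
	php.php_nops = 1;
	php.php_id = preq->preq_id;
	php.php_buf = preq;
	php.php_buflen = preq->preq_buflen; /* reply length for this op */

	return ioctl(devfd, PUFFSPUTOP, &php);
}
#endif
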
int
puffs_putop(struct puffs_mount *pmp, struct puffs_reqh_put *php)
{
	struct puffs_park *park;
	struct puffs_req tmpreq;
	struct puffs_req *nextpreq;
	void *userbuf;
	uint64_t id;
	size_t reqlen;
	int donesome, error, wgone, release;

	donesome = error = wgone = 0;

	id = php->php_id;
	userbuf = php->php_buf;
	reqlen = php->php_buflen;

	mutex_enter(&pmp->pmp_lock);
	while (donesome != php->php_nops) {
		release = 0;
#ifdef PUFFSDEBUG
		DPRINTF(("puffsputop: searching for %" PRIu64 ", ubuf: %p, "
		    "len %zu\n", id, userbuf, reqlen));
#endif
		TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
			if (park->park_id == id)
				break;
		}

		if (park == NULL) {
			DPRINTF(("puffsputop: no request: %" PRIu64 "\n", id));
			error = EINVAL;
			break;
		}

		puffs_park_reference(park);
		if (reqlen == 0 || reqlen > park->park_maxlen) {
			DPRINTF(("puffsputop: invalid buffer length: "
			    "%zu\n", reqlen));
			error = E2BIG;
			puffs_park_release(park, 0);
			break;
		}
		wgone = park->park_flags & PARKFLAG_WAITERGONE;

		/* check if it's still on the queue after acquiring lock */
		if (park->park_flags & PARKFLAG_ONQUEUE2) {
			TAILQ_REMOVE(&pmp->pmp_req_replywait, park,
			    park_entries);
			park->park_flags &= ~PARKFLAG_ONQUEUE2;
		}

		mutex_exit(&pmp->pmp_lock);

		/*
		 * If the caller has gone south, go to next, collect
		 * $200 and free the structure there instead of a wakeup.
		 * We also need to copyin the header info.  Flag structure
		 * release to mean total and utter destruction.
		 */
		if (wgone) {
			DPRINTF(("puffs_putop: bad service - waiter gone for "
			    "park %p\n", park));
			error = copyin(userbuf, &tmpreq,
			    sizeof(struct puffs_req));
			release = 1;
			if (error)
				goto loopout;
			nextpreq = &tmpreq;
			goto next;
		}

		DPRINTF(("puffsputop: copyin from %p to %p, len %zu\n",
		    userbuf, park->park_preq, reqlen));
		error = copyin(userbuf, park->park_preq, reqlen);
		if (error)
			goto loopout;
		nextpreq = park->park_preq;

 next:
		/* all's well, prepare for next op */
		id = nextpreq->preq_id;
		reqlen = nextpreq->preq_buflen;
		userbuf = nextpreq->preq_nextbuf;
		donesome++;

 loopout:
		if (error)
			park->park_preq->preq_rv = error;

		if (park->park_flags & PARKFLAG_CALL) {
			park->park_done(park->park_preq, park->park_donearg);
			release = 1;
		}

		if (!wgone) {
			DPRINTF(("puffs_putop: flagging done for "
			    "park %p\n", park));

			cv_signal(&park->park_cv);
		}
		puffs_park_release(park, release);

		mutex_enter(&pmp->pmp_lock);
		if (error)
			break;
		wgone = 0;
	}

	mutex_exit(&pmp->pmp_lock);
	php->php_nops -= donesome;

	return error;
}

/*
 * We're dead, kaput, RIP, slightly more than merely pining for the
 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
 * our maker, ceased to be, etcetc.  YASD.  It's a dead FS!
 *
 * Caller must hold puffs mutex.
 */
void
puffs_userdead(struct puffs_mount *pmp)
{
	struct puffs_park *park;

	/*
	 * Mark filesystem status as dying so that operations don't
	 * attempt to march to userspace any longer.
	 */
	pmp->pmp_status = PUFFSTAT_DYING;

	/* signal waiters on REQUEST TO file server queue */
	TAILQ_FOREACH(park, &pmp->pmp_req_touser, park_entries) {
		uint8_t opclass;

		puffs_park_reference(park);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);

		opclass = park->park_preq->preq_opclass;
		park->park_preq->preq_rv = ENXIO;
		TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;

		if (park->park_flags & PARKFLAG_CALL) {
			park->park_done(park->park_preq, park->park_donearg);
			puffs_park_release(park, 1);
		} else if (!PUFFSOP_WANTREPLY(opclass)) {
			free(park->park_preq, M_PUFFS);
			puffs_park_release(park, 1);
		} else {
			park->park_preq->preq_rv = ENXIO;
			cv_signal(&park->park_cv);
			puffs_park_release(park, 0);
		}
	}

	/* signal waiters on RESPONSE FROM file server queue */
	TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
		puffs_park_reference(park);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);

		TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE2;

		KASSERT(PUFFSOP_WANTREPLY(park->park_preq->preq_opclass));

		park->park_preq->preq_rv = ENXIO;
		if (park->park_flags & PARKFLAG_CALL) {
			park->park_done(park->park_preq, park->park_donearg);
			mutex_enter(&park->park_mtx);
			puffs_park_release(park, 1);
		} else {
			cv_signal(&park->park_cv);
			puffs_park_release(park, 0);
		}
	}
}

/* this is probably going to die away at some point? */
/*
 * XXX: currently bitrotted
 */
#if 0
static int
puffssizeop(struct puffs_mount *pmp, struct puffs_sizeop *psop_user)
{
	struct puffs_sizepark *pspark;
	void *kernbuf;
	size_t copylen;
	int error;

	/* locate correct op */
	mutex_enter(&pmp->pmp_lock);
	TAILQ_FOREACH(pspark, &pmp->pmp_req_sizepark, pkso_entries) {
		if (pspark->pkso_reqid == psop_user->pso_reqid) {
			TAILQ_REMOVE(&pmp->pmp_req_sizepark, pspark,
			    pkso_entries);
			break;
		}
	}
	mutex_exit(&pmp->pmp_lock);

	if (pspark == NULL)
		return EINVAL;

	error = 0;
	copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);

	/*
	 * XXX: uvm stuff to avoid bouncy-bouncy copying?
	 */
	if (PUFFS_SIZEOP_UIO(pspark->pkso_reqtype)) {
		kernbuf = malloc(copylen, M_PUFFS, M_WAITOK | M_ZERO);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_IN) {
			error = copyin(psop_user->pso_userbuf,
			    kernbuf, copylen);
			if (error) {
				printf("psop ERROR1 %d\n", error);
				goto escape;
			}
		}
		error = uiomove(kernbuf, copylen, pspark->pkso_uio);
		if (error) {
			printf("uiomove from kernel %p, len %d failed: %d\n",
			    kernbuf, (int)copylen, error);
			goto escape;
		}

		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_OUT) {
			error = copyout(kernbuf,
			    psop_user->pso_userbuf, copylen);
			if (error) {
				printf("psop ERROR2 %d\n", error);
				goto escape;
			}
		}
 escape:
		free(kernbuf, M_PUFFS);
	} else if (PUFFS_SIZEOP_BUF(pspark->pkso_reqtype)) {
		copylen = MAX(pspark->pkso_bufsize, psop_user->pso_bufsize);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_BUF_IN) {
			error = copyin(psop_user->pso_userbuf,
			    pspark->pkso_copybuf, copylen);
		} else {
			error = copyout(pspark->pkso_copybuf,
			    psop_user->pso_userbuf, copylen);
		}
	}
#ifdef DIAGNOSTIC
	else
		panic("puffssizeop: invalid reqtype %d\n",
		    pspark->pkso_reqtype);
#endif /* DIAGNOSTIC */

	return error;
}
#endif