/*	$NetBSD: puffs_msgif.c,v 1.22 2007/03/29 16:04:26 pooka Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.22 2007/03/29 16:04:26 pooka Exp $");

#include <sys/param.h>
#include <sys/fstrans.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/lock.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

/*
 * waitq data structures
 */

/*
 * While a request is going to userspace, park the caller within the
 * kernel.  This is the kernel counterpart of "struct puffs_req".
 */
struct puffs_park {
	struct puffs_req	*park_preq;	/* req followed by buf	*/
	uint64_t		park_id;	/* duplicate of preq_id */

	size_t			park_copylen;	/* userspace copylength	*/
	size_t			park_maxlen;	/* max size in comeback */
	struct buf		*park_bp;	/* bp, ASYNCBIOREAD	*/

	int			park_flags;

	kcondvar_t		park_cv;
	kmutex_t		park_ilck;

	TAILQ_ENTRY(puffs_park) park_entries;
};
#define PARKFLAG_WAITERGONE	0x01
#define PARKFLAG_PROCESSING	0x02
#define PARKFLAG_ASYNCBIOREAD	0x04

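/*
 * Illustrative sketch (not compiled) of a park's normal life cycle for
 * an op that wants a reply, mirroring the helpers further down in this
 * file.  "kbuf", "buflen", "optype" and "pmp" stand for caller-supplied
 * values; error handling is omitted.
 */
#if 0
	struct puffs_park *ppark;
	int error;

	ppark = puffs_parkmem_alloc(1);			/* backed by parkpc */
	ppark->park_preq = kbuf;			/* puffs_req + op args */
	ppark->park_preq->preq_opclass = PUFFSOP_VFS;	/* or PUFFSOP_VN */
	ppark->park_preq->preq_optype = optype;
	ppark->park_maxlen = ppark->park_copylen = buflen;
	ppark->park_flags = 0;
	error = touser(pmp, ppark, puffs_getreqid(pmp), NULL, NULL);
	/* touser() queues the park, sleeps on park_cv for the reply and
	   releases the park before returning */
#endif
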
static struct pool_cache parkpc;
static struct pool parkpool;

static int
makepark(void *arg, void *obj, int flags)
{
	struct puffs_park *park = obj;

	cv_init(&park->park_cv, "puffsrpl");

	return 0;
}

static void
nukepark(void *arg, void *obj)
{
	struct puffs_park *park = obj;

	cv_destroy(&park->park_cv);
}

void
puffs_msgif_init()
{

	pool_init(&parkpool, sizeof(struct puffs_park), 0, 0, 0,
	    "puffprkl", &pool_allocator_nointr, IPL_NONE);
	pool_cache_init(&parkpc, &parkpool, makepark, nukepark, NULL);
}

void
puffs_msgif_destroy()
{

	pool_cache_destroy(&parkpc);
	pool_destroy(&parkpool);
}

void *
puffs_parkmem_alloc(int waitok)
{

	return pool_cache_get(&parkpc, waitok ? PR_WAITOK : PR_NOWAIT);
}

void
puffs_parkmem_free(void *ppark)
{

	pool_cache_put(&parkpc, ppark);
}
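
/*
 * Sketch (not compiled) of how the init/alloc/free/destroy interfaces
 * above are expected to pair up.  Exactly where puffs_msgif_init() and
 * puffs_msgif_destroy() are invoked (attach/detach of the puffs
 * subsystem) is outside this file.
 */
#if 0
	void *ppark;

	puffs_msgif_init();			/* once, before any park */
	ppark = puffs_parkmem_alloc(1);		/* 1: ok to wait for memory */
	/* ... fill in and dispatch the park ... */
	puffs_parkmem_free(ppark);
	puffs_msgif_destroy();			/* once, after the last park */
#endif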


/*
 * Converts a non-FAF op to a FAF.  This simply involves making a copy
 * of the request structure, tagging the copy as a FAF and substituting
 * it for the original in the park.  It is safe to block here, since
 * the original op is not a FAF.
 */
#if 0
static void
puffs_reqtofaf(struct puffs_park *ppark)
{
	struct puffs_req *newpreq;

	KASSERT((ppark->park_preq->preq_opclass & PUFFSOPFLAG_FAF) == 0);

	MALLOC(newpreq, struct puffs_req *, ppark->park_copylen,
	    M_PUFFS, M_ZERO | M_WAITOK);

	memcpy(newpreq, ppark->park_preq, ppark->park_copylen);

	ppark->park_preq = newpreq;
	ppark->park_preq->preq_opclass |= PUFFSOPFLAG_FAF;
}
#endif


/*
 * kernel-user-kernel waitqueues
 */

static int touser(struct puffs_mount *, struct puffs_park *, uint64_t,
		  struct vnode *, struct vnode *);

uint64_t
puffs_getreqid(struct puffs_mount *pmp)
{
	uint64_t rv;

	mutex_enter(&pmp->pmp_lock);
	rv = pmp->pmp_nextreq++;
	mutex_exit(&pmp->pmp_lock);

	return rv;
}

/* vfs request */
int
puffs_vfstouser(struct puffs_mount *pmp, int optype, void *kbuf, size_t buflen)
{
	struct puffs_park *ppark;

	ppark = pool_cache_get(&parkpc, PR_WAITOK);
	ppark->park_preq = kbuf;

	ppark->park_preq->preq_opclass = PUFFSOP_VFS;
	ppark->park_preq->preq_optype = optype;

	ppark->park_maxlen = ppark->park_copylen = buflen;
	ppark->park_flags = 0;

	return touser(pmp, ppark, puffs_getreqid(pmp), NULL, NULL);
}
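
/*
 * Sketch (not compiled) of a typical puffs_vfstouser() caller.  The
 * request structure and optype below ("puffs_vfsreq_foo", PUFFS_VFS_FOO)
 * are placeholders; real callers (see puffs_vfsops.c) use the request
 * structures from puffs_msgif.h.  "pmp" is the relevant struct
 * puffs_mount.
 */
#if 0
	struct puffs_vfsreq_foo *foo_argp;
	int error;

	foo_argp = malloc(sizeof(*foo_argp), M_PUFFS, M_WAITOK | M_ZERO);
	/* ... fill in the arguments the file server needs ... */
	error = puffs_vfstouser(pmp, PUFFS_VFS_FOO,
	    foo_argp, sizeof(*foo_argp));
	/* on success the same buffer now holds the server's reply */
	free(foo_argp, M_PUFFS);
#endif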

void
puffs_suspendtouser(struct puffs_mount *pmp, int status)
{
	struct puffs_vfsreq_suspend *pvfsr_susp;
	struct puffs_park *ppark;

	pvfsr_susp = malloc(sizeof(struct puffs_vfsreq_suspend),
	    M_PUFFS, M_WAITOK | M_ZERO);
	ppark = pool_cache_get(&parkpc, PR_WAITOK);

	pvfsr_susp->pvfsr_status = status;
	ppark->park_preq = (struct puffs_req *)pvfsr_susp;

	ppark->park_preq->preq_opclass = PUFFSOP_VFS | PUFFSOPFLAG_FAF;
	ppark->park_preq->preq_optype = PUFFS_VFS_SUSPEND;

	ppark->park_maxlen = ppark->park_copylen
	    = sizeof(struct puffs_vfsreq_suspend);
	ppark->park_flags = 0;

	(void)touser(pmp, ppark, 0, NULL, NULL);
}

/*
 * vnode level request
 */
int
puffs_vntouser(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, void *cookie,
	struct vnode *vp1, struct vnode *vp2)
{
	struct puffs_park *ppark;

	ppark = pool_cache_get(&parkpc, PR_WAITOK);
	ppark->park_preq = kbuf;

	ppark->park_preq->preq_opclass = PUFFSOP_VN;
	ppark->park_preq->preq_optype = optype;
	ppark->park_preq->preq_cookie = cookie;

	ppark->park_maxlen = ppark->park_copylen = buflen;
	ppark->park_flags = 0;

	return touser(pmp, ppark, puffs_getreqid(pmp), vp1, vp2);
}
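
/*
 * Sketch (not compiled) of a vnode op calling puffs_vntouser().  The
 * request structure and optype ("puffs_vnreq_foo", PUFFS_VN_FOO) are
 * placeholders for the real ones in puffs_msgif.h; "pmp" is the mount's
 * puffs_mount, "cookie" the file server's handle for the vnode and
 * "vp" the locked vnode being operated on.
 */
#if 0
	struct puffs_vnreq_foo *foo_argp;
	int error;

	foo_argp = malloc(sizeof(*foo_argp), M_PUFFS, M_WAITOK | M_ZERO);
	/* ... fill in the op arguments ... */
	error = puffs_vntouser(pmp, PUFFS_VN_FOO,
	    foo_argp, sizeof(*foo_argp), cookie, vp, NULL);
	/* the reply, if any, has been copied back into foo_argp */
	free(foo_argp, M_PUFFS);
#endif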

/*
 * vnode level request, caller-controlled req id
 */
int
puffs_vntouser_req(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, void *cookie, uint64_t reqid,
	struct vnode *vp1, struct vnode *vp2)
{
	struct puffs_park *ppark;

	ppark = pool_cache_get(&parkpc, PR_WAITOK);
	ppark->park_preq = kbuf;

	ppark->park_preq->preq_opclass = PUFFSOP_VN;
	ppark->park_preq->preq_optype = optype;
	ppark->park_preq->preq_cookie = cookie;

	ppark->park_maxlen = ppark->park_copylen = buflen;
	ppark->park_flags = 0;

	return touser(pmp, ppark, reqid, vp1, vp2);
}

int
puffs_vntouser_delta(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, size_t maxdelta,
	void *cookie, struct vnode *vp1, struct vnode *vp2)
{
	struct puffs_park *ppark;

	ppark = pool_cache_get(&parkpc, PR_WAITOK);
	ppark->park_preq = kbuf;

	ppark->park_preq->preq_opclass = PUFFSOP_VN;
	ppark->park_preq->preq_optype = optype;
	ppark->park_preq->preq_cookie = cookie;

	ppark->park_copylen = buflen;
	ppark->park_maxlen = buflen + maxdelta;
	ppark->park_flags = 0;

	return touser(pmp, ppark, puffs_getreqid(pmp), vp1, vp2);
}

/*
 * File server interaction is asynchronous from the caller's
 * perspective: biodone(bp) is signalled from puffs_putop() when
 * the reply arrives.
 */
void
puffs_vntouser_bioread_async(struct puffs_mount *pmp, void *cookie,
	size_t tomove, off_t offset, struct buf *bp,
	struct vnode *vp1, struct vnode *vp2)
{
	struct puffs_park *ppark;
	struct puffs_vnreq_read *read_argp;

	ppark = pool_cache_get(&parkpc, PR_WAITOK);
	MALLOC(read_argp, struct puffs_vnreq_read *,
	    sizeof(struct puffs_vnreq_read) + tomove,
	    M_PUFFS, M_WAITOK | M_ZERO);

	read_argp->pvnr_ioflag = 0;
	read_argp->pvnr_resid = tomove;
	read_argp->pvnr_offset = offset;
	puffs_credcvt(&read_argp->pvnr_cred, FSCRED);

	ppark->park_preq = (void *)read_argp;
	ppark->park_preq->preq_opclass = PUFFSOP_VN;
	ppark->park_preq->preq_optype = PUFFS_VN_READ;
	ppark->park_preq->preq_cookie = cookie;

	ppark->park_copylen = sizeof(struct puffs_vnreq_read);
	ppark->park_maxlen = sizeof(struct puffs_vnreq_read) + tomove;
	ppark->park_bp = bp;
	ppark->park_flags = PARKFLAG_ASYNCBIOREAD;

	(void)touser(pmp, ppark, puffs_getreqid(pmp), vp1, vp2);
}

/*
 * Note: kbuf will be freed later.  It must be allocated from the
 * kernel heap and its ownership is transferred to this function from
 * here on, i.e. the caller is not allowed to use it anymore!
 */
void
puffs_vntouser_faf(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, void *cookie)
{
	struct puffs_park *ppark;

	/* XXX: is it allowable to sleep here? */
	ppark = pool_cache_get(&parkpc, PR_NOWAIT);
	if (ppark == NULL)
		return; /* 2bad */

	ppark->park_preq = kbuf;

	ppark->park_preq->preq_opclass = PUFFSOP_VN | PUFFSOPFLAG_FAF;
	ppark->park_preq->preq_optype = optype;
	ppark->park_preq->preq_cookie = cookie;

	ppark->park_maxlen = ppark->park_copylen = buflen;
	ppark->park_flags = 0;

	(void)touser(pmp, ppark, 0, NULL, NULL);
}
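
/*
 * Sketch (not compiled) of the ownership rule stated above: the buffer
 * handed to puffs_vntouser_faf() must come from the kernel heap
 * (M_PUFFS) and must not be touched by the caller afterwards; it is
 * released by the messaging layer once the op has been transported.
 * "puffs_vnreq_foo" / PUFFS_VN_FOO are placeholder names and "pmp" /
 * "cookie" are caller-supplied values.
 */
#if 0
	struct puffs_vnreq_foo *foo_argp;

	foo_argp = malloc(sizeof(*foo_argp), M_PUFFS, M_WAITOK | M_ZERO);
	/* ... fill in the op arguments ... */
	puffs_vntouser_faf(pmp, PUFFS_VN_FOO,
	    foo_argp, sizeof(*foo_argp), cookie);
	/* foo_argp now belongs to the messaging layer: do NOT free it */
#endif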

void
puffs_cacheop(struct puffs_mount *pmp, struct puffs_park *ppark,
	struct puffs_cacheinfo *pcinfo, size_t pcilen, void *cookie)
{

	ppark->park_preq = (struct puffs_req *)pcinfo;
	ppark->park_preq->preq_opclass = PUFFSOP_CACHE | PUFFSOPFLAG_FAF;
	ppark->park_preq->preq_optype = PCACHE_TYPE_WRITE; /* XXX */
	ppark->park_preq->preq_cookie = cookie;

	ppark->park_maxlen = ppark->park_copylen = pcilen;

	(void)touser(pmp, ppark, 0, NULL, NULL);
}

/*
 * Wait for the userspace ping-pong game in calling process context.
 *
 * This unlocks vnodes if they are supplied.  vp1 is the vnode that
 * comes first in the locking order, i.e. the one which must be locked
 * before accessing vp2.  This is done here so that operations are
 * already ordered in the queue when the vnodes are unlocked (I'm not
 * sure if that's really necessary, but it can't hurt).  OK, OK, maybe
 * there's a slight ugly-factor here too, but let's not worry about that.
 */
static int
touser(struct puffs_mount *pmp, struct puffs_park *ppark, uint64_t reqid,
	struct vnode *vp1, struct vnode *vp2)
{
	struct mount *mp;
	struct puffs_req *preq;
	int rv = 0;

	mp = PMPTOMP(pmp);
	preq = ppark->park_preq;
	preq->preq_id = ppark->park_id = reqid;
	preq->preq_buflen = ALIGN(ppark->park_maxlen);

#if 0
	/*
	 * We don't trap signals currently
	 */
	struct lwp *l = curlwp;

	/*
	 * To support PCATCH, yet another movie: check if there are signals
	 * pending and we are issuing a non-FAF.  If so, return an error
	 * directly UNLESS we are issuing INACTIVE.  In that case, convert
	 * it to a FAF, fire off to the file server and return an error.
	 * Yes, this is bordering on disgusting.  Barfbags are on me.
	 */
	if (PUFFSOP_WANTREPLY(preq->preq_opclass)
	   && (ppark->park_flags & PARKFLAG_ASYNCBIOREAD) == 0
	   && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0)) {
		if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
		    && preq->preq_optype == PUFFS_VN_INACTIVE) {
			puffs_reqtofaf(ppark);
			DPRINTF(("puffs touser: converted to FAF %p\n", ppark));
			rv = EINTR;
		} else {
			return EINTR;
		}
	}
#endif

	/*
	 * Test for the suspension lock.
	 *
	 * Note that we *DO NOT* keep the lock, since that might block
	 * lock acquisition PLUS it would give userlandia control over
	 * the lock.  The operation queue enforces a strict ordering:
	 * when the fs server gets in the op stream, it knows things
	 * are in order.  The kernel locks can't guarantee that for
	 * userspace, in any case.
	 *
	 * BUT: this presents a problem for ops which have a consistency
	 * clause based on more than one operation.  Unfortunately such
	 * operations (read, write) do not reliably work yet.
	 *
	 * Yes, yes, it's wrong, wrong, wrong; I'll be fixing this someday.
	 *
	 * XXX: and there is one more problem.  We sometimes need to
	 * take a lazy lock in case the fs is suspending and we are
	 * executing in the fs server's context.  This might happen
	 * e.g. when the user server triggers a reclaim in the kernel
	 * while the fs is suspending.  It's not a very likely event,
	 * but it needs to be fixed some day.
	 */

	/*
	 * MOREXXX: once PUFFS_WCACHEINFO is enabled, we can't take
	 * the mutex here, since getpages() might be called locked.
	 */
	fstrans_start(mp, FSTRANS_NORMAL);
	mutex_enter(&pmp->pmp_lock);
	fstrans_done(mp);

	if (pmp->pmp_status != PUFFSTAT_RUNNING) {
		mutex_exit(&pmp->pmp_lock);
		pool_cache_put(&parkpc, ppark);
		return ENXIO;
	}

	TAILQ_INSERT_TAIL(&pmp->pmp_req_touser, ppark, park_entries);
	pmp->pmp_req_waiters++;

#if 0
	/*
	 * Don't do the unlock-relock dance yet.  There are a couple of
	 * unsolved issues with it.  If we don't unlock, we can have
	 * processes waiting on vn_lock in case userspace hangs.  But
	 * that can be "solved" by killing the userspace process.  It
	 * would of course be nicer to have antilocking in the userspace
	 * interface protocol itself.  Your patience will be rewarded.
	 */
	/* unlock */
	if (vp2)
		VOP_UNLOCK(vp2, 0);
	if (vp1)
		VOP_UNLOCK(vp1, 0);
#endif

	DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
	    "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, ppark,
	    preq->preq_opclass, preq->preq_optype, ppark->park_flags));

	cv_broadcast(&pmp->pmp_req_waiter_cv);
	selnotify(pmp->pmp_sel, 0);

	if (PUFFSOP_WANTREPLY(preq->preq_opclass)
	    && (ppark->park_flags & PARKFLAG_ASYNCBIOREAD) == 0) {
		int error;

		error = 0; /* XXX: no interrupt for now */

		cv_wait(&ppark->park_cv, &pmp->pmp_lock);
		if (error) {
			ppark->park_flags |= PARKFLAG_WAITERGONE;
			if (ppark->park_flags & PARKFLAG_PROCESSING) {
				cv_wait(&ppark->park_cv, &pmp->pmp_lock);
				rv = preq->preq_rv;
			} else {
				rv = error;
			}
		} else {
			rv = preq->preq_rv;
		}
		mutex_exit(&pmp->pmp_lock);
		pool_cache_put(&parkpc, ppark);

		/*
		 * Retake the lock and release it.  This makes sure (haha,
		 * I'm humorous) that we don't process the same vnode in
		 * multiple threads due to the lock hacks we have in
		 * puffs_lock().  In reality this is well protected by
		 * the biglock, but once that's gone, well, hopefully
		 * this will be fixed for real.  (and when you read this
		 * comment in 2017 and subsequently barf, my condolences ;).
		 */
		if (rv == 0 && !fstrans_is_owner(mp)) {
			fstrans_start(mp, FSTRANS_NORMAL);
			fstrans_done(mp);
		}
	} else {
		mutex_exit(&pmp->pmp_lock);
	}

#if 0
	/* relock */
	if (vp1)
		KASSERT(vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY) == 0);
	if (vp2)
		KASSERT(vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY) == 0);
#endif

	mutex_enter(&pmp->pmp_lock);
	if (--pmp->pmp_req_waiters == 0) {
		KASSERT(cv_has_waiters(&pmp->pmp_req_waitersink_cv) <= 1);
		cv_signal(&pmp->pmp_req_waitersink_cv);
	}
	mutex_exit(&pmp->pmp_lock);

	return rv;
}


/*
 * getop: scan through queued requests until:
 *  1) max number of requests satisfied
 *     OR
 *  2) buffer runs out of space
 *     OR
 *  3) nonblocking is set AND there are no operations available
 *     OR
 *  4) at least one operation was transferred AND there are no more waiting
 */
int
puffs_getop(struct puffs_mount *pmp, struct puffs_reqh_get *phg, int nonblock)
{
	struct puffs_park *park;
	struct puffs_req *preq;
	uint8_t *bufpos;
	int error, donesome;

	donesome = error = 0;
	bufpos = phg->phg_buf;

	mutex_enter(&pmp->pmp_lock);
	while (phg->phg_nops == 0 || donesome != phg->phg_nops) {
 again:
		if (pmp->pmp_status != PUFFSTAT_RUNNING) {
			/* if we got some, they don't really matter anymore */
			error = ENXIO;
			goto out;
		}
		if (TAILQ_EMPTY(&pmp->pmp_req_touser)) {
			if (donesome)
				goto out;

			if (nonblock) {
				error = EWOULDBLOCK;
				goto out;
			}

			error = cv_wait_sig(&pmp->pmp_req_waiter_cv,
			    &pmp->pmp_lock);
			if (error)
				goto out;
			else
				goto again;
		}

		park = TAILQ_FIRST(&pmp->pmp_req_touser);
		preq = park->park_preq;
		if (phg->phg_buflen < preq->preq_buflen) {
			if (!donesome)
				error = E2BIG;
			goto out;
		}
		TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);

		/* If it's a goner, don't process any further */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			panic("impossible for now");
			pool_cache_put(&parkpc, park);
			continue;
		}

		mutex_exit(&pmp->pmp_lock);

		DPRINTF(("puffsgetop: get op %" PRIu64 " (%d.), from %p "
		    "len %zu (buflen %zu), target %p\n", preq->preq_id,
		    donesome, preq, park->park_copylen, preq->preq_buflen,
		    bufpos));

		if ((error = copyout(preq, bufpos, park->park_copylen)) != 0) {
			DPRINTF(("puffs_getop: copyout failed\n"));
			/*
			 * ok, user server is probably trying to cheat.
			 * stuff op back & return error to user
			 */
			 mutex_enter(&pmp->pmp_lock);
			 TAILQ_INSERT_HEAD(&pmp->pmp_req_touser, park,
			     park_entries);

			 if (donesome)
				error = 0;
			 goto out;
		}
		bufpos += preq->preq_buflen;
		phg->phg_buflen -= preq->preq_buflen;
		donesome++;

		mutex_enter(&pmp->pmp_lock);
		if (PUFFSOP_WANTREPLY(preq->preq_opclass)) {
			TAILQ_INSERT_TAIL(&pmp->pmp_req_replywait, park,
			    park_entries);
		} else {
			free(preq, M_PUFFS);
			pool_cache_put(&parkpc, park);
		}
	}

 out:
	phg->phg_more = pmp->pmp_req_waiters;
	mutex_exit(&pmp->pmp_lock);

	phg->phg_nops = donesome;

	return error;
}
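
/*
 * Userspace-side sketch (not compiled in the kernel) of how a file
 * server might fill in and walk a puffs_reqh_get: the kernel lays the
 * requests out back to back, each occupying preq_buflen bytes, exactly
 * mirroring the "bufpos += preq->preq_buflen" advance above.  How the
 * structure is actually submitted (the puffs device / libpuffs) is
 * outside this file; BUFSIZE is a placeholder for the server's choice.
 */
#if 0
	struct puffs_reqh_get phg;
	struct puffs_req *preq;
	uint8_t buf[BUFSIZE], *pos;
	size_t i;

	phg.phg_buf = buf;
	phg.phg_buflen = sizeof(buf);
	phg.phg_nops = 0;		/* 0: as many ops as fit */
	/* ... hand &phg to the kernel; it fills buf and phg_nops ... */

	pos = buf;
	for (i = 0; i < phg.phg_nops; i++) {
		preq = (struct puffs_req *)pos;
		/* dispatch on preq->preq_opclass and preq->preq_optype */
		pos += preq->preq_buflen;
	}
	/* phg.phg_more tells how many ops are still queued in the kernel */
#endif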

int
puffs_putop(struct puffs_mount *pmp, struct puffs_reqh_put *php)
{
	struct puffs_park *park;
	struct puffs_req tmpreq;
	struct puffs_req *nextpreq;
	struct buf *bp;
	void *userbuf;
	uint64_t id;
	size_t reqlen;
	int donesome, error, wgone;

	donesome = error = wgone = 0;

	id = php->php_id;
	userbuf = php->php_buf;
	reqlen = php->php_buflen;

	mutex_enter(&pmp->pmp_lock);
	while (donesome != php->php_nops) {
#ifdef PUFFSDEBUG
		DPRINTF(("puffsputop: searching for %" PRIu64 ", ubuf: %p, "
		    "len %zu\n", id, userbuf, reqlen));
#endif
		TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
			if (park->park_id == id)
				break;
		}

		if (park == NULL) {
			error = EINVAL;
			break;
		}
		TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);
		wgone = park->park_flags & PARKFLAG_WAITERGONE;
		park->park_flags |= PARKFLAG_PROCESSING;
		mutex_exit(&pmp->pmp_lock);

		/*
		 * If the caller has gone south, go to next, collect
		 * $200 and free the structure there instead of wakeup.
		 * We still need to copyin the request header so that
		 * the chain to the next op can be followed.
		 */
		if (wgone) {
			panic("puffs: wgone impossible for now\n");
			DPRINTF(("puffs_putop: bad service - waiter gone for "
			    "park %p\n", park));
			error = copyin(userbuf, &tmpreq,
			    sizeof(struct puffs_req));
			if (error)
				goto loopout;
			nextpreq = &tmpreq;
			goto next;
		}

		if (reqlen == 0 || reqlen > park->park_maxlen) {
			reqlen = park->park_maxlen;
			DPRINTF(("puffsputop: kernel bufsize override: "
			    "%zu\n", reqlen));
		}

		DPRINTF(("puffsputop: copyin from %p to %p, len %zu\n",
		    userbuf, park->park_preq, reqlen));
		error = copyin(userbuf, park->park_preq, reqlen);
		if (error)
			goto loopout;
		nextpreq = park->park_preq;
		bp = park->park_bp;

		if (park->park_flags & PARKFLAG_ASYNCBIOREAD) {
			struct puffs_vnreq_read *read_argp;
			size_t moved;

			bp->b_error = park->park_preq->preq_rv;

			DPRINTF(("puffs_putop: async bioread for park %p, "
			    "bp %p, error %d\n", park, bp, bp->b_error));

			if (bp->b_error == 0) {
				read_argp = (void *)park->park_preq;
				moved = park->park_maxlen
				    - sizeof(struct puffs_vnreq_read)
				    - read_argp->pvnr_resid;
				memcpy(bp->b_data, read_argp->pvnr_data, moved);
				bp->b_resid = bp->b_bcount - moved;
				biodone(bp);
			}
		}

 next:
		/* all's well, prepare for next op */
		id = nextpreq->preq_id;
		reqlen = nextpreq->preq_buflen;
		userbuf = nextpreq->preq_nextbuf;
		donesome++;

 loopout:
		if (error && park->park_preq) {
			park->park_preq->preq_rv = error;
			if (park->park_flags & PARKFLAG_ASYNCBIOREAD) {
				bp = park->park_bp;
				bp->b_error = error;
				bp->b_flags |= B_ERROR;
				biodone(bp);
			}
		}

		mutex_enter(&pmp->pmp_lock);
		if (!wgone) {
			if (park->park_flags & PARKFLAG_ASYNCBIOREAD) {
				free(park->park_preq, M_PUFFS);
				pool_cache_put(&parkpc, park);
			} else {
				DPRINTF(("puffs_putop: flagging done for "
				    "park %p\n", park));

				cv_signal(&park->park_cv);
			}
		}

		if (error)
			break;
		wgone = 0;
	}

	mutex_exit(&pmp->pmp_lock);
	php->php_nops -= donesome;

	return error;
}
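
/*
 * Userspace-side sketch (not compiled in the kernel) of a minimal,
 * single-op reply as consumed by puffs_putop() above: the server fills
 * in preq_rv (and any out-parameters) in the very request it obtained
 * via getop and hands the same buffer back.  Batching several replies
 * uses the preq_id/preq_buflen/preq_nextbuf chaining seen in the loop
 * above; only the simple case is sketched here, and the actual device
 * submission is handled by libpuffs.
 */
#if 0
	struct puffs_reqh_put php;
	struct puffs_req *preq;		/* a request obtained via getop */

	preq->preq_rv = 0;		/* op result; out-args follow */

	php.php_nops = 1;
	php.php_id = preq->preq_id;
	php.php_buf = preq;
	php.php_buflen = preq->preq_buflen;
	/* ... hand &php back to the kernel ... */
#endif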

/*
 * We're dead, kaput, RIP, slightly more than merely pining for the
 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
 * our maker, ceased to be, etcetc.  YASD.  It's a dead FS!
 *
 * Caller must hold puffs mutex.
 */
void
puffs_userdead(struct puffs_mount *pmp)
{
	struct puffs_park *park;
	struct buf *bp;

	/*
	 * Mark filesystem status as dying so that operations don't
	 * attempt to march to userspace any longer.
	 */
	pmp->pmp_status = PUFFSTAT_DYING;

	/* signal waiters on REQUEST TO file server queue */
	TAILQ_FOREACH(park, &pmp->pmp_req_touser, park_entries) {
		uint8_t opclass;

		/* save the opclass, not the return value, for the test below */
		opclass = park->park_preq->preq_opclass;
		park->park_preq->preq_rv = ENXIO;

		TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);

		if (park->park_flags & PARKFLAG_ASYNCBIOREAD) {
			bp = park->park_bp;
			bp->b_error = ENXIO;
			bp->b_flags |= B_ERROR;
			biodone(bp);
			free(park->park_preq, M_PUFFS);
			pool_cache_put(&parkpc, park);
		} else if (!PUFFSOP_WANTREPLY(opclass)) {
			free(park->park_preq, M_PUFFS);
			pool_cache_put(&parkpc, park);
		} else {
			cv_signal(&park->park_cv);
		}
	}

	/* signal waiters on RESPONSE FROM file server queue */
	TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
		KASSERT(PUFFSOP_WANTREPLY(park->park_preq->preq_opclass));

		park->park_preq->preq_rv = ENXIO;
		TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);

		if (park->park_flags & PARKFLAG_ASYNCBIOREAD) {
			bp = park->park_bp;
			bp->b_error = ENXIO;
			bp->b_flags |= B_ERROR;
			biodone(bp);
			free(park->park_preq, M_PUFFS);
			pool_cache_put(&parkpc, park);
		} else {
			cv_signal(&park->park_cv);
		}
	}
}

/* this is probably going to die away at some point? */
/*
 * XXX: currently bitrotted
 */
#if 0
static int
puffssizeop(struct puffs_mount *pmp, struct puffs_sizeop *psop_user)
{
	struct puffs_sizepark *pspark;
	void *kernbuf;
	size_t copylen;
	int error;

	/* locate correct op */
	mutex_enter(&pmp->pmp_lock);
	TAILQ_FOREACH(pspark, &pmp->pmp_req_sizepark, pkso_entries) {
		if (pspark->pkso_reqid == psop_user->pso_reqid) {
			TAILQ_REMOVE(&pmp->pmp_req_sizepark, pspark,
			    pkso_entries);
			break;
		}
	}
	mutex_exit(&pmp->pmp_lock);

	if (pspark == NULL)
		return EINVAL;

	error = 0;
	copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);

	/*
	 * XXX: uvm stuff to avoid bouncy-bouncy copying?
	 */
	if (PUFFS_SIZEOP_UIO(pspark->pkso_reqtype)) {
		kernbuf = malloc(copylen, M_PUFFS, M_WAITOK | M_ZERO);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_IN) {
			error = copyin(psop_user->pso_userbuf,
			    kernbuf, copylen);
			if (error) {
				printf("psop ERROR1 %d\n", error);
				goto escape;
			}
		}
		error = uiomove(kernbuf, copylen, pspark->pkso_uio);
		if (error) {
			printf("uiomove from kernel %p, len %d failed: %d\n",
			    kernbuf, (int)copylen, error);
			goto escape;
		}

		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_OUT) {
			error = copyout(kernbuf,
			    psop_user->pso_userbuf, copylen);
			if (error) {
				printf("psop ERROR2 %d\n", error);
				goto escape;
			}
		}
 escape:
		free(kernbuf, M_PUFFS);
	} else if (PUFFS_SIZEOP_BUF(pspark->pkso_reqtype)) {
		copylen = MAX(pspark->pkso_bufsize, psop_user->pso_bufsize);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_BUF_IN) {
			error = copyin(psop_user->pso_userbuf,
			    pspark->pkso_copybuf, copylen);
		} else {
			error = copyout(pspark->pkso_copybuf,
			    psop_user->pso_userbuf, copylen);
		}
	}
#ifdef DIAGNOSTIC
	else
		panic("puffssizeop: invalid reqtype %d\n",
		    pspark->pkso_reqtype);
#endif /* DIAGNOSTIC */

	return error;
}
#endif