/*	$NetBSD: puffs_msgif.c,v 1.52 2007/11/07 00:23:20 ad Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.52 2007/11/07 00:23:20 ad Exp $");

#include <sys/param.h>
#include <sys/fstrans.h>
#include <sys/kmem.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/proc.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

/*
 * waitq data structures
 */

/*
 * While a request is going to userspace, park the caller within the
 * kernel.  This is the kernel counterpart of "struct puffs_req".
 */
struct puffs_msgpark {
        struct puffs_req        *park_preq;     /* req followed by buf  */

        size_t                  park_copylen;   /* userspace copylength */
        size_t                  park_maxlen;    /* max size in comeback */

        parkdone_fn             park_done;      /* "biodone" a'la puffs */
        void                    *park_donearg;

        int                     park_flags;
        int                     park_refcount;

        kcondvar_t              park_cv;
        kmutex_t                park_mtx;

        TAILQ_ENTRY(puffs_msgpark) park_entries;
};
#define PARKFLAG_WAITERGONE     0x01
#define PARKFLAG_DONE           0x02
#define PARKFLAG_ONQUEUE1       0x04
#define PARKFLAG_ONQUEUE2       0x08
#define PARKFLAG_CALL           0x10
#define PARKFLAG_WANTREPLY      0x20
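
/*
 * Rough life cycle of a park, as implemented by the code below: it is
 * allocated with one reference and PARKFLAG_WANTREPLY set, queued on
 * pmp_msg_touser (PARKFLAG_ONQUEUE1) by touser(), handed to the transport
 * by puffs_msgif_getout() and, if a reply is wanted, moved to
 * pmp_msg_replywait (PARKFLAG_ONQUEUE2) by puffs_msgif_releaseout().
 * puffs_msgif_incoming() copies the reply back (or runs park_done for
 * PARKFLAG_CALL parks), sets PARKFLAG_DONE and drops the queue references.
 * PARKFLAG_WAITERGONE marks a park whose sleeping caller was interrupted
 * and will not come back to read the reply.
 */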

static pool_cache_t parkpc;

static int
makepark(void *arg, void *obj, int flags)
{
        struct puffs_msgpark *park = obj;

        mutex_init(&park->park_mtx, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&park->park_cv, "puffsrpl");

        return 0;
}

static void
nukepark(void *arg, void *obj)
{
        struct puffs_msgpark *park = obj;

        cv_destroy(&park->park_cv);
        mutex_destroy(&park->park_mtx);
}

void
puffs_msgif_init()
{

        parkpc = pool_cache_init(sizeof(struct puffs_msgpark), 0, 0, 0,
            "puffprkl", NULL, IPL_NONE, makepark, nukepark, NULL);
}

void
puffs_msgif_destroy()
{

        pool_cache_destroy(parkpc);
}

static int alloced;

static struct puffs_msgpark *
puffs_msgpark_alloc(int waitok)
{
        struct puffs_msgpark *park;

        park = pool_cache_get(parkpc, waitok ? PR_WAITOK : PR_NOWAIT);
        if (park == NULL)
                return park;

        park->park_refcount = 1;
        park->park_preq = NULL;
        park->park_flags = PARKFLAG_WANTREPLY;

        return park;
}

static void
puffs_msgpark_reference(struct puffs_msgpark *park)
{

        KASSERT(mutex_owned(&park->park_mtx));
        park->park_refcount++;
}

/*
 * Release reference to park structure.
 */
static void
puffs_msgpark_release1(struct puffs_msgpark *park, int howmany)
{
        struct puffs_req *preq = park->park_preq;
        int refcnt;

        KASSERT(mutex_owned(&park->park_mtx));
        refcnt = park->park_refcount -= howmany;
        mutex_exit(&park->park_mtx);

        KASSERT(refcnt >= 0);

        if (refcnt == 0) {
                alloced--;
                if (preq)
                        kmem_free(preq, park->park_maxlen);
                pool_cache_put(parkpc, park);
        }
}
#define puffs_msgpark_release(a) puffs_msgpark_release1(a, 1)

#ifdef PUFFSDEBUG
static void
parkdump(struct puffs_msgpark *park)
{

        DPRINTF(("park %p, preq %p, id %" PRIu64 "\n"
            "\tcopy %zu, max %zu - done: %p/%p\n"
            "\tflags 0x%08x, refcount %d, cv/mtx: %p/%p\n",
            park, park->park_preq, park->park_preq->preq_id,
            park->park_copylen, park->park_maxlen,
            park->park_done, park->park_donearg,
            park->park_flags, park->park_refcount,
            &park->park_cv, &park->park_mtx));
}

static void
parkqdump(struct puffs_wq *q, int dumpall)
{
        struct puffs_msgpark *park;
        int total = 0;

        TAILQ_FOREACH(park, q, park_entries) {
                if (dumpall)
                        parkdump(park);
                total++;
        }
        DPRINTF(("puffs waitqueue at %p dumped, %d total\n", q, total));
}
#endif /* PUFFSDEBUG */

/*
 * A word about locking in the park structures: the lock protects the
 * fields of the *park* structure (not preq) and acts as an interlock
 * in cv operations.  The lock is always internal to this module and
 * callers do not need to worry about it.
 */

int
puffs_msgmem_alloc(size_t len, struct puffs_msgpark **ppark, void **mem,
    int cansleep)
{
        struct puffs_msgpark *park;
        void *m;

        m = kmem_zalloc(len, cansleep ? KM_SLEEP : KM_NOSLEEP);
        if (m == NULL) {
                KASSERT(cansleep == 0);
                return ENOMEM;
        }

        park = puffs_msgpark_alloc(cansleep);
        if (park == NULL) {
                KASSERT(cansleep == 0);
                kmem_free(m, len);
                return ENOMEM;
        }

        park->park_preq = m;
        park->park_maxlen = len;

        *ppark = park;
        *mem = m;

        return 0;
}
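
/*
 * Example sketch (not compiled): a typical caller allocates a message
 * laid out as "struct puffs_req followed by op-specific data", fills in
 * the op-specific part and later drops its hold with
 * puffs_msgmem_release().  The payload structure and field names below
 * are purely illustrative.
 */
#if 0
struct example_vnmsg {
        struct puffs_req        ex_preq;        /* request header first */
        size_t                  ex_datalen;     /* illustrative payload */
        uint8_t                 ex_data[64];
};

static int
example_msg_alloc(struct puffs_mount *pmp)
{
        struct puffs_msgpark *park;
        struct example_vnmsg *msg;
        int error;

        /* cansleep=1 may sleep; with cansleep=0 ENOMEM is possible */
        error = puffs_msgmem_alloc(sizeof(struct example_vnmsg),
            &park, (void **)&msg, 1);
        if (error)
                return error;

        msg->ex_datalen = 0;            /* fill in op-specific fields */

        /* ... hand the park to puffs_msg_vn() / puffs_msg_vfs() ... */

        puffs_msgmem_release(park);     /* drop our reference */
        return 0;
}
#endif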

void
puffs_msgmem_release(struct puffs_msgpark *park)
{

        if (park == NULL)
                return;

        mutex_enter(&park->park_mtx);
        puffs_msgpark_release(park);
}

void
puffs_msg_setfaf(struct puffs_msgpark *park)
{

        park->park_flags &= ~PARKFLAG_WANTREPLY;
}

/*
 * kernel-user-kernel waitqueues
 */

static int touser(struct puffs_mount *, struct puffs_msgpark *);

static uint64_t
puffs_getmsgid(struct puffs_mount *pmp)
{
        uint64_t rv;

        mutex_enter(&pmp->pmp_lock);
        rv = pmp->pmp_nextmsgid++;
        mutex_exit(&pmp->pmp_lock);

        return rv;
}

/* vfs request */
int
puffs_msg_vfs(struct puffs_mount *pmp, struct puffs_msgpark *park, int optype)
{

        park->park_preq->preq_opclass = PUFFSOP_VFS;
        park->park_preq->preq_optype = optype;

        park->park_copylen = park->park_maxlen;

        return touser(pmp, park);
}
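
/*
 * Example sketch (not compiled): sending a vfs-level request.  The optype
 * is passed straight through; calling puffs_msg_setfaf() beforehand turns
 * the message into fire-and-forget, in which case touser() will not wait
 * for a reply.  The function and parameter names are illustrative.
 */
#if 0
static int
example_msg_vfs(struct puffs_mount *pmp, int optype, int waitforit)
{
        struct puffs_msgpark *park;
        struct puffs_req *preq;
        int error;

        error = puffs_msgmem_alloc(sizeof(struct puffs_req),
            &park, (void **)&preq, 1);
        if (error)
                return error;

        if (!waitforit)
                puffs_msg_setfaf(park);         /* don't wait for a reply */

        error = puffs_msg_vfs(pmp, park, optype);

        puffs_msgmem_release(park);
        return error;
}
#endif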

/*
 * vnode level request
 */
int
puffs_msg_vn(struct puffs_mount *pmp, struct puffs_msgpark *park,
    int optype, size_t delta, struct vnode *vp_opc, struct vnode *vp_aux)
{
        struct puffs_req *preq;
        void *cookie = VPTOPNC(vp_opc);
        struct puffs_node *pnode;
        int rv;

        park->park_preq->preq_opclass = PUFFSOP_VN;
        park->park_preq->preq_optype = optype;
        park->park_preq->preq_cookie = cookie;

        KASSERT(delta < park->park_maxlen); /* "<=" wouldn't make sense */
        park->park_copylen = park->park_maxlen - delta;

        rv = touser(pmp, park);

        /*
         * Check if the user server requests that inactive be called
         * when the time is right.
         */
        preq = park->park_preq;
        if (preq->preq_setbacks & PUFFS_SETBACK_INACT_N1) {
                pnode = vp_opc->v_data;
                pnode->pn_stat |= PNODE_DOINACT;
        }
        if (preq->preq_setbacks & PUFFS_SETBACK_INACT_N2) {
                /* if no vp_aux, just ignore */
                if (vp_aux) {
                        pnode = vp_aux->v_data;
                        pnode->pn_stat |= PNODE_DOINACT;
                }
        }
        if (preq->preq_setbacks & PUFFS_SETBACK_NOREF_N1) {
                pnode = vp_opc->v_data;
                pnode->pn_stat |= PNODE_NOREFS;
        }
        if (preq->preq_setbacks & PUFFS_SETBACK_NOREF_N2) {
                /* if no vp_aux, just ignore */
                if (vp_aux) {
                        pnode = vp_aux->v_data;
                        pnode->pn_stat |= PNODE_NOREFS;
                }
        }

        return rv;
}
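
/*
 * Example sketch (not compiled): the "delta" argument is the number of
 * trailing bytes in the message that only the file server fills in, so
 * they need not be copied out to userspace on the way there.  A
 * read-style request with a data buffer at the end might look roughly
 * like this; the structure and its fields are illustrative only.
 */
#if 0
struct example_readmsg {
        struct puffs_req        exr_preq;
        off_t                   exr_offset;
        size_t                  exr_resid;
        uint8_t                 exr_data[4096]; /* filled in by the server */
};

static int
example_msg_read(struct puffs_mount *pmp, struct vnode *vp, off_t off)
{
        struct puffs_msgpark *park;
        struct example_readmsg *rmsg;
        int error;

        error = puffs_msgmem_alloc(sizeof(struct example_readmsg),
            &park, (void **)&rmsg, 1);
        if (error)
                return error;
        rmsg->exr_offset = off;
        rmsg->exr_resid = sizeof(rmsg->exr_data);

        /* don't copy the empty data area out, only back in */
        error = puffs_msg_vn(pmp, park, PUFFS_VN_READ,
            sizeof(rmsg->exr_data), vp, NULL);

        /* on success, rmsg->exr_data now holds what the server wrote */
        puffs_msgmem_release(park);
        return error;
}
#endif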

void
puffs_msg_vncall(struct puffs_mount *pmp, struct puffs_msgpark *park,
    int optype, size_t delta, parkdone_fn donefn, void *donearg,
    struct vnode *vp_opc)
{
        void *cookie = VPTOPNC(vp_opc);

        park->park_preq->preq_opclass = PUFFSOP_VN;
        park->park_preq->preq_optype = optype;
        park->park_preq->preq_cookie = cookie;

        KASSERT(delta < park->park_maxlen);
        park->park_copylen = park->park_maxlen - delta;
        park->park_done = donefn;
        park->park_donearg = donearg;
        park->park_flags |= PARKFLAG_CALL;

        (void) touser(pmp, park);
}
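
/*
 * Example sketch (not compiled): an asynchronous request whose reply is
 * handled by a callback instead of a sleeping caller.  This assumes
 * parkdone_fn takes (pmp, preq, arg), which matches how park_done is
 * invoked in this file; the callback, its argument and the choice of
 * optype are illustrative only.
 */
#if 0
static void
example_donefn(struct puffs_mount *pmp, struct puffs_req *preq, void *arg)
{

        /* runs from puffs_msgif_incoming() when the reply arrives */
        printf("example: async op finished with rv %d\n", preq->preq_rv);
}

static void
example_msg_vncall(struct puffs_mount *pmp, struct vnode *vp)
{
        struct puffs_msgpark *park;
        struct puffs_req *preq;

        if (puffs_msgmem_alloc(sizeof(struct puffs_req),
            &park, (void **)&preq, 1) != 0)
                return;

        /* PUFFS_VN_INACTIVE stands in for any vnode-level optype */
        puffs_msg_vncall(pmp, park, PUFFS_VN_INACTIVE, 0,
            example_donefn, NULL, vp);
        puffs_msgmem_release(park);
}
#endif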

int
puffs_msg_raw(struct puffs_mount *pmp, struct puffs_msgpark *park)
{

        park->park_copylen = park->park_maxlen;

        return touser(pmp, park);
}

void
puffs_msg_errnotify(struct puffs_mount *pmp, uint8_t type, int error,
    const char *str, void *cookie)
{
        struct puffs_msgpark *park;
        struct puffs_error *perr;

        puffs_msgmem_alloc(sizeof(struct puffs_error), &park,
            (void **)&perr, 1);

        perr->perr_error = error;
        strlcpy(perr->perr_str, str, sizeof(perr->perr_str));

        park->park_preq->preq_opclass |= PUFFSOP_ERROR | PUFFSOPFLAG_FAF;
        park->park_preq->preq_optype = type;
        park->park_preq->preq_cookie = cookie;

        park->park_copylen = park->park_maxlen;

        (void)touser(pmp, park);
}

/*
 * Wait for the userspace ping-pong game in calling process context,
 * unless this is a FAF / async call, in which case we just enqueue
 * the request and return immediately.
 */
static int
touser(struct puffs_mount *pmp, struct puffs_msgpark *park)
{
        struct lwp *l = curlwp;
        struct mount *mp;
        struct puffs_req *preq;
        int rv = 0;

        mp = PMPTOMP(pmp);
        preq = park->park_preq;
        preq->preq_buflen = park->park_maxlen;
        KASSERT(preq->preq_id == 0);

        if ((park->park_flags & PARKFLAG_WANTREPLY) == 0)
                preq->preq_opclass |= PUFFSOPFLAG_FAF;
        else
                preq->preq_id = puffs_getmsgid(pmp);

        /* fill in caller information */
        preq->preq_pid = l->l_proc->p_pid;
        preq->preq_lid = l->l_lid;

        /*
         * To support cv_sig, yet another movie: check if there are signals
         * pending and we are issuing a non-FAF.  If so, return an error
         * directly UNLESS we are issuing INACTIVE.  In that case, convert
         * it to a FAF, fire it off to the file server and return an error.
         * Yes, this is bordering on disgusting.  Barfbags are on me.
         */
        if ((park->park_flags & PARKFLAG_WANTREPLY)
            && (park->park_flags & PARKFLAG_CALL) == 0
            && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0)) {
                if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
                    && preq->preq_optype == PUFFS_VN_INACTIVE) {
                        park->park_preq->preq_opclass |= PUFFSOPFLAG_FAF;
                        park->park_flags &= ~PARKFLAG_WANTREPLY;
                        DPRINTF(("puffs touser: converted to FAF %p\n", park));
                        rv = EINTR;
                } else {
                        return EINTR;
                }
        }

        /*
         * Test for the suspension lock.
         *
         * Note that we *DO NOT* keep the lock, since holding it might
         * block later lock acquisition PLUS it would give userlandia
         * control over the lock.  The operation queue enforces a strict
         * ordering: when the fs server gets into the op stream, it knows
         * things are in order.  The kernel locks can't guarantee that
         * for userspace, in any case.
         *
         * BUT: this presents a problem for ops which have a consistency
         * clause based on more than one operation.  Unfortunately such
         * operations (read, write) do not reliably work yet.
         *
         * Ya, Ya, it's wrong wong wrong, me be fixink this someday.
         *
         * XXX: and there is one more problem.  We sometimes need to
         * take a lazy lock in case the fs is suspending and we are
         * executing as the fs server context.  This might happen
         * e.g. in the case that the user server triggers a reclaim
         * in the kernel while the fs is suspending.  It's not a very
         * likely event, but it needs to be fixed some day.
         */

        /*
         * MOREXXX: once PUFFS_WCACHEINFO is enabled, we can't take
         * the mutex here, since getpages() might be called locked.
         */
        fstrans_start(mp, FSTRANS_NORMAL);
        mutex_enter(&pmp->pmp_lock);
        fstrans_done(mp);

        if (pmp->pmp_status != PUFFSTAT_RUNNING) {
                mutex_exit(&pmp->pmp_lock);
                return ENXIO;
        }

#ifdef PUFFSDEBUG
        parkqdump(&pmp->pmp_msg_touser, puffsdebug > 1);
        parkqdump(&pmp->pmp_msg_replywait, puffsdebug > 1);
#endif

        mutex_enter(&park->park_mtx);
        TAILQ_INSERT_TAIL(&pmp->pmp_msg_touser, park, park_entries);
        park->park_flags |= PARKFLAG_ONQUEUE1;
        puffs_mp_reference(pmp);
        pmp->pmp_msg_touser_count++;
        mutex_exit(&pmp->pmp_lock);

        DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
            "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, park,
            preq->preq_opclass, preq->preq_optype, park->park_flags));

        cv_broadcast(&pmp->pmp_msg_waiter_cv);
        selnotify(pmp->pmp_sel, 0);

        if ((park->park_flags & PARKFLAG_WANTREPLY)
            && (park->park_flags & PARKFLAG_CALL) == 0) {
                int error;

                error = cv_wait_sig(&park->park_cv, &park->park_mtx);
                DPRINTF(("puffs_touser: waiter for %p woke up with %d\n",
                    park, error));
                if (error) {
                        park->park_flags |= PARKFLAG_WAITERGONE;
                        if (park->park_flags & PARKFLAG_DONE) {
                                rv = preq->preq_rv;
                        } else {
                                /*
                                 * Ok, we marked it as going away, but we
                                 * still need to do the queue ops.  Take the
                                 * locks in the correct order.
                                 *
                                 * We don't want to release our reference
                                 * if the park is on the replywait queue,
                                 * to avoid sending an error to the file
                                 * server.  The putop() code will DTRT.
                                 */
                                mutex_exit(&park->park_mtx);
                                mutex_enter(&pmp->pmp_lock);
                                mutex_enter(&park->park_mtx);

                                /* remove from queue1 */
                                if (park->park_flags & PARKFLAG_ONQUEUE1) {
                                        TAILQ_REMOVE(&pmp->pmp_msg_touser,
                                            park, park_entries);
                                        pmp->pmp_msg_touser_count--;
                                        park->park_flags &= ~PARKFLAG_ONQUEUE1;
                                }

                                /*
                                 * If it's already waiting for a response,
                                 * boost the reference count.  The park will
                                 * get nuked once the response arrives from
                                 * the file server.
                                 */
                                if (park->park_flags & PARKFLAG_ONQUEUE2)
                                        puffs_msgpark_reference(park);

                                mutex_exit(&pmp->pmp_lock);

                                rv = error;
                        }
                } else {
                        rv = preq->preq_rv;
                }

                /*
                 * Retake the lock and release.  This makes sure (haha,
                 * I'm humorous) that we don't process the same vnode in
                 * multiple threads due to the lock hacks we have in
                 * puffs_lock().  In reality this is well protected by
                 * the biglock, but once that's gone, well, hopefully
                 * this will be fixed for real.  (and when you read this
                 * comment in 2017 and subsequently barf, my condolences ;)
                 */
                if (rv == 0 && !fstrans_is_owner(mp)) {
                        fstrans_start(mp, FSTRANS_NORMAL);
                        fstrans_done(mp);
                }

        } else {
                /*
                 * Take an extra reference for FAF, i.e. don't free us
                 * immediately upon return to the caller, but rather
                 * only when the message has been transported.
                 */
                puffs_msgpark_reference(park);
        }

        mutex_exit(&park->park_mtx);

        mutex_enter(&pmp->pmp_lock);
        puffs_mp_release(pmp);
        mutex_exit(&pmp->pmp_lock);

        return rv;
}
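
/*
 * Summary of the round trip implemented above and below: touser() queues
 * the park on pmp_msg_touser and wakes the transport (pmp_msg_waiter_cv,
 * selnotify).  The transport pulls it off with puffs_msgif_getout(),
 * ships it to the file server and calls puffs_msgif_releaseout(), which
 * parks it on pmp_msg_replywait.  When the server answers, the reply is
 * fed to puffs_msgif_incoming(), which matches it by preq_id, copies it
 * back (or runs the PARKFLAG_CALL callback) and wakes the original
 * caller sleeping on park_cv.
 */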

/*
 * Get the next request in the outgoing queue.  "maxsize" controls the
 * size the caller can accommodate and "nonblock" requests that we do not
 * block while waiting for input.  Handles all locking internally.
 */
int
puffs_msgif_getout(void *this, size_t maxsize, int nonblock,
    uint8_t **data, size_t *dlen, void **parkptr)
{
        struct puffs_mount *pmp = this;
        struct puffs_msgpark *park;
        struct puffs_req *preq;
        int error;

        error = 0;
        mutex_enter(&pmp->pmp_lock);
        puffs_mp_reference(pmp);
        for (;;) {
                /* RIP? */
                if (pmp->pmp_status != PUFFSTAT_RUNNING) {
                        error = ENXIO;
                        break;
                }

                /* need platinum yendorian express card? */
                if (TAILQ_EMPTY(&pmp->pmp_msg_touser)) {
                        DPRINTF(("puffs_getout: no outgoing op, "));
                        if (nonblock) {
                                DPRINTF(("returning EWOULDBLOCK\n"));
                                error = EWOULDBLOCK;
                                break;
                        }
                        DPRINTF(("waiting ...\n"));

                        error = cv_wait_sig(&pmp->pmp_msg_waiter_cv,
                            &pmp->pmp_lock);
                        if (error)
                                break;
                        else
                                continue;
                }

                park = TAILQ_FIRST(&pmp->pmp_msg_touser);
                if (park == NULL)
                        continue;

                mutex_enter(&park->park_mtx);
                puffs_msgpark_reference(park);

                DPRINTF(("puffs_getout: found park at %p, ", park));

                /* If it's a goner, don't process any further */
                if (park->park_flags & PARKFLAG_WAITERGONE) {
                        DPRINTF(("waitergone!\n"));
                        puffs_msgpark_release(park);
                        continue;
                }

                /* check size */
                preq = park->park_preq;
                if (maxsize < preq->preq_frhdr.pfr_len) {
                        DPRINTF(("buffer too small\n"));
                        puffs_msgpark_release(park);
                        error = E2BIG;
                        break;
                }

                DPRINTF(("returning\n"));

                /*
                 * Ok, we found what we came for.  Release it from the
                 * outgoing queue but do not unlock.  We will unlock
                 * only after we "releaseout" it to avoid complications:
                 * otherwise it is (theoretically) possible for userland
                 * to race us into "put" before we have a chance to put
                 * this baby on the receiving queue.
                 */
                TAILQ_REMOVE(&pmp->pmp_msg_touser, park, park_entries);
                KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
                park->park_flags &= ~PARKFLAG_ONQUEUE1;
                mutex_exit(&park->park_mtx);

                pmp->pmp_msg_touser_count--;
                KASSERT(pmp->pmp_msg_touser_count >= 0);

                break;
        }
        puffs_mp_release(pmp);
        mutex_exit(&pmp->pmp_lock);

        if (error == 0) {
                *data = (uint8_t *)preq;
                preq->preq_frhdr.pfr_len = park->park_copylen;
                preq->preq_frhdr.pfr_alloclen = park->park_maxlen;
                preq->preq_frhdr.pfr_type = preq->preq_opclass; /* yay! */
                *dlen = preq->preq_frhdr.pfr_len;
                *parkptr = park;
        }

        return error;
}
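
/*
 * Example sketch (not compiled): how a transport might drive the
 * getout/releaseout pair.  The real transport lives outside this file;
 * the function name and the uio handling below are illustrative only.
 */
#if 0
static int
example_transport_read(struct puffs_mount *pmp, struct uio *uio, int nonblock)
{
        uint8_t *data;
        size_t dlen;
        void *park;
        int error;

        error = puffs_msgif_getout(pmp, uio->uio_resid, nonblock,
            &data, &dlen, &park);
        if (error)
                return error;

        /* copy the request out; tell releaseout how it went */
        error = uiomove(data, dlen, uio);
        puffs_msgif_releaseout(pmp, park, error);

        return error;
}
#endif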

/*
 * Release outgoing structure.  Now, depending on the success of the
 * outgoing send, it is either going onto the result waiting queue
 * or the death chamber.
 */
void
puffs_msgif_releaseout(void *this, void *parkptr, int status)
{
        struct puffs_mount *pmp = this;
        struct puffs_msgpark *park = parkptr;

        DPRINTF(("puffs_releaseout: returning park %p, errno %d: ",
            park, status));
        mutex_enter(&pmp->pmp_lock);
        mutex_enter(&park->park_mtx);
        if (park->park_flags & PARKFLAG_WANTREPLY) {
                if (status == 0) {
                        DPRINTF(("enqueue replywait\n"));
                        TAILQ_INSERT_TAIL(&pmp->pmp_msg_replywait, park,
                            park_entries);
                        park->park_flags |= PARKFLAG_ONQUEUE2;
                } else {
                        DPRINTF(("error path!\n"));
                        park->park_preq->preq_rv = status;
                        park->park_flags |= PARKFLAG_DONE;
                        cv_signal(&park->park_cv);
                }
                puffs_msgpark_release(park);
        } else {
                DPRINTF(("release\n"));
                puffs_msgpark_release1(park, 2);
        }
        mutex_exit(&pmp->pmp_lock);
}

/*
 * XXX: locking with this one?
 */
void
puffs_msgif_incoming(void *this, void *buf)
{
        struct puffs_mount *pmp = this;
        struct puffs_req *preq = buf;
        struct puffs_frame *pfr = &preq->preq_frhdr;
        struct puffs_msgpark *park;
        int release, wgone;

        /* XXX */
        if (PUFFSOP_OPCLASS(preq->preq_opclass) != PUFFSOP_VN
            && PUFFSOP_OPCLASS(preq->preq_opclass) != PUFFSOP_VFS)
                return;

        mutex_enter(&pmp->pmp_lock);

        /* Locate waiter */
        TAILQ_FOREACH(park, &pmp->pmp_msg_replywait, park_entries) {
                if (park->park_preq->preq_id == preq->preq_id)
                        break;
        }
        if (park == NULL) {
                DPRINTF(("puffs_msgif_incoming: no request: %" PRIu64 "\n",
                    preq->preq_id));
                mutex_exit(&pmp->pmp_lock);
                return; /* XXX send error */
        }

        mutex_enter(&park->park_mtx);
        puffs_msgpark_reference(park);
        if (pfr->pfr_len > park->park_maxlen) {
                DPRINTF(("puffs_msgif_incoming: invalid buffer length: "
                    "%zu (req %" PRIu64 ")\n", pfr->pfr_len, preq->preq_id));
                park->park_preq->preq_rv = EPROTO;
                cv_signal(&park->park_cv);
                puffs_msgpark_release(park);
                mutex_exit(&pmp->pmp_lock);
                return; /* XXX: error */
        }
        wgone = park->park_flags & PARKFLAG_WAITERGONE;

        KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
        TAILQ_REMOVE(&pmp->pmp_msg_replywait, park, park_entries);
        park->park_flags &= ~PARKFLAG_ONQUEUE2;
        mutex_exit(&pmp->pmp_lock);

        if (wgone) {
                DPRINTF(("puffs_msgif_incoming: bad service - waiter gone "
                    "for park %p\n", park));
                release = 2;
        } else {
                if (park->park_flags & PARKFLAG_CALL) {
                        DPRINTF(("puffs_msgif_incoming: call for %p, arg %p\n",
                            park->park_preq, park->park_donearg));
                        park->park_done(pmp, buf, park->park_donearg);
                        release = 2;
                } else {
                        /* XXX: yes, I know */
                        memcpy(park->park_preq, buf, pfr->pfr_len);
                        release = 1;
                }
        }

        if (!wgone) {
                DPRINTF(("puffs_msgif_incoming: flagging done for "
                    "park %p\n", park));
                cv_signal(&park->park_cv);
        }

        park->park_flags |= PARKFLAG_DONE;
        puffs_msgpark_release1(park, release);
}
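
/*
 * Example sketch (not compiled): the write side of the transport copies
 * a reply frame from the file server into a kernel buffer and feeds it
 * to puffs_msgif_incoming().  The buffer management here is illustrative
 * only; incoming() does not keep a reference to the buffer.
 */
#if 0
static int
example_transport_write(struct puffs_mount *pmp, struct uio *uio)
{
        struct puffs_req *preq;
        size_t len = uio->uio_resid;
        int error;

        if (len < sizeof(struct puffs_req))
                return EINVAL;

        preq = kmem_alloc(len, KM_SLEEP);
        error = uiomove(preq, len, uio);
        if (error == 0)
                puffs_msgif_incoming(pmp, preq);
        kmem_free(preq, len);

        return error;
}
#endif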

/*
 * We're dead, kaput, RIP, slightly more than merely pining for the
 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
 * our maker, ceased to be, etcetc.  YASD.  It's a dead FS!
 *
 * Caller must hold puffs mutex.
 */
void
puffs_userdead(struct puffs_mount *pmp)
{
        struct puffs_msgpark *park, *park_next;

        /*
         * Mark filesystem status as dying so that operations don't
         * attempt to march to userspace any longer.
         */
        pmp->pmp_status = PUFFSTAT_DYING;

        /* signal waiters on REQUEST TO file server queue */
        for (park = TAILQ_FIRST(&pmp->pmp_msg_touser); park; park = park_next) {
                uint8_t opclass;

                mutex_enter(&park->park_mtx);
                puffs_msgpark_reference(park);
                park_next = TAILQ_NEXT(park, park_entries);

                KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
                TAILQ_REMOVE(&pmp->pmp_msg_touser, park, park_entries);
                park->park_flags &= ~PARKFLAG_ONQUEUE1;
                pmp->pmp_msg_touser_count--;

                /*
                 * Even though waiters on QUEUE1 are removed in touser()
                 * in case of WAITERGONE, it is still possible for us to
                 * get raced here due to having to retake locks in said
                 * touser().  In the race case simply "ignore" the item
                 * on the queue and move on to the next one.
                 */
                if (park->park_flags & PARKFLAG_WAITERGONE) {
                        KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
                        KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
                        puffs_msgpark_release(park);

                } else {
                        opclass = park->park_preq->preq_opclass;
                        park->park_preq->preq_rv = ENXIO;

                        if (park->park_flags & PARKFLAG_CALL) {
                                park->park_done(pmp, park->park_preq,
                                    park->park_donearg);
                                puffs_msgpark_release1(park, 2);
                        } else if ((park->park_flags & PARKFLAG_WANTREPLY) == 0) {
                                puffs_msgpark_release1(park, 2);
                        } else {
                                park->park_preq->preq_rv = ENXIO;
                                cv_signal(&park->park_cv);
                                puffs_msgpark_release(park);
                        }
                }
        }

        /* signal waiters on RESPONSE FROM file server queue */
        for (park = TAILQ_FIRST(&pmp->pmp_msg_replywait); park; park = park_next) {
                mutex_enter(&park->park_mtx);
                puffs_msgpark_reference(park);
                park_next = TAILQ_NEXT(park, park_entries);

                KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
                KASSERT(park->park_flags & PARKFLAG_WANTREPLY);

                TAILQ_REMOVE(&pmp->pmp_msg_replywait, park, park_entries);
                park->park_flags &= ~PARKFLAG_ONQUEUE2;

                if (park->park_flags & PARKFLAG_WAITERGONE) {
                        KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
                        puffs_msgpark_release(park);
                } else {
                        park->park_preq->preq_rv = ENXIO;
                        if (park->park_flags & PARKFLAG_CALL) {
                                park->park_done(pmp, park->park_preq,
                                    park->park_donearg);
                                puffs_msgpark_release1(park, 2);
                        } else {
                                cv_signal(&park->park_cv);
                                puffs_msgpark_release(park);
                        }
                }
        }

        cv_broadcast(&pmp->pmp_msg_waiter_cv);
}