/*	$NetBSD: puffs_msgif.c,v 1.64 2008/01/28 21:06:36 pooka Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.64 2008/01/28 21:06:36 pooka Exp $");

#include <sys/param.h>
#include <sys/fstrans.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <dev/putter/putter_sys.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <miscfs/syncfs/syncfs.h>	/* XXX: for syncer_mutex reference */

/*
 * waitq data structures
 */

/*
 * While a request is going to userspace, park the caller within the
 * kernel.  This is the kernel counterpart of "struct puffs_req".
 */
struct puffs_msgpark {
	struct puffs_req	*park_preq;	/* req followed by buf	*/

	size_t			park_copylen;	/* userspace copylength	*/
	size_t			park_maxlen;	/* max size in comeback	*/

	parkdone_fn		park_done;	/* "biodone" a'la puffs	*/
	void			*park_donearg;

	int			park_flags;
	int			park_refcount;

	kcondvar_t		park_cv;
	kmutex_t		park_mtx;

	TAILQ_ENTRY(puffs_msgpark) park_entries;
};
#define PARKFLAG_WAITERGONE	0x01
#define PARKFLAG_DONE		0x02
#define PARKFLAG_ONQUEUE1	0x04
#define PARKFLAG_ONQUEUE2	0x08
#define PARKFLAG_CALL		0x10
#define PARKFLAG_WANTREPLY	0x20
#define PARKFLAG_HASERROR	0x40

static pool_cache_t parkpc;
#ifdef PUFFSDEBUG
static int totalpark;
#endif

static int
makepark(void *arg, void *obj, int flags)
{
	struct puffs_msgpark *park = obj;

	mutex_init(&park->park_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&park->park_cv, "puffsrpl");

	return 0;
}

static void
nukepark(void *arg, void *obj)
{
	struct puffs_msgpark *park = obj;

	cv_destroy(&park->park_cv);
	mutex_destroy(&park->park_mtx);
}

void
puffs_msgif_init(void)
{

	parkpc = pool_cache_init(sizeof(struct puffs_msgpark), 0, 0, 0,
	    "puffprkl", NULL, IPL_NONE, makepark, nukepark, NULL);
}

void
puffs_msgif_destroy(void)
{

	pool_cache_destroy(parkpc);
}
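
/*
 * Illustrative sketch (not compiled): the pair above brackets the
 * lifetime of the whole message interface -- puffs_msgif_init() must
 * run before the first park can be allocated and puffs_msgif_destroy()
 * only after the last one has been released.  The hooks below are
 * hypothetical placeholders for the actual attach/detach path.
 */
#if 0
	/* hypothetical attach hook */
	puffs_msgif_init();	/* parkpc must exist before any mount */

	/* ... all mounts serviced and all parks released ... */

	/* hypothetical detach hook */
	puffs_msgif_destroy();
#endif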

static int alloced;	/* number of live parks */

static struct puffs_msgpark *
puffs_msgpark_alloc(int waitok)
{
	struct puffs_msgpark *park;

	park = pool_cache_get(parkpc, waitok ? PR_WAITOK : PR_NOWAIT);
	if (park == NULL)
		return park;

	alloced++;	/* paired with the decrement on final release */
	park->park_refcount = 1;
	park->park_preq = NULL;
	park->park_flags = PARKFLAG_WANTREPLY;

#ifdef PUFFSDEBUG
	totalpark++;
#endif

	return park;
}

static void
puffs_msgpark_reference(struct puffs_msgpark *park)
{

	KASSERT(mutex_owned(&park->park_mtx));
	park->park_refcount++;
}

/*
 * Release reference to park structure.
 */
static void
puffs_msgpark_release1(struct puffs_msgpark *park, int howmany)
{
	struct puffs_req *preq = park->park_preq;
	int refcnt;

	KASSERT(mutex_owned(&park->park_mtx));
	refcnt = park->park_refcount -= howmany;
	mutex_exit(&park->park_mtx);

	KASSERT(refcnt >= 0);

	if (refcnt == 0) {
		alloced--;
		if (preq)
			kmem_free(preq, park->park_maxlen);
		pool_cache_put(parkpc, park);

#ifdef PUFFSDEBUG
		totalpark--;
#endif
	}
}
#define puffs_msgpark_release(a) puffs_msgpark_release1(a, 1)

#ifdef PUFFSDEBUG
static void
parkdump(struct puffs_msgpark *park)
{

	DPRINTF(("park %p, preq %p, id %" PRIu64 "\n"
	    "\tcopy %zu, max %zu - done: %p/%p\n"
	    "\tflags 0x%08x, refcount %d, cv/mtx: %p/%p\n",
	    park, park->park_preq, park->park_preq->preq_id,
	    park->park_copylen, park->park_maxlen,
	    park->park_done, park->park_donearg,
	    park->park_flags, park->park_refcount,
	    &park->park_cv, &park->park_mtx));
}

static void
parkqdump(struct puffs_wq *q, int dumpall)
{
	struct puffs_msgpark *park;
	int total = 0;

	TAILQ_FOREACH(park, q, park_entries) {
		if (dumpall)
			parkdump(park);
		total++;
	}
	DPRINTF(("puffs waitqueue at %p dumped, %d total\n", q, total));
}
#endif /* PUFFSDEBUG */

/*
 * A word about locking in the park structures: the lock protects the
 * fields of the *park* structure (not preq) and acts as an interlock
 * in cv operations.  The lock is always internal to this module and
 * callers do not need to worry about it.
 */

int
puffs_msgmem_alloc(size_t len, struct puffs_msgpark **ppark, void **mem,
    int cansleep)
{
	struct puffs_msgpark *park;
	void *m;

	m = kmem_zalloc(len, cansleep ? KM_SLEEP : KM_NOSLEEP);
	if (m == NULL) {
		KASSERT(cansleep == 0);
		return ENOMEM;
	}

	park = puffs_msgpark_alloc(cansleep);
	if (park == NULL) {
		KASSERT(cansleep == 0);
		kmem_free(m, len);
		return ENOMEM;
	}

	park->park_preq = m;
	park->park_maxlen = park->park_copylen = len;

	*ppark = park;
	*mem = m;

	return 0;
}
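
/*
 * Illustrative sketch (not compiled): the typical call sequence a
 * vnode/vfs operation uses around the helpers in this file.  The
 * opcode shown and the use of a bare puffs_req are schematic; real
 * callers ship op-specific structures which embed a puffs_req.
 */
#if 0
static int
example_op(struct puffs_mount *pmp, puffs_cookie_t ck)
{
	struct puffs_msgpark *park;
	struct puffs_req *preq;
	int error;

	error = puffs_msgmem_alloc(sizeof(*preq), &park, (void *)&preq, 1);
	if (error)
		return error;
	puffs_msg_setinfo(park, PUFFSOP_VN, PUFFS_VN_FSYNC, ck);

	puffs_msg_enqueue(pmp, park);		/* ship to the file server */
	error = puffs_msg_wait(pmp, park);	/* sleep for the reply */

	puffs_msgmem_release(park);		/* drop our reference */
	return error;
}
#endif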

void
puffs_msgmem_release(struct puffs_msgpark *park)
{

	if (park == NULL)
		return;

	mutex_enter(&park->park_mtx);
	puffs_msgpark_release(park);
}

void
puffs_msg_setfaf(struct puffs_msgpark *park)
{

	KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
	park->park_flags &= ~PARKFLAG_WANTREPLY;
}

void
puffs_msg_setdelta(struct puffs_msgpark *park, size_t delta)
{

	KASSERT(delta < park->park_maxlen); /* "<=" wouldn't make sense */
	park->park_copylen = park->park_maxlen - delta;
}
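
/*
 * Illustrative sketch (not compiled): puffs_msg_setdelta() serves
 * requests whose outgoing copy is smaller than the reply buffer, e.g.
 * a read: the message is allocated at maxlen (header plus data space),
 * but only the header needs to travel to the file server.  "hdr",
 * "datalen" and "mem" are hypothetical names.
 */
#if 0
	/* allocate header plus "datalen" bytes for the reply payload */
	puffs_msgmem_alloc(sizeof(hdr) + datalen, &park, &mem, 1);

	/* ship only the header; the reply may fill up to park_maxlen */
	puffs_msg_setdelta(park, datalen);
#endif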

void
puffs_msg_setinfo(struct puffs_msgpark *park, int class, int type,
    puffs_cookie_t ck)
{

	park->park_preq->preq_opclass = PUFFSOP_OPCLASS(class);
	park->park_preq->preq_optype = type;
	park->park_preq->preq_cookie = ck;
}

void
puffs_msg_setcall(struct puffs_msgpark *park, parkdone_fn donefn, void *donearg)
{

	KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
	park->park_done = donefn;
	park->park_donearg = donearg;
	park->park_flags |= PARKFLAG_CALL;
}
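
/*
 * Illustrative sketch (not compiled): with puffs_msg_setcall() the
 * enqueuing thread does not sleep for the reply -- puffs_msg_wait()
 * returns immediately for PARKFLAG_CALL parks and puffsop_msg() later
 * invokes the callback from the response context.  The callback name
 * and its argument are hypothetical.
 */
#if 0
static void
example_done(struct puffs_mount *pmp, struct puffs_req *preq, void *arg)
{

	/* runs when the file server's reply arrives; no waiter to wake */
}

	/* ... after allocating the park and filling in the request ... */
	puffs_msg_setcall(park, example_done, examplearg);
	puffs_msg_enqueue(pmp, park);
	(void)puffs_msg_wait(pmp, park);	/* no sleep for CALL parks */
#endif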

/*
 * kernel-user-kernel waitqueues
 */

static uint64_t
puffs_getmsgid(struct puffs_mount *pmp)
{
	uint64_t rv;

	mutex_enter(&pmp->pmp_lock);
	rv = pmp->pmp_nextmsgid++;
	mutex_exit(&pmp->pmp_lock);

	return rv;
}

/*
 * A word about reference counting of parks.  A reference must be taken
 * when accessing a park and additionally when it is on a queue.  So
 * when taking it off a queue and releasing the access reference, the
 * reference count is generally decremented by 2.
 */
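
/*
 * Illustrative sketch (not compiled) of the convention above: a thread
 * touching a park takes an access reference under park_mtx, and each
 * queue membership holds another, so "dequeue and stop using" is -2.
 */
#if 0
	mutex_enter(&park->park_mtx);
	puffs_msgpark_reference(park);		/* +1: our access	*/
	/* ... decide to dequeue it ... */
	TAILQ_REMOVE(&pmp->pmp_msg_touser, park, park_entries);
	park->park_flags &= ~PARKFLAG_ONQUEUE1;
	puffs_msgpark_release1(park, 2);	/* -1 queue, -1 access	*/
#endif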

void
puffs_msg_enqueue(struct puffs_mount *pmp, struct puffs_msgpark *park)
{
	struct lwp *l = curlwp;
	struct mount *mp;
	struct puffs_req *preq;

	mp = PMPTOMP(pmp);
	preq = park->park_preq;
	preq->preq_buflen = park->park_maxlen;
	KASSERT(preq->preq_id == 0
	    || (preq->preq_opclass & PUFFSOPFLAG_ISRESPONSE));

	if ((park->park_flags & PARKFLAG_WANTREPLY) == 0)
		preq->preq_opclass |= PUFFSOPFLAG_FAF;
	else
		preq->preq_id = puffs_getmsgid(pmp);

	/* fill in caller information */
	preq->preq_pid = l->l_proc->p_pid;
	preq->preq_lid = l->l_lid;

	/*
	 * To support interruptible waits (cv_wait_sig), yet another
	 * movie: check if there are signals pending and we are issuing
	 * a non-FAF.  If so, return an error directly UNLESS we are
	 * issuing INACTIVE/RECLAIM.  In that case, convert it to a FAF,
	 * fire off to the file server and return an error.  Yes, this
	 * is bordering on disgusting.  Barfbags are on me.
	 */
	if (__predict_false((park->park_flags & PARKFLAG_WANTREPLY)
	   && (park->park_flags & PARKFLAG_CALL) == 0
	   && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))) {
		park->park_flags |= PARKFLAG_HASERROR;
		preq->preq_rv = EINTR;
		if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
		    && (preq->preq_optype == PUFFS_VN_INACTIVE
		     || preq->preq_optype == PUFFS_VN_RECLAIM)) {
			park->park_preq->preq_opclass |= PUFFSOPFLAG_FAF;
			park->park_flags &= ~PARKFLAG_WANTREPLY;
			DPRINTF(("puffs_msg_enqueue: converted to FAF %p\n",
			    park));
		} else {
			return;
		}
	}

	/*
	 * test for suspension lock.
	 *
	 * Note that we *DO NOT* keep the lock, since that might block
	 * further lock acquisition PLUS it would give userlandia control
	 * over the lock.  The operation queue enforces a strict ordering:
	 * when the fs server gets in the op stream, it knows things
	 * are in order.  The kernel locks can't guarantee that for
	 * userspace, in any case.
	 *
	 * BUT: this presents a problem for ops which have a consistency
	 * clause based on more than one operation.  Unfortunately such
	 * operations (read, write) do not reliably work yet.
	 *
	 * Ya, Ya, it's wrong wrong wrong, me be fixink this someday.
	 *
	 * XXX: and there is one more problem.  We sometimes need to
	 * take a lazy lock in case the fs is suspending and we are
	 * executing as the fs server context.  This might happen
	 * e.g. in the case that the user server triggers a reclaim
	 * in the kernel while the fs is suspending.  It's not a very
	 * likely event, but it needs to be fixed some day.
	 */

	/*
	 * MOREXXX: once PUFFS_WCACHEINFO is enabled, we can't take
	 * the mutex here, since getpages() might be called locked.
	 */
	fstrans_start(mp, FSTRANS_NORMAL);
	mutex_enter(&pmp->pmp_lock);
	fstrans_done(mp);

	if (pmp->pmp_status != PUFFSTAT_RUNNING) {
		mutex_exit(&pmp->pmp_lock);
		park->park_flags |= PARKFLAG_HASERROR;
		preq->preq_rv = ENXIO;
		return;
	}

#ifdef PUFFSDEBUG
	parkqdump(&pmp->pmp_msg_touser, puffsdebug > 1);
	parkqdump(&pmp->pmp_msg_replywait, puffsdebug > 1);
#endif

	/*
	 * Note: we don't need to lock park since we have the only
	 * reference to it at this point.
	 */
	TAILQ_INSERT_TAIL(&pmp->pmp_msg_touser, park, park_entries);
	park->park_flags |= PARKFLAG_ONQUEUE1;
	pmp->pmp_msg_touser_count++;
	park->park_refcount++;
	mutex_exit(&pmp->pmp_lock);

	cv_broadcast(&pmp->pmp_msg_waiter_cv);
	putter_notify(pmp->pmp_pi);

	DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
	    "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, park,
	    preq->preq_opclass, preq->preq_optype, park->park_flags));
}

int
puffs_msg_wait(struct puffs_mount *pmp, struct puffs_msgpark *park)
{
	struct puffs_req *preq = park->park_preq; /* XXX: hmmm */
	struct mount *mp = PMPTOMP(pmp);
	int error = 0;
	int rv;

	mutex_enter(&pmp->pmp_lock);
	puffs_mp_reference(pmp);
	mutex_exit(&pmp->pmp_lock);

	mutex_enter(&park->park_mtx);
	if ((park->park_flags & PARKFLAG_WANTREPLY) == 0
	    || (park->park_flags & PARKFLAG_CALL)) {
		mutex_exit(&park->park_mtx);
		rv = 0;
		goto skipwait;
	}

	/* did the response beat us to the wait? */
	if (__predict_false((park->park_flags & PARKFLAG_DONE)
	    || (park->park_flags & PARKFLAG_HASERROR))) {
		rv = park->park_preq->preq_rv;
		mutex_exit(&park->park_mtx);
		goto skipwait;
	}

	error = cv_wait_sig(&park->park_cv, &park->park_mtx);
	DPRINTF(("puffs_touser: waiter for %p woke up with %d\n",
	    park, error));
	if (error) {
		park->park_flags |= PARKFLAG_WAITERGONE;
		if (park->park_flags & PARKFLAG_DONE) {
			rv = preq->preq_rv;
			mutex_exit(&park->park_mtx);
		} else {
			/*
			 * ok, we marked it as going away, but
			 * still need to do queue ops.  take locks
			 * in correct order.
			 *
			 * We don't want to release our reference
			 * if it's on the replywait queue, to avoid
			 * signalling an error to the file server.
			 * putop() code will DTRT.
			 */
			mutex_exit(&park->park_mtx);
			mutex_enter(&pmp->pmp_lock);
			mutex_enter(&park->park_mtx);

			/*
			 * Still on queue1?  We can safely remove it
			 * without any consequences since the file
			 * server hasn't seen it.  "else" we need to
			 * wait for the response and just ignore it
			 * to avoid signalling an incorrect error to
			 * the file server.
			 */
			if (park->park_flags & PARKFLAG_ONQUEUE1) {
				TAILQ_REMOVE(&pmp->pmp_msg_touser,
				    park, park_entries);
				park->park_flags &= ~PARKFLAG_ONQUEUE1;
				pmp->pmp_msg_touser_count--;
				puffs_msgpark_release(park);
			} else {
				mutex_exit(&park->park_mtx);
			}
			mutex_exit(&pmp->pmp_lock);

			rv = EINTR;
		}
	} else {
		rv = preq->preq_rv;
		mutex_exit(&park->park_mtx);
	}

	/*
	 * retake the lock and release.  This makes sure (haha,
	 * I'm humorous) that we don't process the same vnode in
	 * multiple threads due to the lock hacks we have in
	 * puffs_lock().  In reality this is well protected by
	 * the biglock, but once that's gone, well, hopefully
	 * this will be fixed for real.  (and when you read this
	 * comment in 2017 and subsequently barf, my condolences ;).
	 */
	if (rv == 0 && !fstrans_is_owner(mp)) {
		fstrans_start(mp, FSTRANS_NORMAL);
		fstrans_done(mp);
	}

 skipwait:
	mutex_enter(&pmp->pmp_lock);
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	return rv;
}

/*
 * XXX: this suuuucks.  Hopefully I'll get rid of this lossage once
 * the whole setback-nonsense gets fixed.
 */
int
puffs_msg_wait2(struct puffs_mount *pmp, struct puffs_msgpark *park,
    struct puffs_node *pn1, struct puffs_node *pn2)
{
	struct puffs_req *preq;
	int rv;

	rv = puffs_msg_wait(pmp, park);

	preq = park->park_preq;
	if (pn1 && preq->preq_setbacks & PUFFS_SETBACK_INACT_N1)
		pn1->pn_stat |= PNODE_DOINACT;
	if (pn2 && preq->preq_setbacks & PUFFS_SETBACK_INACT_N2)
		pn2->pn_stat |= PNODE_DOINACT;

	if (pn1 && preq->preq_setbacks & PUFFS_SETBACK_NOREF_N1)
		pn1->pn_stat |= PNODE_NOREFS;
	if (pn2 && preq->preq_setbacks & PUFFS_SETBACK_NOREF_N2)
		pn2->pn_stat |= PNODE_NOREFS;

	return rv;
}

/*
 * XXX: lazy bum.  please, for the love of foie gras, fix me.
 * This should *NOT* depend on setfaf.  Also "memcpy" could
 * be done more nicely.
 */
void
puffs_msg_sendresp(struct puffs_mount *pmp, struct puffs_req *origpreq, int rv)
{
	struct puffs_msgpark *park;
	struct puffs_req *preq;

	puffs_msgmem_alloc(sizeof(struct puffs_req), &park, (void *)&preq, 1);
	puffs_msg_setfaf(park);	/* XXXXXX: avoids reqid override */

	memcpy(preq, origpreq, sizeof(struct puffs_req));
	preq->preq_rv = rv;
	preq->preq_opclass |= PUFFSOPFLAG_ISRESPONSE;

	puffs_msg_enqueue(pmp, park);
	puffs_msgmem_release(park);
}

/*
 * Get next request in the outgoing queue.  "maxsize" controls the
 * size the caller can accommodate and "nonblock" controls whether the
 * call may block while waiting for input.  Handles all locking internally.
 */
int
puffs_msgif_getout(void *this, size_t maxsize, int nonblock,
    uint8_t **data, size_t *dlen, void **parkptr)
{
	struct puffs_mount *pmp = this;
	struct puffs_msgpark *park;
	struct puffs_req *preq;
	int error;

	error = 0;
	mutex_enter(&pmp->pmp_lock);
	puffs_mp_reference(pmp);
	for (;;) {
		/* RIP? */
		if (pmp->pmp_status != PUFFSTAT_RUNNING) {
			error = ENXIO;
			break;
		}

		/* need platinum yendorian express card? */
		if (TAILQ_EMPTY(&pmp->pmp_msg_touser)) {
			DPRINTF(("puffs_getout: no outgoing op, "));
			if (nonblock) {
				DPRINTF(("returning EWOULDBLOCK\n"));
				error = EWOULDBLOCK;
				break;
			}
			DPRINTF(("waiting ...\n"));

			error = cv_wait_sig(&pmp->pmp_msg_waiter_cv,
			    &pmp->pmp_lock);
			if (error)
				break;
			else
				continue;
		}

		park = TAILQ_FIRST(&pmp->pmp_msg_touser);
		if (park == NULL)
			continue;

		mutex_enter(&park->park_mtx);
		puffs_msgpark_reference(park);

		DPRINTF(("puffs_getout: found park at %p, ", park));

		/* If it's a goner, don't process any further */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			DPRINTF(("waitergone!\n"));
			puffs_msgpark_release(park);
			continue;
		}
		preq = park->park_preq;

#if 0
		/* check size */
		/*
		 * XXX: this check is not valid for now, we don't know
		 * the size of the caller's input buffer.  i.e. this
		 * will most likely go away
		 */
		if (maxsize < preq->preq_frhdr.pfr_len) {
			DPRINTF(("buffer too small\n"));
			puffs_msgpark_release(park);
			error = E2BIG;
			break;
		}
#endif

		DPRINTF(("returning\n"));

		/*
		 * Ok, we found what we came for.  Release it from the
		 * outgoing queue but do not unlock.  We will unlock
		 * only after we "releaseout" it to avoid complications:
		 * otherwise it is (theoretically) possible for userland
		 * to race us into "put" before we have a chance to put
		 * this baby on the receiving queue.
		 */
		TAILQ_REMOVE(&pmp->pmp_msg_touser, park, park_entries);
		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		mutex_exit(&park->park_mtx);

		pmp->pmp_msg_touser_count--;
		KASSERT(pmp->pmp_msg_touser_count >= 0);

		break;
	}
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	if (error == 0) {
		*data = (uint8_t *)preq;
		preq->preq_pth.pth_framelen = park->park_copylen;
		*dlen = preq->preq_pth.pth_framelen;
		*parkptr = park;
	}

	return error;
}
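
/*
 * Illustrative sketch (not compiled): how the transport read side might
 * drive the getout/releaseout pair.  In reality putter owns this loop;
 * "example_copyout_frame" is a hypothetical stand-in for whatever moves
 * the frame to userspace.
 */
#if 0
	uint8_t *data;
	size_t dlen;
	void *park;
	int error;

	error = puffs_msgif_getout(pmp, maxsize, nonblock,
	    &data, &dlen, &park);
	if (error == 0) {
		error = example_copyout_frame(data, dlen); /* hypothetical */
		/*
		 * On success the park goes to the replywait queue; on
		 * failure the waiter is woken with the error.
		 */
		puffs_msgif_releaseout(pmp, park, error);
	}
#endif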

/*
 * Release outgoing structure.  Now, depending on the success of the
 * outgoing send, it is either going onto the result waiting queue
 * or the death chamber.
 */
void
puffs_msgif_releaseout(void *this, void *parkptr, int status)
{
	struct puffs_mount *pmp = this;
	struct puffs_msgpark *park = parkptr;

	DPRINTF(("puffs_releaseout: returning park %p, errno %d: ",
	    park, status));
	mutex_enter(&pmp->pmp_lock);
	mutex_enter(&park->park_mtx);
	if (park->park_flags & PARKFLAG_WANTREPLY) {
		if (status == 0) {
			DPRINTF(("enqueue replywait\n"));
			TAILQ_INSERT_TAIL(&pmp->pmp_msg_replywait, park,
			    park_entries);
			park->park_flags |= PARKFLAG_ONQUEUE2;
		} else {
			DPRINTF(("error path!\n"));
			park->park_preq->preq_rv = status;
			park->park_flags |= PARKFLAG_DONE;
			cv_signal(&park->park_cv);
		}
		puffs_msgpark_release(park);
	} else {
		DPRINTF(("release\n"));
		puffs_msgpark_release1(park, 2);
	}
	mutex_exit(&pmp->pmp_lock);
}

size_t
puffs_msgif_waitcount(void *this)
{
	struct puffs_mount *pmp = this;
	size_t rv;

	mutex_enter(&pmp->pmp_lock);
	rv = pmp->pmp_msg_touser_count;
	mutex_exit(&pmp->pmp_lock);

	return rv;
}

/*
 * XXX: locking with this one?
 */
static void
puffsop_msg(void *this, struct puffs_req *preq)
{
	struct puffs_mount *pmp = this;
	struct putter_hdr *pth = &preq->preq_pth;
	struct puffs_msgpark *park;
	int wgone;

	mutex_enter(&pmp->pmp_lock);

	/* Locate waiter */
	TAILQ_FOREACH(park, &pmp->pmp_msg_replywait, park_entries) {
		if (park->park_preq->preq_id == preq->preq_id)
			break;
	}
	if (park == NULL) {
		DPRINTF(("puffsop_msg: no request: %" PRIu64 "\n",
		    preq->preq_id));
		mutex_exit(&pmp->pmp_lock);
		return; /* XXX send error */
	}

	mutex_enter(&park->park_mtx);
	puffs_msgpark_reference(park);
	if (pth->pth_framelen > park->park_maxlen) {
		DPRINTF(("puffsop_msg: invalid buffer length: "
		    "%" PRIu64 " (req %" PRIu64 ")\n", pth->pth_framelen,
		    preq->preq_id));
		park->park_preq->preq_rv = EPROTO;
		cv_signal(&park->park_cv);
		puffs_msgpark_release1(park, 2);
		mutex_exit(&pmp->pmp_lock);
		return; /* XXX: error */
	}
	wgone = park->park_flags & PARKFLAG_WAITERGONE;

	KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
	TAILQ_REMOVE(&pmp->pmp_msg_replywait, park, park_entries);
	park->park_flags &= ~PARKFLAG_ONQUEUE2;
	mutex_exit(&pmp->pmp_lock);

	if (wgone) {
		DPRINTF(("puffsop_msg: bad service - waiter gone for "
		    "park %p\n", park));
	} else {
		if (park->park_flags & PARKFLAG_CALL) {
			DPRINTF(("puffsop_msg: call for %p, arg %p\n",
			    park->park_preq, park->park_donearg));
			park->park_done(pmp, preq, park->park_donearg);
		} else {
			/* XXX: yes, I know */
			memcpy(park->park_preq, preq, pth->pth_framelen);
		}
	}

	if (!wgone) {
		DPRINTF(("puffsop_msg: flagging done for "
		    "park %p\n", park));
		cv_signal(&park->park_cv);
	}

	park->park_flags |= PARKFLAG_DONE;
	puffs_msgpark_release1(park, 2);
}

/*
 * helpers
 */
static void
dosuspendresume(void *arg)
{
	struct puffs_mount *pmp = arg;
	struct mount *mp;
	int rv;

	mp = PMPTOMP(pmp);
	/*
	 * XXX?  does this really do any good or is it just
	 * paranoid stupidity?  or stupid paranoia?
	 */
	if (mp->mnt_iflag & IMNT_UNMOUNT) {
		printf("puffs dosuspendresume(): detected suspend on "
		    "unmounting fs\n");
		goto out;
	}

	/* Do the dance.  Allow only one concurrent suspend */
	rv = vfs_suspend(PMPTOMP(pmp), 1);
	if (rv == 0)
		vfs_resume(PMPTOMP(pmp));

 out:
	mutex_enter(&pmp->pmp_lock);
	KASSERT(pmp->pmp_suspend == 1);
	pmp->pmp_suspend = 0;
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	kthread_exit(0);
}

static void
puffsop_suspend(struct puffs_mount *pmp)
{
	int rv = 0;

	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_suspend || pmp->pmp_status != PUFFSTAT_RUNNING) {
		rv = EBUSY;
	} else {
		puffs_mp_reference(pmp);
		pmp->pmp_suspend = 1;
	}
	mutex_exit(&pmp->pmp_lock);
	if (rv)
		return;
	rv = kthread_create(PRI_NONE, 0, NULL, dosuspendresume,
	    pmp, NULL, "puffsusp");

	/* XXX: "return" rv */
}

static void
puffsop_flush(struct puffs_mount *pmp, struct puffs_flush *pf)
{
	struct vnode *vp;
	voff_t offlo, offhi;
	int rv, flags = 0;

	if (pf->pf_req.preq_pth.pth_framelen != sizeof(struct puffs_flush)) {
		rv = EINVAL;
		goto out;
	}

	/* XXX: slurry */
	if (pf->pf_op == PUFFS_INVAL_NAMECACHE_ALL) {
		cache_purgevfs(PMPTOMP(pmp));
		rv = 0;
		goto out;
	}

	/*
	 * Get vnode, don't lock it.  Namecache is protected by its own lock
	 * and we have a reference to protect against premature harvesting.
	 *
	 * The node we want here might be locked and the op is in
	 * userspace waiting for us to complete ==> deadlock.  Another
	 * reason we need to eventually bump locking to userspace, as we
	 * will need to lock the node if we wish to do flushes.
	 */
	rv = puffs_cookie2vnode(pmp, pf->pf_cookie, 0, 0, &vp);
	if (rv) {
		if (rv == PUFFS_NOSUCHCOOKIE)
			rv = ENOENT;
		goto out;
	}

	switch (pf->pf_op) {
#if 0
	/* not quite ready, yet */
	case PUFFS_INVAL_NAMECACHE_NODE:
	{
		struct componentname *pf_cn;
		char *name;

		/* get comfortab^Wcomponentname */
		pf_cn = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);
		break;
	}
#endif
	case PUFFS_INVAL_NAMECACHE_DIR:
		if (vp->v_type != VDIR) {
			rv = EINVAL;
			break;
		}
		cache_purge1(vp, NULL, PURGE_CHILDREN);
		break;

	case PUFFS_INVAL_PAGECACHE_NODE_RANGE:
		flags = PGO_FREE;
		/*FALLTHROUGH*/
	case PUFFS_FLUSH_PAGECACHE_NODE_RANGE:
		if (flags == 0)
			flags = PGO_CLEANIT;

		if (pf->pf_end > vp->v_size || vp->v_type != VREG) {
			rv = EINVAL;
			break;
		}

		offlo = trunc_page(pf->pf_start);
		offhi = round_page(pf->pf_end);
		if (offhi != 0 && offlo >= offhi) {
			rv = EINVAL;
			break;
		}

		mutex_enter(&vp->v_uobj.vmobjlock);
		rv = VOP_PUTPAGES(vp, offlo, offhi, flags);
		break;

	default:
		rv = EINVAL;
	}

	vrele(vp);

 out:
	puffs_msg_sendresp(pmp, &pf->pf_req, rv);
}
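
/*
 * Illustrative sketch (not compiled): the shape of a flush frame as
 * consumed by puffsop_flush() above, roughly as a file server might
 * fill it in before writing it to the kernel.  Only fields checked
 * above are shown; "cookie" and "length" are hypothetical.
 */
#if 0
	struct puffs_flush pf;

	memset(&pf, 0, sizeof(pf));
	pf.pf_req.preq_pth.pth_framelen = sizeof(struct puffs_flush);
	pf.pf_req.preq_opclass = PUFFSOP_FLUSH;
	pf.pf_op = PUFFS_FLUSH_PAGECACHE_NODE_RANGE;
	pf.pf_cookie = cookie;	/* node to flush */
	pf.pf_start = 0;	/* trunc_page()'d by the kernel */
	pf.pf_end = length;	/* must not exceed v_size; 0 = to the end */
#endif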

int
puffs_msgif_dispatch(void *this, struct putter_hdr *pth)
{
	struct puffs_mount *pmp = this;
	struct puffs_req *preq = (struct puffs_req *)pth;

	/* XXX: need to send error to userspace */
	if (pth->pth_framelen < sizeof(struct puffs_req)) {
		puffs_msg_sendresp(pmp, preq, EINVAL); /* E2SMALL */
		return 0;
	}

	switch (PUFFSOP_OPCLASS(preq->preq_opclass)) {
	case PUFFSOP_VN:
	case PUFFSOP_VFS:
		DPRINTF(("dispatch: vn/vfs message 0x%x\n", preq->preq_optype));
		puffsop_msg(pmp, preq);
		break;
	case PUFFSOP_FLUSH:
		DPRINTF(("dispatch: flush 0x%x\n", preq->preq_optype));
		puffsop_flush(pmp, (struct puffs_flush *)preq);
		break;
	case PUFFSOP_SUSPEND:
		DPRINTF(("dispatch: suspend\n"));
		puffsop_suspend(pmp);
		break;
	default:
		DPRINTF(("dispatch: invalid class 0x%x\n", preq->preq_opclass));
		puffs_msg_sendresp(pmp, preq, EINVAL);
		break;
	}

	return 0;
}

int
puffs_msgif_close(void *this)
{
	struct puffs_mount *pmp = this;
	struct mount *mp = PMPTOMP(pmp);
	int gone, rv;

	mutex_enter(&pmp->pmp_lock);
	puffs_mp_reference(pmp);

	/*
	 * Free the waiting callers before proceeding any further.
	 * The syncer might be jogging around in this file system
	 * currently.  If we allow it to go to the userspace of no
	 * return while trying to get the syncer lock, well ...
	 * synclk: I feel happy, I feel fine.
	 * lockmgr: You're not fooling anyone, you know.
	 */
	puffs_userdead(pmp);

	/*
	 * Make sure someone from puffs_unmount() isn't currently in
	 * userspace.  If we don't take this precautionary step,
	 * they might notice that the mountpoint has disappeared
	 * from under them once they return.  Especially note that we
	 * cannot simply test for an unmounter before calling
	 * dounmount(), since it might be possible that that particular
	 * invocation of unmount was called without MNT_FORCE.  Here we
	 * *must* make sure unmount succeeds.  Also, restart is necessary
	 * since pmp isn't locked.  We might end up with PUTTER_DEAD after
	 * restart and exit from there.
	 */
	if (pmp->pmp_unmounting) {
		cv_wait(&pmp->pmp_unmounting_cv, &pmp->pmp_lock);
		puffs_mp_release(pmp);
		mutex_exit(&pmp->pmp_lock);
		DPRINTF(("puffs_fop_close: unmount was in progress for pmp %p, "
		    "restart\n", pmp));
		return ERESTART;
	}

	/* Won't access pmp from here anymore */
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	/*
	 * Detach from VFS.  First do necessary XXX-dance (from
	 * sys_unmount() & other callers of dounmount()).
	 *
	 * XXX Freeze syncer.  Must do this before locking the
	 * mount point.  See dounmount() for details.
	 *
	 * XXX2: take a reference to the mountpoint before starting to
	 * wait for syncer_mutex.  Otherwise the mountpoint can be
	 * wiped out while we wait.
	 */
	mutex_enter(&mp->mnt_mutex);
	mp->mnt_wcnt++;
	mutex_exit(&mp->mnt_mutex);

	mutex_enter(&syncer_mutex);

	mutex_enter(&mp->mnt_mutex);
	mp->mnt_wcnt--;
	if (mp->mnt_wcnt == 0)
		wakeup(&mp->mnt_wcnt);
	gone = mp->mnt_iflag & IMNT_GONE;
	mutex_exit(&mp->mnt_mutex);
	if (gone) {
		mutex_exit(&syncer_mutex);
		return 0;
	}

	/*
	 * microscopic race condition here (although not with the current
	 * kernel), but can't really fix it without starting a crusade
	 * against vfs_busy(), so let it be, let it be, let it be
	 */

	/*
	 * The only way vfs_busy() will fail for us is if the filesystem
	 * is already a goner.
	 * XXX: skating on the thin ice of modern calling conventions ...
	 */
	if (vfs_busy(mp, 0, 0)) {
		mutex_exit(&syncer_mutex);
		return 0;
	}

	/*
	 * Once we have the mount point, unmount() can't interfere..
	 * or at least in theory it shouldn't.  dounmount() reentrancy
	 * might require some visiting at some point.
	 */
	rv = dounmount(mp, MNT_FORCE, curlwp);
	KASSERT(rv == 0);

	return 0;
}

/*
 * We're dead, kaput, RIP, slightly more than merely pining for the
 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
 * our maker, ceased to be, etc. etc.  YASD.  It's a dead FS!
 *
 * Caller must hold puffs mutex.
 */
void
puffs_userdead(struct puffs_mount *pmp)
{
	struct puffs_msgpark *park, *park_next;

	/*
	 * Mark filesystem status as dying so that operations don't
	 * attempt to march to userspace any longer.
	 */
	pmp->pmp_status = PUFFSTAT_DYING;

	/* signal waiters on REQUEST TO file server queue */
	for (park = TAILQ_FIRST(&pmp->pmp_msg_touser); park;
	    park = park_next) {
		uint8_t opclass;

		mutex_enter(&park->park_mtx);
		puffs_msgpark_reference(park);
		park_next = TAILQ_NEXT(park, park_entries);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		TAILQ_REMOVE(&pmp->pmp_msg_touser, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		pmp->pmp_msg_touser_count--;

		/*
		 * Even though waiters on QUEUE1 are removed in touser()
		 * in case of WAITERGONE, it is still possible for us to
		 * get raced here due to having to retake locks in said
		 * touser().  In the race case simply "ignore" the item
		 * on the queue and move on to the next one.
		 */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
			KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
			puffs_msgpark_release(park);

		} else {
			opclass = park->park_preq->preq_opclass;
			park->park_preq->preq_rv = ENXIO;

			if (park->park_flags & PARKFLAG_CALL) {
				park->park_done(pmp, park->park_preq,
				    park->park_donearg);
				puffs_msgpark_release1(park, 2);
			} else if ((park->park_flags & PARKFLAG_WANTREPLY)
			    == 0) {
				puffs_msgpark_release1(park, 2);
			} else {
				park->park_preq->preq_rv = ENXIO;
				cv_signal(&park->park_cv);
				puffs_msgpark_release(park);
			}
		}
	}

	/* signal waiters on RESPONSE FROM file server queue */
	for (park = TAILQ_FIRST(&pmp->pmp_msg_replywait); park;
	    park = park_next) {
		mutex_enter(&park->park_mtx);
		puffs_msgpark_reference(park);
		park_next = TAILQ_NEXT(park, park_entries);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
		KASSERT(park->park_flags & PARKFLAG_WANTREPLY);

		TAILQ_REMOVE(&pmp->pmp_msg_replywait, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE2;

		if (park->park_flags & PARKFLAG_WAITERGONE) {
			KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
			puffs_msgpark_release(park);
		} else {
			park->park_preq->preq_rv = ENXIO;
			if (park->park_flags & PARKFLAG_CALL) {
				park->park_done(pmp, park->park_preq,
				    park->park_donearg);
				puffs_msgpark_release1(park, 2);
			} else {
				cv_signal(&park->park_cv);
				puffs_msgpark_release(park);
			}
		}
	}

	cv_broadcast(&pmp->pmp_msg_waiter_cv);
}