/*	$NetBSD: puffs_msgif.c,v 1.60 2007/11/26 12:57:26 pooka Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.60 2007/11/26 12:57:26 pooka Exp $");

#include <sys/param.h>
#include <sys/fstrans.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <dev/putter/putter_sys.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <miscfs/syncfs/syncfs.h>	/* XXX: for syncer_mutex reference */

/*
 * waitq data structures
 */

/*
 * While a request is going to userspace, park the caller within the
 * kernel.  This is the kernel counterpart of "struct puffs_req".
 */
struct puffs_msgpark {
	struct puffs_req	*park_preq;	/* req followed by buf	*/

	size_t			park_copylen;	/* userspace copylength	*/
	size_t			park_maxlen;	/* max size in comeback	*/

	parkdone_fn		park_done;	/* "biodone" a'la puffs	*/
	void			*park_donearg;

	int			park_flags;
	int			park_refcount;

	kcondvar_t		park_cv;
	kmutex_t		park_mtx;

	TAILQ_ENTRY(puffs_msgpark) park_entries;
};
#define PARKFLAG_WAITERGONE	0x01
#define PARKFLAG_DONE		0x02
#define PARKFLAG_ONQUEUE1	0x04
#define PARKFLAG_ONQUEUE2	0x08
#define PARKFLAG_CALL		0x10
#define PARKFLAG_WANTREPLY	0x20
#define PARKFLAG_HASERROR	0x40
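
/*
 * Summary of the flags above, as used in this file: a park expecting
 * an answer (PARKFLAG_WANTREPLY) travels pmp_msg_touser
 * (PARKFLAG_ONQUEUE1) -> pmp_msg_replywait (PARKFLAG_ONQUEUE2) and is
 * flagged PARKFLAG_DONE once the reply (or a transport failure) has
 * been processed.  A fire-and-forget message never sets
 * PARKFLAG_WANTREPLY and dies after the outgoing copy.  PARKFLAG_CALL
 * routes the reply to park_done() instead of waking a sleeping waiter,
 * PARKFLAG_WAITERGONE records that the waiter was interrupted, and
 * PARKFLAG_HASERROR means preq_rv was set without a complete trip to
 * userspace.
 */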

static pool_cache_t parkpc;
#ifdef PUFFSDEBUG
static int totalpark;
#endif

static int
makepark(void *arg, void *obj, int flags)
{
	struct puffs_msgpark *park = obj;

	mutex_init(&park->park_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&park->park_cv, "puffsrpl");

	return 0;
}

static void
nukepark(void *arg, void *obj)
{
	struct puffs_msgpark *park = obj;

	cv_destroy(&park->park_cv);
	mutex_destroy(&park->park_mtx);
}

void
puffs_msgif_init()
{

	parkpc = pool_cache_init(sizeof(struct puffs_msgpark), 0, 0, 0,
	    "puffprkl", NULL, IPL_NONE, makepark, nukepark, NULL);
}

void
puffs_msgif_destroy()
{

	pool_cache_destroy(parkpc);
}

static int alloced;

static struct puffs_msgpark *
puffs_msgpark_alloc(int waitok)
{
	struct puffs_msgpark *park;

	park = pool_cache_get(parkpc, waitok ? PR_WAITOK : PR_NOWAIT);
	if (park == NULL)
		return park;

	park->park_refcount = 1;
	park->park_preq = NULL;
	park->park_flags = PARKFLAG_WANTREPLY;

#ifdef PUFFSDEBUG
	totalpark++;
#endif

	return park;
}

static void
puffs_msgpark_reference(struct puffs_msgpark *park)
{

	KASSERT(mutex_owned(&park->park_mtx));
	park->park_refcount++;
}

/*
 * Release reference to park structure.
 */
static void
puffs_msgpark_release1(struct puffs_msgpark *park, int howmany)
{
	struct puffs_req *preq = park->park_preq;
	int refcnt;

	KASSERT(mutex_owned(&park->park_mtx));
	refcnt = park->park_refcount -= howmany;
	mutex_exit(&park->park_mtx);

	KASSERT(refcnt >= 0);

	if (refcnt == 0) {
		alloced--;
		if (preq)
			kmem_free(preq, park->park_maxlen);
		pool_cache_put(parkpc, park);

#ifdef PUFFSDEBUG
		totalpark--;
#endif
	}
}
#define puffs_msgpark_release(a) puffs_msgpark_release1(a, 1)

#ifdef PUFFSDEBUG
static void
parkdump(struct puffs_msgpark *park)
{

	DPRINTF(("park %p, preq %p, id %" PRIu64 "\n"
	    "\tcopy %zu, max %zu - done: %p/%p\n"
	    "\tflags 0x%08x, refcount %d, cv/mtx: %p/%p\n",
	    park, park->park_preq, park->park_preq->preq_id,
	    park->park_copylen, park->park_maxlen,
	    park->park_done, park->park_donearg,
	    park->park_flags, park->park_refcount,
	    &park->park_cv, &park->park_mtx));
}

static void
parkqdump(struct puffs_wq *q, int dumpall)
{
	struct puffs_msgpark *park;
	int total = 0;

	TAILQ_FOREACH(park, q, park_entries) {
		if (dumpall)
			parkdump(park);
		total++;
	}
	DPRINTF(("puffs waitqueue at %p dumped, %d total\n", q, total));
}
#endif /* PUFFSDEBUG */

/*
 * A word about locking in the park structures: the lock protects the
 * fields of the *park* structure (not preq) and acts as an interlock
 * in cv operations.  The lock is always internal to this module and
 * callers do not need to worry about it.
 */

int
puffs_msgmem_alloc(size_t len, struct puffs_msgpark **ppark, void **mem,
	int cansleep)
{
	struct puffs_msgpark *park;
	void *m;

	m = kmem_zalloc(len, cansleep ? KM_SLEEP : KM_NOSLEEP);
	if (m == NULL) {
		KASSERT(cansleep == 0);
		return ENOMEM;
	}

	park = puffs_msgpark_alloc(cansleep);
	if (park == NULL) {
		KASSERT(cansleep == 0);
		kmem_free(m, len);
		return ENOMEM;
	}

	park->park_preq = m;
	park->park_maxlen = park->park_copylen = len;

	*ppark = park;
	*mem = m;

	return 0;
}
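
/*
 * Illustrative call sequence for a waited-for message.  This is a
 * sketch only: the message struct name and "cookie" below are
 * hypothetical and error handling is elided; real callers live in
 * puffs_vnops.c and friends, with op names from puffs_msgif.h.
 *
 *	struct puffs_msgpark *park;
 *	struct some_puffs_msg *msg;
 *
 *	puffs_msgmem_alloc(sizeof(*msg), &park, (void *)&msg, 1);
 *	...fill in *msg...
 *	puffs_msg_setinfo(park, PUFFSOP_VN, PUFFS_VN_FSYNC, cookie);
 *	puffs_msg_enqueue(pmp, park);
 *	error = puffs_msg_wait(pmp, park);
 *	...examine reply fields in *msg...
 *	puffs_msgmem_release(park);
 */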

void
puffs_msgmem_release(struct puffs_msgpark *park)
{

	if (park == NULL)
		return;

	mutex_enter(&park->park_mtx);
	puffs_msgpark_release(park);
}

void
puffs_msg_setfaf(struct puffs_msgpark *park)
{

	KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
	park->park_flags &= ~PARKFLAG_WANTREPLY;
}

void
puffs_msg_setdelta(struct puffs_msgpark *park, size_t delta)
{

	KASSERT(delta < park->park_maxlen); /* "<=" wouldn't make sense */
	park->park_copylen = park->park_maxlen - delta;
}

void
puffs_msg_setinfo(struct puffs_msgpark *park, int class, int type, void *cookie)
{

	park->park_preq->preq_opclass = PUFFSOP_OPCLASS(class);
	park->park_preq->preq_optype = type;
	park->park_preq->preq_cookie = cookie;
}

void
puffs_msg_setcall(struct puffs_msgpark *park, parkdone_fn donefn, void *donearg)
{

	KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
	park->park_done = donefn;
	park->park_donearg = donearg;
	park->park_flags |= PARKFLAG_CALL;
}
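
/*
 * The "call" variant: with a done-callback set, the caller need not
 * sleep for the reply; puffsop_msg() below invokes park_done() when
 * the answer arrives, and puffs_msg_wait() returns without sleeping.
 * A sketch (donefn/donearg are caller-supplied, names illustrative):
 *
 *	puffs_msg_setcall(park, donefn, donearg);
 *	puffs_msg_enqueue(pmp, park);
 *	puffs_msg_wait(pmp, park);
 */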

/*
 * kernel-user-kernel waitqueues
 */

static uint64_t
puffs_getmsgid(struct puffs_mount *pmp)
{
	uint64_t rv;

	mutex_enter(&pmp->pmp_lock);
	rv = pmp->pmp_nextmsgid++;
	mutex_exit(&pmp->pmp_lock);

	return rv;
}

/*
 * A word about reference counting of parks.  A reference must be taken
 * when accessing a park and additionally when it is on a queue.  So
 * when taking it off a queue and releasing the access reference, the
 * reference count is generally decremented by 2.
 */
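
/*
 * For example: a park freshly enqueued by puffs_msg_enqueue() has
 * refcount 2, the caller's reference plus the queue's.  When
 * puffsop_msg() has dequeued it and is done accessing it, it drops
 * both at once with puffs_msgpark_release1(park, 2).
 */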

void
puffs_msg_enqueue(struct puffs_mount *pmp, struct puffs_msgpark *park)
{
	struct lwp *l = curlwp;
	struct mount *mp;
	struct puffs_req *preq;

	mp = PMPTOMP(pmp);
	preq = park->park_preq;
	preq->preq_buflen = park->park_maxlen;
	KASSERT(preq->preq_id == 0);

	if ((park->park_flags & PARKFLAG_WANTREPLY) == 0)
		preq->preq_opclass |= PUFFSOPFLAG_FAF;
	else
		preq->preq_id = puffs_getmsgid(pmp);

	/* fill in caller information */
	preq->preq_pid = l->l_proc->p_pid;
	preq->preq_lid = l->l_lid;

	/*
	 * To support cv_sig, yet another movie: check if there are signals
	 * pending and we are issuing a non-FAF.  If so, return an error
	 * directly UNLESS we are issuing INACTIVE/RECLAIM.  In that case,
	 * convert it to a FAF, fire off to the file server and return
	 * an error.  Yes, this is bordering on disgusting.  Barfbags are
	 * on me.
	 */
	if (__predict_false((park->park_flags & PARKFLAG_WANTREPLY)
	   && (park->park_flags & PARKFLAG_CALL) == 0
	   && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))) {
		park->park_flags |= PARKFLAG_HASERROR;
		preq->preq_rv = EINTR;
		if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
		    && (preq->preq_optype == PUFFS_VN_INACTIVE
		     || preq->preq_optype == PUFFS_VN_RECLAIM)) {
			park->park_preq->preq_opclass |= PUFFSOPFLAG_FAF;
			park->park_flags &= ~PARKFLAG_WANTREPLY;
			DPRINTF(("puffs_msg_enqueue: converted to FAF %p\n",
			    park));
		} else {
			return;
		}
	}

	/*
	 * test for suspension lock.
	 *
	 * Note that we *DO NOT* keep the lock, since that might block
	 * lock acquiring PLUS it would give userlandia control over
	 * the lock.  The operation queue enforces a strict ordering:
	 * when the fs server gets in the op stream, it knows things
	 * are in order.  The kernel locks can't guarantee that for
	 * userspace, in any case.
	 *
	 * BUT: this presents a problem for ops which have a consistency
	 * clause based on more than one operation.  Unfortunately such
	 * operations (read, write) do not reliably work yet.
	 *
	 * Ya, Ya, it's wrong wong wrong, me be fixink this someday.
	 *
	 * XXX: and there is one more problem.  We sometimes need to
	 * take a lazy lock in case the fs is suspending and we are
	 * executing as the fs server context.  This might happen
	 * e.g. in the case that the user server triggers a reclaim
	 * in the kernel while the fs is suspending.  It's not a very
	 * likely event, but it needs to be fixed some day.
	 */

	/*
	 * MOREXXX: once PUFFS_WCACHEINFO is enabled, we can't take
	 * the mutex here, since getpages() might be called locked.
	 */
	fstrans_start(mp, FSTRANS_NORMAL);
	mutex_enter(&pmp->pmp_lock);
	fstrans_done(mp);

	if (pmp->pmp_status != PUFFSTAT_RUNNING) {
		mutex_exit(&pmp->pmp_lock);
		park->park_flags |= PARKFLAG_HASERROR;
		preq->preq_rv = ENXIO;
		return;
	}

#ifdef PUFFSDEBUG
	parkqdump(&pmp->pmp_msg_touser, puffsdebug > 1);
	parkqdump(&pmp->pmp_msg_replywait, puffsdebug > 1);
#endif

	/*
	 * Note: we don't need to lock park since we have the only
	 * reference to it at this point.
	 */
	TAILQ_INSERT_TAIL(&pmp->pmp_msg_touser, park, park_entries);
	park->park_flags |= PARKFLAG_ONQUEUE1;
	pmp->pmp_msg_touser_count++;
	park->park_refcount++;
	mutex_exit(&pmp->pmp_lock);

	cv_broadcast(&pmp->pmp_msg_waiter_cv);
	putter_notify(pmp->pmp_pi);

	DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
	    "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, park,
	    preq->preq_opclass, preq->preq_optype, park->park_flags));
}

int
puffs_msg_wait(struct puffs_mount *pmp, struct puffs_msgpark *park)
{
	struct puffs_req *preq = park->park_preq; /* XXX: hmmm */
	struct mount *mp = PMPTOMP(pmp);
	int error = 0;
	int rv;

	mutex_enter(&pmp->pmp_lock);
	puffs_mp_reference(pmp);
	mutex_exit(&pmp->pmp_lock);

	mutex_enter(&park->park_mtx);
	if ((park->park_flags & PARKFLAG_WANTREPLY) == 0
	    || (park->park_flags & PARKFLAG_CALL)) {
		mutex_exit(&park->park_mtx);
		rv = 0;
		goto skipwait;
	}

	/* did the response beat us to the wait? */
	if (__predict_false((park->park_flags & PARKFLAG_DONE)
	    || (park->park_flags & PARKFLAG_HASERROR))) {
		rv = park->park_preq->preq_rv;
		mutex_exit(&park->park_mtx);
		goto skipwait;
	}

	error = cv_wait_sig(&park->park_cv, &park->park_mtx);
	DPRINTF(("puffs_touser: waiter for %p woke up with %d\n",
	    park, error));
	if (error) {
		park->park_flags |= PARKFLAG_WAITERGONE;
		if (park->park_flags & PARKFLAG_DONE) {
			rv = preq->preq_rv;
			mutex_exit(&park->park_mtx);
		} else {
			/*
			 * ok, we marked it as going away, but
			 * still need to do queue ops.  take locks
			 * in correct order.
			 *
			 * We don't want to release our reference
			 * if it's on replywait queue to avoid error
			 * to file server.  putop() code will DTRT.
			 */
			mutex_exit(&park->park_mtx);
			mutex_enter(&pmp->pmp_lock);
			mutex_enter(&park->park_mtx);

			/*
			 * Still on queue1?  We can safely remove it
			 * without any consequences since the file
			 * server hasn't seen it.  "else" we need to
			 * wait for the response and just ignore it
			 * to avoid signalling an incorrect error to
			 * the file server.
			 */
			if (park->park_flags & PARKFLAG_ONQUEUE1) {
				TAILQ_REMOVE(&pmp->pmp_msg_touser,
				    park, park_entries);
				puffs_msgpark_release(park);
				pmp->pmp_msg_touser_count--;
				park->park_flags &= ~PARKFLAG_ONQUEUE1;
			} else {
				mutex_exit(&park->park_mtx);
			}
			mutex_exit(&pmp->pmp_lock);

			rv = EINTR;
		}
	} else {
		rv = preq->preq_rv;
		mutex_exit(&park->park_mtx);
	}

	/*
	 * retake the lock and release.  This makes sure (haha,
	 * I'm humorous) that we don't process the same vnode in
	 * multiple threads due to the lock hacks we have in
	 * puffs_lock().  In reality this is well protected by
	 * the biglock, but once that's gone, well, hopefully
	 * this will be fixed for real.  (and when you read this
	 * comment in 2017 and subsequently barf, my condolences ;).
	 */
	if (rv == 0 && !fstrans_is_owner(mp)) {
		fstrans_start(mp, FSTRANS_NORMAL);
		fstrans_done(mp);
	}

 skipwait:
	mutex_enter(&pmp->pmp_lock);
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	return rv;
}

/*
 * XXX: this suuuucks.  Hopefully I'll get rid of this lossage once
 * the whole setback-nonsense gets fixed.
 */
int
puffs_msg_wait2(struct puffs_mount *pmp, struct puffs_msgpark *park,
	struct puffs_node *pn1, struct puffs_node *pn2)
{
	struct puffs_req *preq;
	int rv;

	rv = puffs_msg_wait(pmp, park);

	preq = park->park_preq;
	if (pn1 && preq->preq_setbacks & PUFFS_SETBACK_INACT_N1)
		pn1->pn_stat |= PNODE_DOINACT;
	if (pn2 && preq->preq_setbacks & PUFFS_SETBACK_INACT_N2)
		pn2->pn_stat |= PNODE_DOINACT;

	if (pn1 && preq->preq_setbacks & PUFFS_SETBACK_NOREF_N1)
		pn1->pn_stat |= PNODE_NOREFS;
	if (pn2 && preq->preq_setbacks & PUFFS_SETBACK_NOREF_N2)
		pn2->pn_stat |= PNODE_NOREFS;

	return rv;
}

/*
 * Get next request in the outgoing queue.  "maxsize" controls the
 * size the caller can accommodate and "nonblock" signals if this
 * should block while waiting for input.  Handles all locking internally.
 */
int
puffs_msgif_getout(void *this, size_t maxsize, int nonblock,
	uint8_t **data, size_t *dlen, void **parkptr)
{
	struct puffs_mount *pmp = this;
	struct puffs_msgpark *park;
	struct puffs_req *preq;
	int error;

	error = 0;
	mutex_enter(&pmp->pmp_lock);
	puffs_mp_reference(pmp);
	for (;;) {
		/* RIP? */
		if (pmp->pmp_status != PUFFSTAT_RUNNING) {
			error = ENXIO;
			break;
		}

		/* need platinum yendorian express card? */
		if (TAILQ_EMPTY(&pmp->pmp_msg_touser)) {
			DPRINTF(("puffs_getout: no outgoing op, "));
			if (nonblock) {
				DPRINTF(("returning EWOULDBLOCK\n"));
				error = EWOULDBLOCK;
				break;
			}
			DPRINTF(("waiting ...\n"));

			error = cv_wait_sig(&pmp->pmp_msg_waiter_cv,
			    &pmp->pmp_lock);
			if (error)
				break;
			else
				continue;
		}

		park = TAILQ_FIRST(&pmp->pmp_msg_touser);
		if (park == NULL)
			continue;

		mutex_enter(&park->park_mtx);
		puffs_msgpark_reference(park);

		DPRINTF(("puffs_getout: found park at %p, ", park));

		/* If it's a goner, don't process any further */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			DPRINTF(("waitergone!\n"));
			puffs_msgpark_release(park);
			continue;
		}
		preq = park->park_preq;

#if 0
		/* check size */
		/*
		 * XXX: this check is not valid for now, we don't know
		 * the size of the caller's input buffer.  i.e. this
		 * will most likely go away
		 */
		if (maxsize < preq->preq_frhdr.pfr_len) {
			DPRINTF(("buffer too small\n"));
			puffs_msgpark_release(park);
			error = E2BIG;
			break;
		}
#endif

		DPRINTF(("returning\n"));

		/*
		 * Ok, we found what we came for.  Release it from the
		 * outgoing queue but do not unlock.  We will unlock
		 * only after we "releaseout" it to avoid complications:
		 * otherwise it is (theoretically) possible for userland
		 * to race us into "put" before we have a chance to put
		 * this baby on the receiving queue.
		 */
		TAILQ_REMOVE(&pmp->pmp_msg_touser, park, park_entries);
		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		mutex_exit(&park->park_mtx);

		pmp->pmp_msg_touser_count--;
		KASSERT(pmp->pmp_msg_touser_count >= 0);

		break;
	}
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	if (error == 0) {
		*data = (uint8_t *)preq;
		preq->preq_pth.pth_framelen = park->park_copylen;
		*dlen = preq->preq_pth.pth_framelen;
		*parkptr = park;
	}

	return error;
}
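
/*
 * For reference, a sketch of the consumer side.  This assumes the
 * putter transport drives the hooks from its read path roughly as
 * below (cf. dev/putter for the actual wiring); the status handed to
 * releaseout decides between the replywait queue and the death
 * chamber:
 *
 *	error = puffs_msgif_getout(pmp, maxsize, nonblock,
 *	    &data, &dlen, &parkptr);
 *	if (error == 0) {
 *		error = ...copy dlen bytes at data to the file server...;
 *		puffs_msgif_releaseout(pmp, parkptr, error);
 *	}
 */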

/*
 * Release outgoing structure.  Now, depending on the success of the
 * outgoing send, it is either going onto the result waiting queue
 * or the death chamber.
 */
void
puffs_msgif_releaseout(void *this, void *parkptr, int status)
{
	struct puffs_mount *pmp = this;
	struct puffs_msgpark *park = parkptr;

	DPRINTF(("puffs_releaseout: returning park %p, errno %d: ",
	    park, status));
	mutex_enter(&pmp->pmp_lock);
	mutex_enter(&park->park_mtx);
	if (park->park_flags & PARKFLAG_WANTREPLY) {
		if (status == 0) {
			DPRINTF(("enqueue replywait\n"));
			TAILQ_INSERT_TAIL(&pmp->pmp_msg_replywait, park,
			    park_entries);
			park->park_flags |= PARKFLAG_ONQUEUE2;
		} else {
			DPRINTF(("error path!\n"));
			park->park_preq->preq_rv = status;
			park->park_flags |= PARKFLAG_DONE;
			cv_signal(&park->park_cv);
		}
		puffs_msgpark_release(park);
	} else {
		DPRINTF(("release\n"));
		puffs_msgpark_release1(park, 2);
	}
	mutex_exit(&pmp->pmp_lock);
}

size_t
puffs_msgif_waitcount(void *this)
{
	struct puffs_mount *pmp = this;
	size_t rv;

	mutex_enter(&pmp->pmp_lock);
	rv = pmp->pmp_msg_touser_count;
	mutex_exit(&pmp->pmp_lock);

	return rv;
}

/*
 * XXX: locking with this one?
 */
static void
puffsop_msg(void *this, struct puffs_req *preq)
{
	struct puffs_mount *pmp = this;
	struct putter_hdr *pth = &preq->preq_pth;
	struct puffs_msgpark *park;
	int wgone;

	mutex_enter(&pmp->pmp_lock);

	/* Locate waiter */
	TAILQ_FOREACH(park, &pmp->pmp_msg_replywait, park_entries) {
		if (park->park_preq->preq_id == preq->preq_id)
			break;
	}
	if (park == NULL) {
		DPRINTF(("puffsop_msg: no request: %" PRIu64 "\n",
		    preq->preq_id));
		mutex_exit(&pmp->pmp_lock);
		return; /* XXX send error */
	}

	mutex_enter(&park->park_mtx);
	puffs_msgpark_reference(park);
	if (pth->pth_framelen > park->park_maxlen) {
		DPRINTF(("puffsop_msg: invalid buffer length: "
		    "%" PRIu64 " (req %" PRIu64 ")\n", pth->pth_framelen,
		    preq->preq_id));
		park->park_preq->preq_rv = EPROTO;
		cv_signal(&park->park_cv);
		puffs_msgpark_release1(park, 2);
		mutex_exit(&pmp->pmp_lock);
		return; /* XXX: error */
	}
	wgone = park->park_flags & PARKFLAG_WAITERGONE;

	KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
	TAILQ_REMOVE(&pmp->pmp_msg_replywait, park, park_entries);
	park->park_flags &= ~PARKFLAG_ONQUEUE2;
	mutex_exit(&pmp->pmp_lock);

	if (wgone) {
		DPRINTF(("puffsop_msg: bad service - waiter gone for "
		    "park %p\n", park));
	} else {
		if (park->park_flags & PARKFLAG_CALL) {
			DPRINTF(("puffsop_msg: call for %p, arg %p\n",
			    park->park_preq, park->park_donearg));
			park->park_done(pmp, preq, park->park_donearg);
		} else {
			/* XXX: yes, I know */
			memcpy(park->park_preq, preq, pth->pth_framelen);
		}
	}

	if (!wgone) {
		DPRINTF(("puffs_putop: flagging done for "
		    "park %p\n", park));
		cv_signal(&park->park_cv);
	}

	park->park_flags |= PARKFLAG_DONE;
	puffs_msgpark_release1(park, 2);
}

/*
 * helpers
 */
static void
dosuspendresume(void *arg)
{
	struct puffs_mount *pmp = arg;
	struct mount *mp;
	int rv;

	mp = PMPTOMP(pmp);
	/*
	 * XXX?  does this really do any good or is it just
	 * paranoid stupidity?  or stupid paranoia?
	 */
	if (mp->mnt_iflag & IMNT_UNMOUNT) {
		printf("puffs dosuspendresume(): detected suspend on "
		    "unmounting fs\n");
		goto out;
	}

	/* Do the dance.  Allow only one concurrent suspend */
	rv = vfs_suspend(PMPTOMP(pmp), 1);
	if (rv == 0)
		vfs_resume(PMPTOMP(pmp));

 out:
	mutex_enter(&pmp->pmp_lock);
	KASSERT(pmp->pmp_suspend == 1);
	pmp->pmp_suspend = 0;
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	kthread_exit(0);
}

static void
puffsop_suspend(struct puffs_mount *pmp)
{
	int rv = 0;

	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_suspend || pmp->pmp_status != PUFFSTAT_RUNNING) {
		rv = EBUSY;
	} else {
		puffs_mp_reference(pmp);
		pmp->pmp_suspend = 1;
	}
	mutex_exit(&pmp->pmp_lock);
	if (rv)
		return;
	rv = kthread_create(PRI_NONE, 0, NULL, dosuspendresume,
	    pmp, NULL, "puffsusp");

	/* XXX: "return" rv */
}

static int
puffsop_flush(struct puffs_mount *pmp, struct puffs_flush *pf)
{
	struct vnode *vp;
	voff_t offlo, offhi;
	int rv, flags = 0;

	/* XXX: slurry */
	if (pf->pf_op == PUFFS_INVAL_NAMECACHE_ALL) {
		cache_purgevfs(PMPTOMP(pmp));
		return 0;
	}

	/*
	 * Get vnode, don't lock it.  Namecache is protected by its own lock
	 * and we have a reference to protect against premature harvesting.
	 *
	 * The node we want here might be locked and the op is in
	 * userspace waiting for us to complete ==> deadlock.  Another
	 * reason we need to eventually bump locking to userspace, as we
	 * will need to lock the node if we wish to do flushes.
	 */
	rv = puffs_cookie2vnode(pmp, pf->pf_cookie, 0, 0, &vp);
	if (rv) {
		if (rv == PUFFS_NOSUCHCOOKIE)
			return ENOENT;
		return rv;
	}

	switch (pf->pf_op) {
#if 0
	/* not quite ready, yet */
	case PUFFS_INVAL_NAMECACHE_NODE:
	struct componentname *pf_cn;
	char *name;
		/* get comfortab^Wcomponentname */
		pf_cn = kmem_alloc(componentname);
		memset(pf_cn, 0, sizeof(struct componentname));
		break;

#endif
	case PUFFS_INVAL_NAMECACHE_DIR:
		if (vp->v_type != VDIR) {
			rv = EINVAL;
			break;
		}
		cache_purge1(vp, NULL, PURGE_CHILDREN);
		break;

	case PUFFS_INVAL_PAGECACHE_NODE_RANGE:
		flags = PGO_FREE;
		/*FALLTHROUGH*/
	case PUFFS_FLUSH_PAGECACHE_NODE_RANGE:
		if (flags == 0)
			flags = PGO_CLEANIT;

		if (pf->pf_end > vp->v_size || vp->v_type != VREG) {
			rv = EINVAL;
			break;
		}

		offlo = trunc_page(pf->pf_start);
		offhi = round_page(pf->pf_end);
		if (offhi != 0 && offlo >= offhi) {
			rv = EINVAL;
			break;
		}

		simple_lock(&vp->v_uobj.vmobjlock);
		rv = VOP_PUTPAGES(vp, offlo, offhi, flags);
		break;

	default:
		rv = EINVAL;
	}

	vrele(vp);

	return rv;
}

int
puffs_msgif_dispatch(void *this, struct putter_hdr *pth)
{
	struct puffs_mount *pmp = this;
	struct puffs_req *preq = (struct puffs_req *)pth;

	/* XXX: need to send error to userspace */
	if (pth->pth_framelen < sizeof(struct puffs_req))
		return EINVAL; /* E2SMALL */

	switch (PUFFSOP_OPCLASS(preq->preq_opclass)) {
	case PUFFSOP_VN:
	case PUFFSOP_VFS:
		puffsop_msg(pmp, preq);
		break;
	case PUFFSOP_FLUSH:
		if (pth->pth_framelen != sizeof(struct puffs_flush))
			return EINVAL;
		puffsop_flush(pmp, (struct puffs_flush *)preq);
		break;
	case PUFFSOP_SUSPEND:
		puffsop_suspend(pmp);
		break;
	default:
		/* XXX: send error */
		break;
	}

	return 0;
}
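
/*
 * A note on framing, for the reader following along: the cast above
 * works because struct puffs_req begins with the putter header, i.e.
 * an incoming frame looks like
 *
 *	struct putter_hdr	(pth_framelen = total frame length)
 *	rest of struct puffs_req
 *	op-specific data, if any
 *
 * and pth_framelen is checked against the expected size before the
 * frame is interpreted any further.
 */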

int
puffs_msgif_close(void *this)
{
	struct puffs_mount *pmp = this;
	struct mount *mp = PMPTOMP(pmp);
	int gone, rv;

	mutex_enter(&pmp->pmp_lock);
	puffs_mp_reference(pmp);

	/*
	 * Free the waiting callers before proceeding any further.
	 * The syncer might be jogging around in this file system
	 * currently.  If we allow it to go to the userspace of no
	 * return while trying to get the syncer lock, well ...
	 *   synclk: I feel happy, I feel fine.
	 *   lockmgr: You're not fooling anyone, you know.
	 */
	puffs_userdead(pmp);

	/*
	 * Make sure someone from puffs_unmount() isn't currently in
	 * userspace.  If we don't take this precautionary step,
	 * they might notice that the mountpoint has disappeared
	 * from under them once they return.  Especially note that we
	 * cannot simply test for an unmounter before calling
	 * dounmount(), since it might be possible that that particular
	 * invocation of unmount was called without MNT_FORCE.  Here we
	 * *must* make sure unmount succeeds.  Also, restart is necessary
	 * since pmp isn't locked.  We might end up with PUTTER_DEAD after
	 * restart and exit from there.
	 */
	if (pmp->pmp_unmounting) {
		cv_wait(&pmp->pmp_unmounting_cv, &pmp->pmp_lock);
		puffs_mp_release(pmp);
		mutex_exit(&pmp->pmp_lock);
		DPRINTF(("puffs_fop_close: unmount was in progress for pmp %p, "
		    "restart\n", pmp));
		return ERESTART;
	}

	/* Won't access pmp from here anymore */
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	/*
	 * Detach from VFS.  First do necessary XXX-dance (from
	 * sys_unmount() & other callers of dounmount()).
	 *
	 * XXX Freeze syncer.  Must do this before locking the
	 * mount point.  See dounmount() for details.
	 *
	 * XXX2: take a reference to the mountpoint before starting to
	 * wait for syncer_mutex.  Otherwise the mountpoint can be
	 * wiped out while we wait.
	 */
	simple_lock(&mp->mnt_slock);
	mp->mnt_wcnt++;
	simple_unlock(&mp->mnt_slock);

	mutex_enter(&syncer_mutex);

	simple_lock(&mp->mnt_slock);
	mp->mnt_wcnt--;
	if (mp->mnt_wcnt == 0)
		wakeup(&mp->mnt_wcnt);
	gone = mp->mnt_iflag & IMNT_GONE;
	simple_unlock(&mp->mnt_slock);
	if (gone) {
		mutex_exit(&syncer_mutex);
		return 0;
	}

	/*
	 * microscopic race condition here (although not with the current
	 * kernel), but can't really fix it without starting a crusade
	 * against vfs_busy(), so let it be, let it be, let it be
	 */

	/*
	 * The only way vfs_busy() will fail for us is if the filesystem
	 * is already a goner.
	 * XXX: skating on the thin ice of modern calling conventions ...
	 */
	if (vfs_busy(mp, 0, 0)) {
		mutex_exit(&syncer_mutex);
		return 0;
	}

	/*
	 * Once we have the mount point, unmount() can't interfere..
	 * or at least in theory it shouldn't.  dounmount() reentrancy
	 * might require some visiting at some point.
	 */
	rv = dounmount(mp, MNT_FORCE, curlwp);
	KASSERT(rv == 0);

	return 0;
}

/*
 * We're dead, kaput, RIP, slightly more than merely pining for the
 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
 * our maker, ceased to be, etcetc.  YASD.  It's a dead FS!
 *
 * Caller must hold puffs mutex.
 */
void
puffs_userdead(struct puffs_mount *pmp)
{
	struct puffs_msgpark *park, *park_next;

	/*
	 * Mark filesystem status as dying so that operations don't
	 * attempt to march to userspace any longer.
	 */
	pmp->pmp_status = PUFFSTAT_DYING;

	/* signal waiters on REQUEST TO file server queue */
	for (park = TAILQ_FIRST(&pmp->pmp_msg_touser); park; park = park_next) {
		uint8_t opclass;

		mutex_enter(&park->park_mtx);
		puffs_msgpark_reference(park);
		park_next = TAILQ_NEXT(park, park_entries);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		TAILQ_REMOVE(&pmp->pmp_msg_touser, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		pmp->pmp_msg_touser_count--;

		/*
		 * Even though waiters on QUEUE1 are removed in touser()
		 * in case of WAITERGONE, it is still possible for us to
		 * get raced here due to having to retake locks in said
		 * touser().  In the race case simply "ignore" the item
		 * on the queue and move on to the next one.
		 */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
			KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
			puffs_msgpark_release(park);

		} else {
			opclass = park->park_preq->preq_opclass;
			park->park_preq->preq_rv = ENXIO;

			if (park->park_flags & PARKFLAG_CALL) {
				park->park_done(pmp, park->park_preq,
				    park->park_donearg);
				puffs_msgpark_release1(park, 2);
			} else if ((park->park_flags & PARKFLAG_WANTREPLY)==0) {
				puffs_msgpark_release1(park, 2);
			} else {
				park->park_preq->preq_rv = ENXIO;
				cv_signal(&park->park_cv);
				puffs_msgpark_release(park);
			}
		}
	}

	/* signal waiters on RESPONSE FROM file server queue */
	for (park = TAILQ_FIRST(&pmp->pmp_msg_replywait); park; park = park_next) {
		mutex_enter(&park->park_mtx);
		puffs_msgpark_reference(park);
		park_next = TAILQ_NEXT(park, park_entries);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
		KASSERT(park->park_flags & PARKFLAG_WANTREPLY);

		TAILQ_REMOVE(&pmp->pmp_msg_replywait, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE2;

		if (park->park_flags & PARKFLAG_WAITERGONE) {
			KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
			puffs_msgpark_release(park);
		} else {
			park->park_preq->preq_rv = ENXIO;
			if (park->park_flags & PARKFLAG_CALL) {
				park->park_done(pmp, park->park_preq,
				    park->park_donearg);
				puffs_msgpark_release1(park, 2);
			} else {
				cv_signal(&park->park_cv);
				puffs_msgpark_release(park);
			}
		}
	}

	cv_broadcast(&pmp->pmp_msg_waiter_cv);
}