puffs_msgif.c revision 1.38 1 /* $NetBSD: puffs_msgif.c,v 1.38 2007/06/06 01:54:59 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2005, 2006, 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by the
7 * Google Summer of Code program and the Ulla Tuominen Foundation.
8 * The Google SoC project was mentored by Bill Studenmund.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.38 2007/06/06 01:54:59 pooka Exp $");
34
35 #include <sys/param.h>
36 #include <sys/fstrans.h>
37 #include <sys/malloc.h>
38 #include <sys/mount.h>
39 #include <sys/vnode.h>
40 #include <sys/lock.h>
41
42 #include <fs/puffs/puffs_msgif.h>
43 #include <fs/puffs/puffs_sys.h>
44
45 /*
46 * waitq data structures
47 */
48
/*
 * While a request is going to userspace, park the caller within the
 * kernel.  This is the kernel counterpart of "struct puffs_req".
 */
struct puffs_park {
	struct puffs_req *park_preq;	/* req followed by buf		*/
	uint64_t park_id;		/* duplicate of preq_id		*/

	size_t park_copylen;		/* userspace copylength		*/
	size_t park_maxlen;		/* max size in comeback		*/

	parkdone_fn park_done;		/* reply callback for _call ops	*/
	void *park_donearg;		/* opaque argument to park_done	*/

	int park_flags;			/* PARKFLAG_*, see below	*/
	int park_refcount;		/* protected by park_mtx	*/

	kcondvar_t park_cv;		/* caller sleeps here for reply	*/
	kmutex_t park_mtx;		/* protects flags and refcount	*/

	TAILQ_ENTRY(puffs_park) park_entries;	/* touser/replywait queue */
};
#define PARKFLAG_WAITERGONE 0x01	/* waiting thread gave up	*/
#define PARKFLAG_DONE 0x02		/* reply has been processed	*/
#define PARKFLAG_ONQUEUE1 0x04		/* linked on pmp_req_touser	*/
#define PARKFLAG_ONQUEUE2 0x08		/* linked on pmp_req_replywait	*/
#define PARKFLAG_CALL 0x10		/* async op: deliver via park_done */
#define PARKFLAG_WANTREPLY 0x20		/* op is not fire-and-forget	*/

/* allocation pool and constructed-object cache for park structures */
static struct pool_cache parkpc;
static struct pool parkpool;
80
81 static int
82 makepark(void *arg, void *obj, int flags)
83 {
84 struct puffs_park *park = obj;
85
86 mutex_init(&park->park_mtx, MUTEX_DEFAULT, IPL_NONE);
87 cv_init(&park->park_cv, "puffsrpl");
88
89 return 0;
90 }
91
92 static void
93 nukepark(void *arg, void *obj)
94 {
95 struct puffs_park *park = obj;
96
97 cv_destroy(&park->park_cv);
98 mutex_destroy(&park->park_mtx);
99 }
100
101 void
102 puffs_msgif_init()
103 {
104
105 pool_init(&parkpool, sizeof(struct puffs_park), 0, 0, 0,
106 "puffprkl", &pool_allocator_nointr, IPL_NONE);
107 pool_cache_init(&parkpc, &parkpool, makepark, nukepark, NULL);
108 }
109
110 void
111 puffs_msgif_destroy()
112 {
113
114 pool_cache_destroy(&parkpc);
115 pool_destroy(&parkpool);
116 }
117
/*
 * Allocate a park structure.  Sleeps for memory iff "waitok".
 * On success the park is returned with one reference held and
 * park_mtx LOCKED; the caller is expected to hand both off to
 * touser() / puffs_park_release().  Returns NULL only when !waitok.
 */
void *
puffs_park_alloc(int waitok)
{
	struct puffs_park *park;

	park = pool_cache_get(&parkpc, waitok ? PR_WAITOK : PR_NOWAIT);
	if (park) {
		park->park_refcount = 1;
		mutex_enter(&park->park_mtx);
	}

	return park;
}
131
/*
 * Take an additional reference on a park.  Returns with park_mtx
 * held; the matching puffs_park_release() drops both the reference
 * and the mutex.
 */
static void
puffs_park_reference(struct puffs_park *park)
{

	mutex_enter(&park->park_mtx);
	park->park_refcount++;
}
139
140 void
141 puffs_park_release(void *arg, int fullnuke)
142 {
143 struct puffs_park *park = arg;
144
145 KASSERT(mutex_owned(&park->park_mtx));
146 --park->park_refcount;
147
148 mutex_exit(&park->park_mtx);
149 if (park->park_refcount == 0 || fullnuke)
150 pool_cache_put(&parkpc, park);
151 }
152
#ifdef PUFFSDEBUG
/*
 * Debug helper: dump one park's fields via DPRINTF.
 */
static void
parkdump(struct puffs_park *park)
{

	DPRINTF(("park %p, preq %p, id %" PRIu64 "\n"
	    "\tcopy %zu, max %zu - done: %p/%p\n"
	    "\tflags 0x%08x, refcount %d, cv/mtx: %p/%p\n",
	    park, park->park_preq, park->park_id,
	    park->park_copylen, park->park_maxlen,
	    park->park_done, park->park_donearg,
	    park->park_flags, park->park_refcount,
	    &park->park_cv, &park->park_mtx));
}

/*
 * Debug helper: walk a wait queue, counting entries and optionally
 * (dumpall) dumping each park on it.  Caller is expected to hold
 * whatever lock protects the queue.
 */
static void
parkqdump(struct puffs_wq *q, int dumpall)
{
	struct puffs_park *park;
	int total = 0;

	TAILQ_FOREACH(park, q, park_entries) {
		if (dumpall)
			parkdump(park);
		total++;
	}
	DPRINTF(("puffs waitqueue at %p dumped, %d total\n", q, total));

}
#endif /* PUFFSDEBUG */
183
/*
 * Converts a non-FAF op to a FAF.  This involves making a copy of
 * the request structure and tagging the copy as a FAF; the park
 * itself is reused.  It is safe to block here, since the original
 * op is not a FAF.  The copied buffer is freed in getop once it has
 * been shipped to the file server (non-WANTREPLY path).
 */
static void
puffs_reqtofaf(struct puffs_park *park)
{
	struct puffs_req *newpreq;

	KASSERT((park->park_preq->preq_opclass & PUFFSOPFLAG_FAF) == 0);

	MALLOC(newpreq, struct puffs_req *, park->park_copylen,
	    M_PUFFS, M_ZERO | M_WAITOK);

	memcpy(newpreq, park->park_preq, park->park_copylen);

	park->park_preq = newpreq;
	park->park_preq->preq_opclass |= PUFFSOPFLAG_FAF;
	park->park_flags &= ~PARKFLAG_WANTREPLY;
}
205
206
207 /*
208 * kernel-user-kernel waitqueues
209 */
210
211 static int touser(struct puffs_mount *, struct puffs_park *, uint64_t);
212
213 uint64_t
214 puffs_getreqid(struct puffs_mount *pmp)
215 {
216 uint64_t rv;
217
218 mutex_enter(&pmp->pmp_lock);
219 rv = pmp->pmp_nextreq++;
220 mutex_exit(&pmp->pmp_lock);
221
222 return rv;
223 }
224
/*
 * vfs request: ship a vfs-level op to the file server and wait for
 * the reply.  "kbuf" is both the outgoing request and the landing
 * area for the reply; it stays owned by the caller.
 */
int
puffs_vfstouser(struct puffs_mount *pmp, int optype, void *kbuf, size_t buflen)
{
	struct puffs_park *park;

	park = puffs_park_alloc(1);
	park->park_preq = kbuf;

	park->park_preq->preq_opclass = PUFFSOP_VFS;
	park->park_preq->preq_optype = optype;

	/* reply may not grow: copylen == maxlen == buflen */
	park->park_maxlen = park->park_copylen = buflen;
	park->park_flags = 0;

	return touser(pmp, park, puffs_getreqid(pmp));
}
242
/*
 * Inform the file server of a suspend-state change.  Sent as a
 * fire-and-forget op (reqid 0, FAF flag); the malloc'd request is
 * freed by getop after it has been copied out to the server.
 */
void
puffs_suspendtouser(struct puffs_mount *pmp, int status)
{
	struct puffs_vfsreq_suspend *pvfsr_susp;
	struct puffs_park *park;

	pvfsr_susp = malloc(sizeof(struct puffs_vfsreq_suspend),
	    M_PUFFS, M_WAITOK | M_ZERO);
	park = puffs_park_alloc(1);

	pvfsr_susp->pvfsr_status = status;
	park->park_preq = (struct puffs_req *)pvfsr_susp;

	park->park_preq->preq_opclass = PUFFSOP_VFS | PUFFSOPFLAG_FAF;
	park->park_preq->preq_optype = PUFFS_VFS_SUSPEND;

	park->park_maxlen = park->park_copylen
	    = sizeof(struct puffs_vfsreq_suspend);
	park->park_flags = 0;

	/* FAF: no reply expected, ignore return value */
	(void)touser(pmp, park, 0);
}
265
/*
 * vnode level request
 *
 * Ship a vnode-level op and wait for the reply.  "kbuf" holds
 * "buflen" bytes of request; the server's reply may grow it by up
 * to "maxdelta" bytes.  vp_opc is the vnode the op applies to,
 * vp_aux an optional second vnode.  After the roundtrip, server-
 * requested "setbacks" are applied to the nodes' pn_stat.
 */
int
puffs_vntouser(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, size_t maxdelta,
	struct vnode *vp_opc, struct vnode *vp_aux)
{
	struct puffs_park *park;
	struct puffs_req *preq;
	void *cookie = VPTOPNC(vp_opc);
	struct puffs_node *pnode;
	int rv;

	park = puffs_park_alloc(1);
	park->park_preq = kbuf;

	park->park_preq->preq_opclass = PUFFSOP_VN;
	park->park_preq->preq_optype = optype;
	park->park_preq->preq_cookie = cookie;

	park->park_copylen = buflen;
	park->park_maxlen = buflen + maxdelta;
	park->park_flags = 0;

	rv = touser(pmp, park, puffs_getreqid(pmp));

	/*
	 * Check if the user server requests that inactive be called
	 * when the time is right.
	 *
	 * NOTE(review): touser() has released the park's reference by
	 * the time we get here; this relies on park_preq still being
	 * kbuf (caller-owned).  Verify lifetime if puffs_reqtofaf()
	 * may have swapped the request (EINTR + INACTIVE case).
	 */
	preq = park->park_preq;
	if (preq->preq_setbacks & PUFFS_SETBACK_INACT_N1) {
		pnode = vp_opc->v_data;
		pnode->pn_stat |= PNODE_DOINACT;
	}
	if (preq->preq_setbacks & PUFFS_SETBACK_INACT_N2) {
		/* if no vp_aux, just ignore */
		if (vp_aux) {
			pnode = vp_aux->v_data;
			pnode->pn_stat |= PNODE_DOINACT;
		}
	}
	if (preq->preq_setbacks & PUFFS_SETBACK_NOREF_N1) {
		pnode = vp_opc->v_data;
		pnode->pn_stat |= PNODE_NOREFS;
	}
	if (preq->preq_setbacks & PUFFS_SETBACK_NOREF_N2) {
		/* if no vp_aux, just ignore */
		if (vp_aux) {
			pnode = vp_aux->v_data;
			pnode->pn_stat |= PNODE_NOREFS;
		}
	}

	return rv;
}
323
/*
 * vnode level request, caller-controlled req id
 *
 * Same as puffs_vntouser(), except the caller supplies the request
 * id instead of drawing a fresh one, and no setback processing is
 * done on return.
 */
int
puffs_vntouser_req(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, size_t maxdelta,
	uint64_t reqid, struct vnode *vp_opc, struct vnode *vp_aux)
{
	struct puffs_park *park;
	void *cookie = VPTOPNC(vp_opc);

	park = puffs_park_alloc(1);
	park->park_preq = kbuf;

	park->park_preq->preq_opclass = PUFFSOP_VN;
	park->park_preq->preq_optype = optype;
	park->park_preq->preq_cookie = cookie;

	park->park_copylen = buflen;
	park->park_maxlen = buflen + maxdelta;
	park->park_flags = 0;

	return touser(pmp, park, reqid);
}
348
/*
 * Asynchronous vnode-level request: instead of sleeping for the
 * reply, register a callback (donefn/donearg) which putop runs when
 * the server answers.  The caller does not wait and gets no return
 * value.
 */
void
puffs_vntouser_call(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, size_t maxdelta,
	parkdone_fn donefn, void *donearg,
	struct vnode *vp_opc, struct vnode *vp_aux)
{
	struct puffs_park *park;
	void *cookie = VPTOPNC(vp_opc);

	park = puffs_park_alloc(1);
	park->park_preq = kbuf;

	park->park_preq->preq_opclass = PUFFSOP_VN;
	park->park_preq->preq_optype = optype;
	park->park_preq->preq_cookie = cookie;

	park->park_copylen = buflen;
	park->park_maxlen = buflen + maxdelta;
	park->park_done = donefn;
	park->park_donearg = donearg;
	park->park_flags = PARKFLAG_CALL;

	(void) touser(pmp, park, puffs_getreqid(pmp));
}
373
/*
 * Fire-and-forget vnode-level request.
 *
 * Notice: kbuf will be free'd later.  It must be allocated from the
 * kernel heap and its ownership is shifted to this function from
 * now on, i.e. the caller is not allowed to use it anymore!
 */
void
puffs_vntouser_faf(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, struct vnode *vp_opc)
{
	struct puffs_park *park;
	void *cookie = VPTOPNC(vp_opc);

	/* XXX: is it allowable to sleep here? */
	park = puffs_park_alloc(0);
	if (park == NULL)
		return; /* 2bad */

	park->park_preq = kbuf;

	park->park_preq->preq_opclass = PUFFSOP_VN | PUFFSOPFLAG_FAF;
	park->park_preq->preq_optype = optype;
	park->park_preq->preq_cookie = cookie;

	park->park_maxlen = park->park_copylen = buflen;
	park->park_flags = 0;

	/* FAF: reqid 0, no reply expected */
	(void)touser(pmp, park, 0);
}
402
/*
 * Ship cache information to the file server as a fire-and-forget
 * op.  Unlike the other entry points, the caller supplies the park
 * (already allocated/locked per puffs_park_alloc() convention).
 */
void
puffs_cacheop(struct puffs_mount *pmp, struct puffs_park *park,
	struct puffs_cacheinfo *pcinfo, size_t pcilen, void *cookie)
{

	park->park_preq = (struct puffs_req *)pcinfo;
	park->park_preq->preq_opclass = PUFFSOP_CACHE | PUFFSOPFLAG_FAF;
	park->park_preq->preq_optype = PCACHE_TYPE_WRITE; /* XXX */
	park->park_preq->preq_cookie = cookie;

	park->park_maxlen = park->park_copylen = pcilen;
	park->park_flags = 0;

	(void)touser(pmp, park, 0);
}
418
419 /*
420 * Wait for the userspace ping-pong game in calling process context.
421 *
422 * This unlocks vnodes if they are supplied. vp1 is the vnode
423 * before in the locking order, i.e. the one which must be locked
424 * before accessing vp2. This is done here so that operations are
425 * already ordered in the queue when vnodes are unlocked (I'm not
426 * sure if that's really necessary, but it can't hurt). Okok, maybe
427 * there's a slight ugly-factor also, but let's not worry about that.
428 */
/*
 * Entry invariants: park carries one reference and park_mtx is held
 * (both established by puffs_park_alloc()); this function consumes
 * both on every path.  Returns 0 or an errno (EINTR, ENXIO, or the
 * server-supplied preq_rv).
 */
static int
touser(struct puffs_mount *pmp, struct puffs_park *park, uint64_t reqid)
{
	struct lwp *l = curlwp;
	struct mount *mp;
	struct puffs_req *preq;
	int rv = 0;

	mp = PMPTOMP(pmp);
	preq = park->park_preq;
	preq->preq_id = park->park_id = reqid;
	preq->preq_buflen = ALIGN(park->park_maxlen);

	if (PUFFSOP_WANTREPLY(preq->preq_opclass))
		park->park_flags |= PARKFLAG_WANTREPLY;

	/*
	 * To support PCATCH, yet another movie: check if there are signals
	 * pending and we are issueing a non-FAF.  If so, return an error
	 * directly UNLESS we are issueing INACTIVE.  In that case, convert
	 * it to a FAF, fire off to the file server and return an error.
	 * Yes, this is bordering disgusting.  Barfbags are on me.
	 */
	if ((park->park_flags & PARKFLAG_WANTREPLY)
	    && (park->park_flags & PARKFLAG_CALL) == 0
	    && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0)) {
		if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
		    && preq->preq_optype == PUFFS_VN_INACTIVE) {
			/* op still goes out, but we report EINTR */
			puffs_reqtofaf(park);
			DPRINTF(("puffs touser: converted to FAF %p\n", park));
			rv = EINTR;
		} else {
			puffs_park_release(park, 0);
			return EINTR;
		}
	}

	/*
	 * test for suspension lock.
	 *
	 * Note that we *DO NOT* keep the lock, since that might block
	 * lock acquiring PLUS it would give userlandia control over
	 * the lock.  The operation queue enforces a strict ordering:
	 * when the fs server gets in the op stream, it knows things
	 * are in order.  The kernel locks can't guarantee that for
	 * userspace, in any case.
	 *
	 * BUT: this presents a problem for ops which have a consistency
	 * clause based on more than one operation.  Unfortunately such
	 * operations (read, write) do not reliably work yet.
	 *
	 * Ya, Ya, it's wrong wong wrong, me be fixink this someday.
	 *
	 * XXX: and there is one more problem.  We sometimes need to
	 * take a lazy lock in case the fs is suspending and we are
	 * executing as the fs server context.  This might happen
	 * e.g. in the case that the user server triggers a reclaim
	 * in the kernel while the fs is suspending.  It's not a very
	 * likely event, but it needs to be fixed some day.
	 */

	/*
	 * MOREXXX: once PUFFS_WCACHEINFO is enabled, we can't take
	 * the mutex here, since getpages() might be called locked.
	 */
	fstrans_start(mp, FSTRANS_NORMAL);
	mutex_enter(&pmp->pmp_lock);
	fstrans_done(mp);

	/* dying fs: don't bother queueing, bail out */
	if (pmp->pmp_status != PUFFSTAT_RUNNING) {
		mutex_exit(&pmp->pmp_lock);
		puffs_park_release(park, 0);
		return ENXIO;
	}

#ifdef PUFFSDEBUG
	parkqdump(&pmp->pmp_req_touser, puffsdebug > 1);
	parkqdump(&pmp->pmp_req_replywait, puffsdebug > 1);
#endif

	/* queue the op for the file server and account for it */
	TAILQ_INSERT_TAIL(&pmp->pmp_req_touser, park, park_entries);
	park->park_flags |= PARKFLAG_ONQUEUE1;
	puffs_mp_reference(pmp);
	pmp->pmp_req_touser_count++;
	mutex_exit(&pmp->pmp_lock);

	DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
	    "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, park,
	    preq->preq_opclass, preq->preq_optype, park->park_flags));

	/* wake up getop sleepers and poll/select waiters */
	cv_broadcast(&pmp->pmp_req_waiter_cv);
	selnotify(pmp->pmp_sel, 0);

	if ((park->park_flags & PARKFLAG_WANTREPLY)
	    && (park->park_flags & PARKFLAG_CALL) == 0) {
		int error;

		/* wait for putop to flag the reply; interruptible */
		error = cv_wait_sig(&park->park_cv, &park->park_mtx);
		if (error) {
			park->park_flags |= PARKFLAG_WAITERGONE;
			if (park->park_flags & PARKFLAG_DONE) {
				rv = preq->preq_rv;
				puffs_park_release(park, 0);
			} else {
				/*
				 * ok, we marked it as going away, but
				 * still need to do queue ops.  take locks
				 * in correct order.
				 *
				 * We don't want to release our reference
				 * if it's on replywait queue to avoid error
				 * to file server.  putop() code will DTRT.
				 */
				KASSERT(park->park_flags &
				    (PARKFLAG_ONQUEUE1 | PARKFLAG_ONQUEUE2));
				mutex_exit(&park->park_mtx);

				mutex_enter(&pmp->pmp_lock);
				mutex_enter(&park->park_mtx);
				if (park->park_flags & PARKFLAG_ONQUEUE1) {
					TAILQ_REMOVE(&pmp->pmp_req_touser,
					    park, park_entries);
					pmp->pmp_req_touser_count--;
					park->park_flags &= ~PARKFLAG_ONQUEUE1;
				}
				if ((park->park_flags & PARKFLAG_ONQUEUE2) == 0)
					puffs_park_release(park, 0);
				else
					mutex_exit(&park->park_mtx);
				mutex_exit(&pmp->pmp_lock);

				rv = error;
			}
		} else {
			rv = preq->preq_rv;
			puffs_park_release(park, 0);
		}

		/*
		 * retake the lock and release.  This makes sure (haha,
		 * I'm humorous) that we don't process the same vnode in
		 * multiple threads due to the locks hacks we have in
		 * puffs_lock().  In reality this is well protected by
		 * the biglock, but once that's gone, well, hopefully
		 * this will be fixed for real.  (and when you read this
		 * comment in 2017 and subsequently barf, my condolences ;).
		 */
		if (rv == 0 && !fstrans_is_owner(mp)) {
			fstrans_start(mp, FSTRANS_NORMAL);
			fstrans_done(mp);
		}
	} else {
		/* FAF or "call" op: nobody sleeps, just drop the mutex */
		mutex_exit(&park->park_mtx);
	}

	mutex_enter(&pmp->pmp_lock);
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	return rv;
}
590
591
592 /*
593 * getop: scan through queued requests until:
594 * 1) max number of requests satisfied
595 * OR
596 * 2) buffer runs out of space
597 * OR
598 * 3) nonblocking is set AND there are no operations available
599 * OR
600 * 4) at least one operation was transferred AND there are no more waiting
601 */
602 int
603 puffs_getop(struct puffs_mount *pmp, struct puffs_reqh_get *phg, int nonblock)
604 {
605 struct puffs_park *park;
606 struct puffs_req *preq;
607 uint8_t *bufpos;
608 int error, donesome;
609
610 donesome = error = 0;
611 bufpos = phg->phg_buf;
612
613 mutex_enter(&pmp->pmp_lock);
614 while (phg->phg_nops == 0 || donesome != phg->phg_nops) {
615 again:
616 if (pmp->pmp_status != PUFFSTAT_RUNNING) {
617 /* if we got some, they don't really matter anymore */
618 error = ENXIO;
619 goto out;
620 }
621 if (TAILQ_EMPTY(&pmp->pmp_req_touser)) {
622 if (donesome)
623 goto out;
624
625 if (nonblock) {
626 error = EWOULDBLOCK;
627 goto out;
628 }
629
630 error = cv_wait_sig(&pmp->pmp_req_waiter_cv,
631 &pmp->pmp_lock);
632 if (error)
633 goto out;
634 else
635 goto again;
636 }
637
638 park = TAILQ_FIRST(&pmp->pmp_req_touser);
639 puffs_park_reference(park);
640
641 /* If it's a goner, don't process any furher */
642 if (park->park_flags & PARKFLAG_WAITERGONE) {
643 puffs_park_release(park, 0);
644 continue;
645 }
646
647 preq = park->park_preq;
648 if (phg->phg_buflen < preq->preq_buflen) {
649 if (!donesome)
650 error = E2BIG;
651 puffs_park_release(park, 0);
652 goto out;
653 }
654
655 TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
656 KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
657 park->park_flags &= ~PARKFLAG_ONQUEUE1;
658 pmp->pmp_req_touser_count--;
659 KASSERT(pmp->pmp_req_touser_count >= 0);
660 mutex_exit(&pmp->pmp_lock);
661
662 DPRINTF(("puffsgetop: get op %" PRIu64 " (%d.), from %p "
663 "len %zu (buflen %zu), target %p\n", preq->preq_id,
664 donesome, preq, park->park_copylen, preq->preq_buflen,
665 bufpos));
666
667 if ((error = copyout(preq, bufpos, park->park_copylen)) != 0) {
668 DPRINTF(("puffs_getop: copyout failed: %d\n", error));
669 /*
670 * ok, user server is probably trying to cheat.
671 * stuff op back & return error to user. We need
672 * to take locks in the correct order.
673 */
674 mutex_exit(&park->park_mtx);
675
676 /*
677 * XXX: ONQUEUE1 | ONQUEUE2 invariant doesn't
678 * hold here
679 */
680
681 mutex_enter(&pmp->pmp_lock);
682 mutex_enter(&park->park_mtx);
683 if ((park->park_flags & PARKFLAG_WAITERGONE) == 0) {
684 TAILQ_INSERT_HEAD(&pmp->pmp_req_touser, park,
685 park_entries);
686 park->park_flags |= PARKFLAG_ONQUEUE1;
687 pmp->pmp_req_touser_count++;
688 }
689
690 if (donesome)
691 error = 0;
692 puffs_park_release(park, 0);
693 goto out;
694 }
695 bufpos += preq->preq_buflen;
696 phg->phg_buflen -= preq->preq_buflen;
697 donesome++;
698
699 /* XXXfixme: taking this lock in the wrong order */
700 mutex_enter(&pmp->pmp_lock);
701
702 if (park->park_flags & PARKFLAG_WANTREPLY) {
703 TAILQ_INSERT_TAIL(&pmp->pmp_req_replywait, park,
704 park_entries);
705 park->park_flags |= PARKFLAG_ONQUEUE2;
706 puffs_park_release(park, 0);
707 } else {
708 free(preq, M_PUFFS);
709 puffs_park_release(park, 1);
710 }
711 }
712
713 out:
714 phg->phg_more = pmp->pmp_req_touser_count;
715 mutex_exit(&pmp->pmp_lock);
716
717 phg->phg_nops = donesome;
718
719 return error;
720 }
721
/*
 * putop: copy replies from the file server back in, waking the
 * parked waiters (or running their "call" callbacks).  Replies are
 * chained: each copied-in header's preq_id/preq_buflen/preq_nextbuf
 * describe the next reply in the batch.
 */
int
puffs_putop(struct puffs_mount *pmp, struct puffs_reqh_put *php)
{
	struct puffs_park *park;
	struct puffs_req tmpreq;
	struct puffs_req *nextpreq;
	void *userbuf;
	uint64_t id;
	size_t reqlen;
	int donesome, error, wgone, release;

	donesome = error = wgone = 0;

	id = php->php_id;
	userbuf = php->php_buf;
	reqlen = php->php_buflen;

	mutex_enter(&pmp->pmp_lock);
	while (donesome != php->php_nops) {
		release = 0;
#ifdef PUFFSDEBUG
		DPRINTF(("puffsputop: searching for %" PRIu64 ", ubuf: %p, "
		    "len %zu\n", id, userbuf, reqlen));
#endif
		/* locate the park awaiting this reply id */
		TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
			if (park->park_id == id)
				break;
		}

		if (park == NULL) {
			DPRINTF(("puffsputop: no request: %" PRIu64 "\n", id));
			error = EINVAL;
			break;
		}

		/* NOTE(review): park_mtx taken while holding pmp_lock */
		puffs_park_reference(park);
		if (reqlen == 0 || reqlen > park->park_maxlen) {
			DPRINTF(("puffsputop: invalid buffer length: "
			    "%zu\n", reqlen));
			error = E2BIG;
			puffs_park_release(park, 0);
			break;
		}
		wgone = park->park_flags & PARKFLAG_WAITERGONE;

		/* check if it's still on the queue after acquiring lock */
		if (park->park_flags & PARKFLAG_ONQUEUE2) {
			TAILQ_REMOVE(&pmp->pmp_req_replywait, park,
			    park_entries);
			park->park_flags &= ~PARKFLAG_ONQUEUE2;
		}

		mutex_exit(&pmp->pmp_lock);

		/*
		 * If the caller has gone south, go to next, collect
		 * $200 and free the structure there instead of wakeup.
		 * We also need to copyin the header info.  Flag structure
		 * release to mode total and utter destruction.
		 */
		if (wgone) {
			DPRINTF(("puffs_putop: bad service - waiter gone for "
			    "park %p\n", park));
			/* copy only the header, into a local: preq is dead */
			error = copyin(userbuf, &tmpreq,
			    sizeof(struct puffs_req));
			release = 1;
			if (error)
				goto loopout;
			nextpreq = &tmpreq;
			goto next;
		}

		DPRINTF(("puffsputpop: copyin from %p to %p, len %zu\n",
		    userbuf, park->park_preq, reqlen));
		error = copyin(userbuf, park->park_preq, reqlen);
		if (error)
			goto loopout;
		nextpreq = park->park_preq;

 next:
		/* all's well, prepare for next op */
		id = nextpreq->preq_id;
		reqlen = nextpreq->preq_buflen;
		userbuf = nextpreq->preq_nextbuf;
		donesome++;

 loopout:
		if (error && !wgone)
			park->park_preq->preq_rv = error;

		/* "call"-style op: deliver via callback, then destroy park */
		if (park->park_flags & PARKFLAG_CALL) {
			park->park_done(park->park_preq, park->park_donearg);
			release = 1;
		}

		if (!wgone) {
			DPRINTF(("puffs_putop: flagging done for "
			    "park %p\n", park));

			cv_signal(&park->park_cv);
		}
		park->park_flags |= PARKFLAG_DONE;
		puffs_park_release(park, release);

		mutex_enter(&pmp->pmp_lock);
		if (error)
			break;
		wgone = 0;
	}

	mutex_exit(&pmp->pmp_lock);
	/* report how many replies were NOT processed */
	php->php_nops -= donesome;

	return error;
}
837
838 /*
839 * We're dead, kaput, RIP, slightly more than merely pining for the
840 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
841 * our maker, ceased to be, etcetc. YASD. It's a dead FS!
842 *
843 * Caller must hold puffs mutex.
844 */
845 void
846 puffs_userdead(struct puffs_mount *pmp)
847 {
848 struct puffs_park *park, *park_next;
849
850 /*
851 * Mark filesystem status as dying so that operations don't
852 * attempt to march to userspace any longer.
853 */
854 pmp->pmp_status = PUFFSTAT_DYING;
855
856 /* signal waiters on REQUEST TO file server queue */
857 for (park = TAILQ_FIRST(&pmp->pmp_req_touser); park; park = park_next) {
858 uint8_t opclass;
859
860 puffs_park_reference(park);
861 park_next = TAILQ_NEXT(park, park_entries);
862
863 KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
864 TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
865 park->park_flags &= ~PARKFLAG_ONQUEUE1;
866 pmp->pmp_req_touser_count--;
867
868 /*
869 * If the waiter is gone, we may *NOT* access preq anymore.
870 */
871 if (park->park_flags & PARKFLAG_WAITERGONE) {
872 KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
873 KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
874 puffs_park_release(park, 0);
875 } else {
876 opclass = park->park_preq->preq_opclass;
877 park->park_preq->preq_rv = ENXIO;
878
879 if (park->park_flags & PARKFLAG_CALL) {
880 park->park_done(park->park_preq,
881 park->park_donearg);
882 puffs_park_release(park, 1);
883 } else if ((park->park_flags & PARKFLAG_WANTREPLY)==0) {
884 free(park->park_preq, M_PUFFS);
885 puffs_park_release(park, 1);
886 } else {
887 park->park_preq->preq_rv = ENXIO;
888 cv_signal(&park->park_cv);
889 puffs_park_release(park, 0);
890 }
891 }
892 }
893
894 /* signal waiters on RESPONSE FROM file server queue */
895 for (park=TAILQ_FIRST(&pmp->pmp_req_replywait); park; park=park_next) {
896 puffs_park_reference(park);
897 park_next = TAILQ_NEXT(park, park_entries);
898
899 KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
900 KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
901
902 TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);
903 park->park_flags &= ~PARKFLAG_ONQUEUE2;
904
905 /*
906 * If the waiter is gone, we may *NOT* access preq anymore.
907 */
908 if (park->park_flags & PARKFLAG_WAITERGONE) {
909 KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
910 puffs_park_release(park, 0);
911 } else {
912 park->park_preq->preq_rv = ENXIO;
913 if (park->park_flags & PARKFLAG_CALL) {
914 park->park_done(park->park_preq,
915 park->park_donearg);
916 puffs_park_release(park, 1);
917 } else {
918 cv_signal(&park->park_cv);
919 puffs_park_release(park, 0);
920 }
921 }
922 }
923 }
924
925 /* this is probably going to die away at some point? */
926 /*
927 * XXX: currently bitrotted
928 */
#if 0
/*
 * Transfer a variable-size buffer between the user server and a
 * waiting size-operation park.  Compiled out (#if 0) and bitrotted;
 * see the comment above.
 */
static int
puffssizeop(struct puffs_mount *pmp, struct puffs_sizeop *psop_user)
{
	struct puffs_sizepark *pspark;
	void *kernbuf;
	size_t copylen;
	int error;

	/* locate correct op */
	mutex_enter(&pmp->pmp_lock);
	TAILQ_FOREACH(pspark, &pmp->pmp_req_sizepark, pkso_entries) {
		if (pspark->pkso_reqid == psop_user->pso_reqid) {
			TAILQ_REMOVE(&pmp->pmp_req_sizepark, pspark,
			    pkso_entries);
			break;
		}
	}
	mutex_exit(&pmp->pmp_lock);

	if (pspark == NULL)
		return EINVAL;

	error = 0;
	copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);

	/*
	 * XXX: uvm stuff to avoid bouncy-bouncy copying?
	 */
	if (PUFFS_SIZEOP_UIO(pspark->pkso_reqtype)) {
		kernbuf = malloc(copylen, M_PUFFS, M_WAITOK | M_ZERO);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_IN) {
			error = copyin(psop_user->pso_userbuf,
			    kernbuf, copylen);
			if (error) {
				printf("psop ERROR1 %d\n", error);
				goto escape;
			}
		}
		error = uiomove(kernbuf, copylen, pspark->pkso_uio);
		if (error) {
			printf("uiomove from kernel %p, len %d failed: %d\n",
			    kernbuf, (int)copylen, error);
			goto escape;
		}

		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_OUT) {
			error = copyout(kernbuf,
			    psop_user->pso_userbuf, copylen);
			if (error) {
				printf("psop ERROR2 %d\n", error);
				goto escape;
			}
		}
 escape:
		free(kernbuf, M_PUFFS);
	} else if (PUFFS_SIZEOP_BUF(pspark->pkso_reqtype)) {
		/*
		 * NOTE(review): MAX looks suspicious here -- MIN would
		 * bound the copy by both buffers.  Left as-is since
		 * this code is compiled out; verify before reviving.
		 */
		copylen = MAX(pspark->pkso_bufsize, psop_user->pso_bufsize);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_BUF_IN) {
			error = copyin(psop_user->pso_userbuf,
			    pspark->pkso_copybuf, copylen);
		} else {
			error = copyout(pspark->pkso_copybuf,
			    psop_user->pso_userbuf, copylen);
		}
	}
#ifdef DIAGNOSTIC
	else
		panic("puffssizeop: invalid reqtype %d\n",
		    pspark->pkso_reqtype);
#endif /* DIAGNOSTIC */

	return error;
}
#endif
1004