/*	$NetBSD: puffs_msgif.c,v 1.26 2007/04/04 16:13:51 pooka Exp $	*/
2
3 /*
4 * Copyright (c) 2005, 2006, 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by the
7 * Google Summer of Code program and the Ulla Tuominen Foundation.
8 * The Google SoC project was mentored by Bill Studenmund.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the company nor the name of the author may be used to
19 * endorse or promote products derived from this software without specific
20 * prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
23 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.26 2007/04/04 16:13:51 pooka Exp $");
37
38 #include <sys/param.h>
39 #include <sys/fstrans.h>
40 #include <sys/malloc.h>
41 #include <sys/mount.h>
42 #include <sys/vnode.h>
43 #include <sys/lock.h>
44
45 #include <fs/puffs/puffs_msgif.h>
46 #include <fs/puffs/puffs_sys.h>
47
48 /*
49 * waitq data structures
50 */
51
52 /*
53 * While a request is going to userspace, park the caller within the
54 * kernel. This is the kernel counterpart of "struct puffs_req".
55 */
struct puffs_park {
	struct puffs_req *park_preq;	/* req followed by buf */
	uint64_t park_id;		/* duplicate of preq_id */

	size_t park_copylen;		/* userspace copylength */
	size_t park_maxlen;		/* max size in comeback */

	parkdone_fn park_done;		/* continuation, used iff PARKFLAG_CALL */
	void *park_donearg;		/* opaque argument for park_done */

	int park_flags;			/* PARKFLAG_*, protected by park_mtx */
	int park_refcount;		/* dropped via puffs_park_release() */

	kcondvar_t park_cv;		/* caller sleeps here awaiting the reply */
	kmutex_t park_mtx;		/* protects flags / refcount / cv */

	TAILQ_ENTRY(puffs_park) park_entries;
};
#define PARKFLAG_WAITERGONE	0x01	/* original waiter gave up (signal) */
#define PARKFLAG_DONE		0x02	/* reply has been processed */
#define PARKFLAG_ONQUEUE1	0x04	/* linked on pmp_req_touser */
#define PARKFLAG_ONQUEUE2	0x08	/* linked on pmp_req_replywait */
#define PARKFLAG_CALL		0x10	/* async op: invoke park_done() */

/* park allocation pool; ctor/dtor initialize mtx/cv once per object */
static struct pool_cache parkpc;
static struct pool parkpool;
82
83 static int
84 makepark(void *arg, void *obj, int flags)
85 {
86 struct puffs_park *park = obj;
87
88 mutex_init(&park->park_mtx, MUTEX_DEFAULT, IPL_NONE);
89 cv_init(&park->park_cv, "puffsrpl");
90
91 return 0;
92 }
93
94 static void
95 nukepark(void *arg, void *obj)
96 {
97 struct puffs_park *park = obj;
98
99 cv_destroy(&park->park_cv);
100 mutex_destroy(&park->park_mtx);
101 }
102
103 void
104 puffs_msgif_init()
105 {
106
107 pool_init(&parkpool, sizeof(struct puffs_park), 0, 0, 0,
108 "puffprkl", &pool_allocator_nointr, IPL_NONE);
109 pool_cache_init(&parkpc, &parkpool, makepark, nukepark, NULL);
110 }
111
112 void
113 puffs_msgif_destroy()
114 {
115
116 pool_cache_destroy(&parkpc);
117 pool_destroy(&parkpool);
118 }
119
120 void *
121 puffs_park_alloc(int waitok)
122 {
123 struct puffs_park *park;
124
125 park = pool_cache_get(&parkpc, waitok ? PR_WAITOK : PR_NOWAIT);
126 if (park) {
127 park->park_refcount = 1;
128 mutex_enter(&park->park_mtx);
129 }
130
131 return park;
132 }
133
134 static void
135 puffs_park_reference(struct puffs_park *park)
136 {
137
138 mutex_enter(&park->park_mtx);
139 park->park_refcount++;
140 }
141
142 void
143 puffs_park_release(void *arg, int fullnuke)
144 {
145 struct puffs_park *park = arg;
146
147 KASSERT(mutex_owned(&park->park_mtx));
148 --park->park_refcount;
149
150 mutex_exit(&park->park_mtx);
151 if (park->park_refcount == 0 || fullnuke)
152 pool_cache_put(&parkpc, park);
153 }
154
155 #ifdef PUFFSDEBUG
/* Dump one park's state for debugging (PUFFSDEBUG builds only). */
static void
parkdump(struct puffs_park *park)
{

	DPRINTF(("park %p, preq %p, id %" PRIu64 "\n"
	    "\tcopy %zu, max %zu - done: %p/%p\n"
	    "\tflags 0x%08x, refcount %d, cv/mtx: %p/%p\n",
	    park, park->park_preq, park->park_id,
	    park->park_copylen, park->park_maxlen,
	    park->park_done, park->park_donearg,
	    park->park_flags, park->park_refcount,
	    &park->park_cv, &park->park_mtx));
}
169
170 static void
171 parkqdump(struct puffs_wq *q, int dumpall)
172 {
173 struct puffs_park *park;
174 int total = 0;
175
176 DPRINTF(("puffs waitqueue at %p, BEGIN\n", q));
177 TAILQ_FOREACH(park, q, park_entries) {
178 if (dumpall)
179 parkdump(park);
180 total++;
181 }
182 DPRINTF(("puffs waitqueue at %p, END. %d total\n", q, total));
183
184 }
185 #endif /* PUFFSDEBUG */
186
187 /*
188 * Converts a non-FAF op to a FAF. This simply involves making copies
189 * of the park and request structures and tagging the request as a FAF.
190 * It is safe to block here, since the original op is not a FAF.
191 */
192 static void
193 puffs_reqtofaf(struct puffs_park *park)
194 {
195 struct puffs_req *newpreq;
196
197 KASSERT((park->park_preq->preq_opclass & PUFFSOPFLAG_FAF) == 0);
198
199 MALLOC(newpreq, struct puffs_req *, park->park_copylen,
200 M_PUFFS, M_ZERO | M_WAITOK);
201
202 memcpy(newpreq, park->park_preq, park->park_copylen);
203
204 park->park_preq = newpreq;
205 park->park_preq->preq_opclass |= PUFFSOPFLAG_FAF;
206 }
207
208
209 /*
210 * kernel-user-kernel waitqueues
211 */
212
213 static int touser(struct puffs_mount *, struct puffs_park *, uint64_t,
214 struct vnode *, struct vnode *);
215
216 uint64_t
217 puffs_getreqid(struct puffs_mount *pmp)
218 {
219 uint64_t rv;
220
221 mutex_enter(&pmp->pmp_lock);
222 rv = pmp->pmp_nextreq++;
223 mutex_exit(&pmp->pmp_lock);
224
225 return rv;
226 }
227
228 /* vfs request */
229 int
230 puffs_vfstouser(struct puffs_mount *pmp, int optype, void *kbuf, size_t buflen)
231 {
232 struct puffs_park *park;
233
234 park = puffs_park_alloc(1);
235 park->park_preq = kbuf;
236
237 park->park_preq->preq_opclass = PUFFSOP_VFS;
238 park->park_preq->preq_optype = optype;
239
240 park->park_maxlen = park->park_copylen = buflen;
241 park->park_flags = 0;
242
243 return touser(pmp, park, puffs_getreqid(pmp), NULL, NULL);
244 }
245
246 void
247 puffs_suspendtouser(struct puffs_mount *pmp, int status)
248 {
249 struct puffs_vfsreq_suspend *pvfsr_susp;
250 struct puffs_park *park;
251
252 pvfsr_susp = malloc(sizeof(struct puffs_vfsreq_suspend),
253 M_PUFFS, M_WAITOK | M_ZERO);
254 park = puffs_park_alloc(1);
255
256 pvfsr_susp->pvfsr_status = status;
257 park->park_preq = (struct puffs_req *)pvfsr_susp;
258
259 park->park_preq->preq_opclass = PUFFSOP_VFS | PUFFSOPFLAG_FAF;
260 park->park_preq->preq_optype = PUFFS_VFS_SUSPEND;
261
262 park->park_maxlen = park->park_copylen
263 = sizeof(struct puffs_vfsreq_suspend);
264 park->park_flags = 0;
265
266 (void)touser(pmp, park, 0, NULL, NULL);
267 }
268
269 /*
270 * vnode level request
271 */
272 int
273 puffs_vntouser(struct puffs_mount *pmp, int optype,
274 void *kbuf, size_t buflen, size_t maxdelta, void *cookie,
275 struct vnode *vp1, struct vnode *vp2)
276 {
277 struct puffs_park *park;
278
279 park = puffs_park_alloc(1);
280 park->park_preq = kbuf;
281
282 park->park_preq->preq_opclass = PUFFSOP_VN;
283 park->park_preq->preq_optype = optype;
284 park->park_preq->preq_cookie = cookie;
285
286 park->park_copylen = buflen;
287 park->park_maxlen = buflen + maxdelta;
288 park->park_flags = 0;
289
290 return touser(pmp, park, puffs_getreqid(pmp), vp1, vp2);
291 }
292
/*
 * vnode level request, caller-controlled req id
 */
296 int
297 puffs_vntouser_req(struct puffs_mount *pmp, int optype,
298 void *kbuf, size_t buflen, size_t maxdelta, void *cookie,
299 uint64_t reqid, struct vnode *vp1, struct vnode *vp2)
300 {
301 struct puffs_park *park;
302
303 park = puffs_park_alloc(1);
304 park->park_preq = kbuf;
305
306 park->park_preq->preq_opclass = PUFFSOP_VN;
307 park->park_preq->preq_optype = optype;
308 park->park_preq->preq_cookie = cookie;
309
310 park->park_copylen = buflen;
311 park->park_maxlen = buflen + maxdelta;
312 park->park_flags = 0;
313
314 return touser(pmp, park, reqid, vp1, vp2);
315 }
316
317 void
318 puffs_vntouser_call(struct puffs_mount *pmp, int optype,
319 void *kbuf, size_t buflen, size_t maxdelta, void *cookie,
320 parkdone_fn donefn, void *donearg,
321 struct vnode *vp1, struct vnode *vp2)
322 {
323 struct puffs_park *park;
324
325 park = puffs_park_alloc(1);
326 park->park_preq = kbuf;
327
328 park->park_preq->preq_opclass = PUFFSOP_VN;
329 park->park_preq->preq_optype = optype;
330 park->park_preq->preq_cookie = cookie;
331
332 park->park_copylen = buflen;
333 park->park_maxlen = buflen + maxdelta;
334 park->park_done = donefn;
335 park->park_donearg = donearg;
336 park->park_flags = PARKFLAG_CALL;
337
338 (void) touser(pmp, park, puffs_getreqid(pmp), vp1, vp2);
339 }
340
/*
 * Notice: kbuf will be free'd later.  It must be allocated from the
 * kernel heap and its ownership is shifted to this function from
 * now on, i.e. the caller is not allowed to use it anymore!
 */
346 void
347 puffs_vntouser_faf(struct puffs_mount *pmp, int optype,
348 void *kbuf, size_t buflen, void *cookie)
349 {
350 struct puffs_park *park;
351
352 /* XXX: is it allowable to sleep here? */
353 park = puffs_park_alloc(0);
354 if (park == NULL)
355 return; /* 2bad */
356
357 park->park_preq = kbuf;
358
359 park->park_preq->preq_opclass = PUFFSOP_VN | PUFFSOPFLAG_FAF;
360 park->park_preq->preq_optype = optype;
361 park->park_preq->preq_cookie = cookie;
362
363 park->park_maxlen = park->park_copylen = buflen;
364 park->park_flags = 0;
365
366 (void)touser(pmp, park, 0, NULL, NULL);
367 }
368
369 void
370 puffs_cacheop(struct puffs_mount *pmp, struct puffs_park *park,
371 struct puffs_cacheinfo *pcinfo, size_t pcilen, void *cookie)
372 {
373
374 park->park_preq = (struct puffs_req *)pcinfo;
375 park->park_preq->preq_opclass = PUFFSOP_CACHE | PUFFSOPFLAG_FAF;
376 park->park_preq->preq_optype = PCACHE_TYPE_WRITE; /* XXX */
377 park->park_preq->preq_cookie = cookie;
378
379 park->park_maxlen = park->park_copylen = pcilen;
380 park->park_flags = 0;
381
382 (void)touser(pmp, park, 0, NULL, NULL);
383 }
384
385 /*
386 * Wait for the userspace ping-pong game in calling process context.
387 *
388 * This unlocks vnodes if they are supplied. vp1 is the vnode
389 * before in the locking order, i.e. the one which must be locked
390 * before accessing vp2. This is done here so that operations are
391 * already ordered in the queue when vnodes are unlocked (I'm not
392 * sure if that's really necessary, but it can't hurt). Okok, maybe
393 * there's a slight ugly-factor also, but let's not worry about that.
394 */
static int
touser(struct puffs_mount *pmp, struct puffs_park *park, uint64_t reqid,
	struct vnode *vp1, struct vnode *vp2)
{
	struct lwp *l = curlwp;
	struct mount *mp;
	struct puffs_req *preq;
	int rv = 0;

	/*
	 * Entry contract: park_mtx is held (taken in puffs_park_alloc())
	 * and the caller's reference to the park is consumed here —
	 * every exit path below either cv_waits on park_mtx or drops
	 * the mutex/reference explicitly.
	 */
	mp = PMPTOMP(pmp);
	preq = park->park_preq;
	preq->preq_id = park->park_id = reqid;
	preq->preq_buflen = ALIGN(park->park_maxlen);

	/*
	 * To support PCATCH, yet another movie: check if there are signals
	 * pending and we are issueing a non-FAF.  If so, return an error
	 * directly UNLESS we are issueing INACTIVE.  In that case, convert
	 * it to a FAF, fire off to the file server and return an error.
	 * Yes, this is bordering disgusting.  Barfbags are on me.
	 */
	if (PUFFSOP_WANTREPLY(preq->preq_opclass)
	    && (park->park_flags & PARKFLAG_CALL) == 0
	    && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0)) {
		if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
		    && preq->preq_optype == PUFFS_VN_INACTIVE) {
			/* INACTIVE must still reach the server: make it FAF */
			puffs_reqtofaf(park);
			DPRINTF(("puffs touser: converted to FAF %p\n", park));
			rv = EINTR;
		} else {
			puffs_park_release(park, 0);
			return EINTR;
		}
	}

	/*
	 * test for suspension lock.
	 *
	 * Note that we *DO NOT* keep the lock, since that might block
	 * lock acquiring PLUS it would give userlandia control over
	 * the lock.  The operation queue enforces a strict ordering:
	 * when the fs server gets in the op stream, it knows things
	 * are in order.  The kernel locks can't guarantee that for
	 * userspace, in any case.
	 *
	 * BUT: this presents a problem for ops which have a consistency
	 * clause based on more than one operation.  Unfortunately such
	 * operations (read, write) do not reliably work yet.
	 *
	 * Ya, Ya, it's wrong wong wrong, me be fixink this someday.
	 *
	 * XXX: and there is one more problem.  We sometimes need to
	 * take a lazy lock in case the fs is suspending and we are
	 * executing as the fs server context.  This might happen
	 * e.g. in the case that the user server triggers a reclaim
	 * in the kernel while the fs is suspending.  It's not a very
	 * likely event, but it needs to be fixed some day.
	 */

	/*
	 * MOREXXX: once PUFFS_WCACHEINFO is enabled, we can't take
	 * the mutex here, since getpages() might be called locked.
	 */
	fstrans_start(mp, FSTRANS_NORMAL);
	mutex_enter(&pmp->pmp_lock);
	fstrans_done(mp);

	if (pmp->pmp_status != PUFFSTAT_RUNNING) {
		/* file server is gone/dying: refuse new traffic */
		mutex_exit(&pmp->pmp_lock);
		puffs_park_release(park, 0);
		return ENXIO;
	}

#ifdef PUFFSDEBUG
	parkqdump(&pmp->pmp_req_touser, puffsdebug > 1);
	parkqdump(&pmp->pmp_req_replywait, puffsdebug > 1);
#endif

	/* enqueue for the server and account ourselves as a waiter */
	TAILQ_INSERT_TAIL(&pmp->pmp_req_touser, park, park_entries);
	park->park_flags |= PARKFLAG_ONQUEUE1;
	pmp->pmp_req_waiters++;
	mutex_exit(&pmp->pmp_lock);

#if 0
	/*
	 * Don't do unlock-relock dance yet.  There are a couple of
	 * unsolved issues with it.  If we don't unlock, we can have
	 * processes wanting vn_lock in case userspace hangs.  But
	 * that can be "solved" by killing the userspace process.  It
	 * would of course be nicer to have antilocking in the userspace
	 * interface protocol itself.. your patience will be rewarded.
	 */
	/* unlock */
	if (vp2)
		VOP_UNLOCK(vp2, 0);
	if (vp1)
		VOP_UNLOCK(vp1, 0);
#endif

	DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
	    "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, park,
	    preq->preq_opclass, preq->preq_optype, park->park_flags));

	/* poke anyone sleeping in / selecting on the transport */
	cv_broadcast(&pmp->pmp_req_waiter_cv);
	selnotify(pmp->pmp_sel, 0);

	if (PUFFSOP_WANTREPLY(preq->preq_opclass)
	    && (park->park_flags & PARKFLAG_CALL) == 0) {
		int error;

		/* interruptible sleep until putop() signals park_cv */
		error = cv_wait_sig(&park->park_cv, &park->park_mtx);
		if (error) {
			/* interrupted: mark us gone so the server side knows */
			park->park_flags |= PARKFLAG_WAITERGONE;
			if (park->park_flags & PARKFLAG_DONE) {
				rv = preq->preq_rv;
				puffs_park_release(park, 0);
			} else {
				/*
				 * ok, we marked it as going away, but
				 * still need to do queue ops.  take locks
				 * in correct order.
				 *
				 * We don't want to release our reference
				 * if it's on replywait queue to avoid error
				 * to file server.  putop() code will DTRT.
				 */
				KASSERT(park->park_flags &
				    (PARKFLAG_ONQUEUE1 | PARKFLAG_ONQUEUE2));
				mutex_exit(&park->park_mtx);

				mutex_enter(&pmp->pmp_lock);
				mutex_enter(&park->park_mtx);
				if (park->park_flags & PARKFLAG_ONQUEUE1)
					TAILQ_REMOVE(&pmp->pmp_req_touser,
					    park, park_entries);
				park->park_flags &= ~PARKFLAG_ONQUEUE1;
				if ((park->park_flags & PARKFLAG_ONQUEUE2) == 0)
					puffs_park_release(park, 0);
				else
					mutex_exit(&park->park_mtx);
				mutex_exit(&pmp->pmp_lock);

				rv = error;
			}
		} else {
			rv = preq->preq_rv;
			puffs_park_release(park, 0);
		}

		/*
		 * retake the lock and release.  This makes sure (haha,
		 * I'm humorous) that we don't process the same vnode in
		 * multiple threads due to the locks hacks we have in
		 * puffs_lock().  In reality this is well protected by
		 * the biglock, but once that's gone, well, hopefully
		 * this will be fixed for real.  (and when you read this
		 * comment in 2017 and subsequently barf, my condolences ;).
		 */
		if (rv == 0 && !fstrans_is_owner(mp)) {
			fstrans_start(mp, FSTRANS_NORMAL);
			fstrans_done(mp);
		}
	} else {
		/* FAF or callback-style op: no reply to wait for here */
		mutex_exit(&park->park_mtx);
	}

#if 0
	/* relock */
	if (vp1)
		KASSERT(vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY) == 0);
	if (vp2)
		KASSERT(vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY) == 0);
#endif

	/* last waiter out: notify whoever waits for the count to drain */
	mutex_enter(&pmp->pmp_lock);
	if (--pmp->pmp_req_waiters == 0) {
		KASSERT(cv_has_waiters(&pmp->pmp_req_waitersink_cv) <= 1);
		cv_signal(&pmp->pmp_req_waitersink_cv);
	}
	mutex_exit(&pmp->pmp_lock);

	return rv;
}
578
579
580 /*
581 * getop: scan through queued requests until:
582 * 1) max number of requests satisfied
583 * OR
584 * 2) buffer runs out of space
585 * OR
586 * 3) nonblocking is set AND there are no operations available
587 * OR
588 * 4) at least one operation was transferred AND there are no more waiting
589 */
int
puffs_getop(struct puffs_mount *pmp, struct puffs_reqh_get *phg, int nonblock)
{
	struct puffs_park *park;
	struct puffs_req *preq;
	uint8_t *bufpos;
	int error, donesome;

	donesome = error = 0;
	bufpos = phg->phg_buf;

	mutex_enter(&pmp->pmp_lock);
	/* phg_nops == 0 means "no limit on the number of ops" */
	while (phg->phg_nops == 0 || donesome != phg->phg_nops) {
 again:
		if (pmp->pmp_status != PUFFSTAT_RUNNING) {
			/* if we got some, they don't really matter anymore */
			error = ENXIO;
			goto out;
		}
		if (TAILQ_EMPTY(&pmp->pmp_req_touser)) {
			if (donesome)
				goto out;

			if (nonblock) {
				error = EWOULDBLOCK;
				goto out;
			}

			/* sleep (interruptibly) for new requests */
			error = cv_wait_sig(&pmp->pmp_req_waiter_cv,
			    &pmp->pmp_lock);
			if (error)
				goto out;
			else
				goto again;
		}

		park = TAILQ_FIRST(&pmp->pmp_req_touser);
		puffs_park_reference(park);

		/* If it's a goner, don't process any furher */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			/*
			 * NOTE(review): the park is left on queue1 here and
			 * we loop back still holding pmp_lock, which the
			 * gone waiter needs in order to dequeue it — looks
			 * like a potential livelock; confirm against the
			 * touser() interrupt path.
			 */
			puffs_park_release(park, 0);
			continue;
		}

		preq = park->park_preq;
		TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		mutex_exit(&pmp->pmp_lock);

		if (phg->phg_buflen < preq->preq_buflen) {
			/*
			 * NOTE(review): we dropped pmp_lock above, but the
			 * "out" label reads pmp_req_waiters and then does
			 * mutex_exit(&pmp->pmp_lock) — this path appears to
			 * unlock an unheld mutex; verify lock balance.
			 */
			if (!donesome)
				error = E2BIG;
			puffs_park_release(park, 0);
			goto out;
		}

		DPRINTF(("puffsgetop: get op %" PRIu64 " (%d.), from %p "
		    "len %zu (buflen %zu), target %p\n", preq->preq_id,
		    donesome, preq, park->park_copylen, preq->preq_buflen,
		    bufpos));

		if ((error = copyout(preq, bufpos, park->park_copylen)) != 0) {
			DPRINTF(("puffs_getop: copyout failed: %d\n", error));
			/*
			 * ok, user server is probably trying to cheat.
			 * stuff op back & return error to user.  We need
			 * to take locks in the correct order.
			 */
			mutex_exit(&park->park_mtx);
			mutex_enter(&pmp->pmp_lock);
			mutex_enter(&park->park_mtx);
			if ((park->park_flags & PARKFLAG_WAITERGONE) == 0) {
				TAILQ_INSERT_HEAD(&pmp->pmp_req_touser, park,
				    park_entries);
				park->park_flags |= PARKFLAG_ONQUEUE1;
			}
			mutex_exit(&pmp->pmp_lock);

			/* partial success trumps the copyout error */
			if (donesome)
				error = 0;
			puffs_park_release(park, 0);
			goto out;
		}
		bufpos += preq->preq_buflen;
		phg->phg_buflen -= preq->preq_buflen;
		donesome++;

		mutex_enter(&pmp->pmp_lock);
		if (PUFFSOP_WANTREPLY(preq->preq_opclass)) {
			/* park now waits for the server's reply via putop() */
			TAILQ_INSERT_TAIL(&pmp->pmp_req_replywait, park,
			    park_entries);
			park->park_flags |= PARKFLAG_ONQUEUE2;
			puffs_park_release(park, 0);
		} else {
			/* fire-and-forget: we own the request buffer, nuke all */
			free(preq, M_PUFFS);
			puffs_park_release(park, 1);
		}
	}

 out:
	/* tell the server whether more requests are pending */
	phg->phg_more = pmp->pmp_req_waiters;
	mutex_exit(&pmp->pmp_lock);

	phg->phg_nops = donesome;

	return error;
}
699
/*
 * putop: accept a batch of replies from the file server.  Each reply
 * header names the id of the request it answers; the matching park is
 * located on the replywait queue, the response is copied in over the
 * original request buffer, and the sleeping waiter (or the park_done
 * callback) is notified.
 */
int
puffs_putop(struct puffs_mount *pmp, struct puffs_reqh_put *php)
{
	struct puffs_park *park;
	struct puffs_req tmpreq;
	struct puffs_req *nextpreq;
	void *userbuf;
	uint64_t id;
	size_t reqlen;
	int donesome, error, wgone, release;

	donesome = error = wgone = 0;

	id = php->php_id;
	userbuf = php->php_buf;
	reqlen = php->php_buflen;

	mutex_enter(&pmp->pmp_lock);
	while (donesome != php->php_nops) {
		release = 0;
#ifdef PUFFSDEBUG
		DPRINTF(("puffsputop: searching for %" PRIu64 ", ubuf: %p, "
		    "len %zu\n", id, userbuf, reqlen));
#endif
		/* locate the request this reply answers */
		TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
			if (park->park_id == id)
				break;
		}

		if (park == NULL) {
			DPRINTF(("puffsputop: no request: %" PRIu64 "\n", id));
			error = EINVAL;
			break;
		}

		/* lock order: pmp_lock, then park_mtx (via reference) */
		puffs_park_reference(park);
		if (reqlen == 0 || reqlen > park->park_maxlen) {
			/* server-supplied length must fit the reply buffer */
			DPRINTF(("puffsputop: invalid buffer length: "
			    "%zu\n", reqlen));
			error = E2BIG;
			puffs_park_release(park, 0);
			break;
		}
		wgone = park->park_flags & PARKFLAG_WAITERGONE;

		/* check if it's still on the queue after acquiring lock */
		if (park->park_flags & PARKFLAG_ONQUEUE2) {
			TAILQ_REMOVE(&pmp->pmp_req_replywait, park,
			    park_entries);
			park->park_flags &= ~PARKFLAG_ONQUEUE2;
		}

		mutex_exit(&pmp->pmp_lock);

		/*
		 * If the caller has gone south, go to next, collect
		 * $200 and free the structure there instead of wakeup.
		 * We also need to copyin the header info.  Flag structure
		 * release to mode total and utter destruction.
		 */
		if (wgone) {
			DPRINTF(("puffs_putop: bad service - waiter gone for "
			    "park %p\n", park));
			/* only the header is needed, to chain to the next op */
			error = copyin(userbuf, &tmpreq,
			    sizeof(struct puffs_req));
			release = 1;
			if (error)
				goto loopout;
			nextpreq = &tmpreq;
			goto next;
		}

		DPRINTF(("puffsputpop: copyin from %p to %p, len %zu\n",
		    userbuf, park->park_preq, reqlen));
		error = copyin(userbuf, park->park_preq, reqlen);
		if (error)
			goto loopout;
		nextpreq = park->park_preq;

 next:
		/* all's well, prepare for next op (replies are chained) */
		id = nextpreq->preq_id;
		reqlen = nextpreq->preq_buflen;
		userbuf = nextpreq->preq_nextbuf;
		donesome++;

 loopout:
		if (error)
			park->park_preq->preq_rv = error;

		if (park->park_flags & PARKFLAG_CALL) {
			/* async op: deliver via callback, then destroy park */
			park->park_done(park->park_preq, park->park_donearg);
			release = 1;
		}

		if (!wgone) {
			DPRINTF(("puffs_putop: flagging done for "
			    "park %p\n", park));

			/* wake the waiter sleeping in touser() */
			cv_signal(&park->park_cv);
		}
		/*
		 * NOTE(review): the non-error, non-wgone path does not set
		 * PARKFLAG_DONE even though touser() tests it — confirm
		 * whether that flag is set elsewhere or is simply unused
		 * on this path.
		 */
		puffs_park_release(park, release);

		mutex_enter(&pmp->pmp_lock);
		if (error)
			break;
		wgone = 0;
	}

	mutex_exit(&pmp->pmp_lock);
	/* report how many ops were NOT processed */
	php->php_nops -= donesome;

	return error;
}
814
815 /*
816 * We're dead, kaput, RIP, slightly more than merely pining for the
817 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
818 * our maker, ceased to be, etcetc. YASD. It's a dead FS!
819 *
820 * Caller must hold puffs mutex.
821 */
void
puffs_userdead(struct puffs_mount *pmp)
{
	struct puffs_park *park;

	/*
	 * Mark filesystem status as dying so that operations don't
	 * attempt to march to userspace any longer.
	 */
	pmp->pmp_status = PUFFSTAT_DYING;

	/* signal waiters on REQUEST TO file server queue */
	TAILQ_FOREACH(park, &pmp->pmp_req_touser, park_entries) {
		uint8_t opclass;

		/* returns with park_mtx held */
		puffs_park_reference(park);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);

		opclass = park->park_preq->preq_opclass;
		park->park_preq->preq_rv = ENXIO;
		TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;

		if (park->park_flags & PARKFLAG_CALL) {
			/* async op: complete with ENXIO via callback */
			park->park_done(park->park_preq, park->park_donearg);
			puffs_park_release(park, 1);
		} else if (!PUFFSOP_WANTREPLY(opclass)) {
			/* fire-and-forget: we own the buffer, free it */
			free(park->park_preq, M_PUFFS);
			puffs_park_release(park, 1);
		} else {
			/* NOTE(review): preq_rv = ENXIO already set above */
			park->park_preq->preq_rv = ENXIO;
			cv_signal(&park->park_cv);
			puffs_park_release(park, 0);
		}
	}

	/* signal waiters on RESPONSE FROM file server queue */
	TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
		puffs_park_reference(park);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);

		TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE2;

		KASSERT(PUFFSOP_WANTREPLY(park->park_preq->preq_opclass));

		park->park_preq->preq_rv = ENXIO;
		if (park->park_flags & PARKFLAG_CALL) {
			park->park_done(park->park_preq, park->park_donearg);
			/*
			 * NOTE(review): park_mtx appears to already be held
			 * here (puffs_park_reference() above does not drop
			 * it, and the queue-1 CALL branch has no re-enter) —
			 * this mutex_enter looks like a double lock; confirm
			 * whether park_done releases the mutex.
			 */
			mutex_enter(&park->park_mtx);
			puffs_park_release(park, 1);
		} else {
			cv_signal(&park->park_cv);
			puffs_park_release(park, 0);
		}
	}
}
881
882 /* this is probably going to die away at some point? */
883 /*
884 * XXX: currently bitrotted
885 */
886 #if 0
/*
 * Complete a queued size operation by shuttling data between the
 * server's buffer and the parked kernel-side uio/buffer.
 * Compiled out (#if 0) and bitrotted; kept for reference only.
 */
static int
puffssizeop(struct puffs_mount *pmp, struct puffs_sizeop *psop_user)
{
	struct puffs_sizepark *pspark;
	void *kernbuf;
	size_t copylen;
	int error;

	/* locate correct op */
	mutex_enter(&pmp->pmp_lock);
	TAILQ_FOREACH(pspark, &pmp->pmp_req_sizepark, pkso_entries) {
		if (pspark->pkso_reqid == psop_user->pso_reqid) {
			TAILQ_REMOVE(&pmp->pmp_req_sizepark, pspark,
			    pkso_entries);
			break;
		}
	}
	mutex_exit(&pmp->pmp_lock);

	if (pspark == NULL)
		return EINVAL;

	error = 0;
	copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);

	/*
	 * XXX: uvm stuff to avoid bouncy-bouncy copying?
	 */
	if (PUFFS_SIZEOP_UIO(pspark->pkso_reqtype)) {
		kernbuf = malloc(copylen, M_PUFFS, M_WAITOK | M_ZERO);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_IN) {
			error = copyin(psop_user->pso_userbuf,
			    kernbuf, copylen);
			if (error) {
				printf("psop ERROR1 %d\n", error);
				goto escape;
			}
		}
		error = uiomove(kernbuf, copylen, pspark->pkso_uio);
		if (error) {
			printf("uiomove from kernel %p, len %d failed: %d\n",
			    kernbuf, (int)copylen, error);
			goto escape;
		}

		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_OUT) {
			error = copyout(kernbuf,
			    psop_user->pso_userbuf, copylen);
			if (error) {
				printf("psop ERROR2 %d\n", error);
				goto escape;
			}
		}
 escape:
		free(kernbuf, M_PUFFS);
	} else if (PUFFS_SIZEOP_BUF(pspark->pkso_reqtype)) {
		/*
		 * NOTE(review): MAX here (vs MIN above) lets copylen exceed
		 * the smaller of the two buffers — looks like it should be
		 * MIN; confirm before ever re-enabling this code.
		 */
		copylen = MAX(pspark->pkso_bufsize, psop_user->pso_bufsize);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_BUF_IN) {
			error = copyin(psop_user->pso_userbuf,
			    pspark->pkso_copybuf, copylen);
		} else {
			error = copyout(pspark->pkso_copybuf,
			    psop_user->pso_userbuf, copylen);
		}
	}
#ifdef DIAGNOSTIC
	else
		panic("puffssizeop: invalid reqtype %d\n",
		    pspark->pkso_reqtype);
#endif /* DIAGNOSTIC */

	return error;
}
960 #endif
961