/*	$NetBSD: puffs_msgif.c,v 1.41 2007/09/27 21:14:49 pooka Exp $	*/
2
3 /*
4 * Copyright (c) 2005, 2006, 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by the
7 * Google Summer of Code program and the Ulla Tuominen Foundation.
8 * The Google SoC project was mentored by Bill Studenmund.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.41 2007/09/27 21:14:49 pooka Exp $");
34
35 #include <sys/param.h>
36 #include <sys/fstrans.h>
37 #include <sys/malloc.h>
38 #include <sys/mount.h>
39 #include <sys/vnode.h>
40 #include <sys/lock.h>
41 #include <sys/proc.h>
42
43 #include <fs/puffs/puffs_msgif.h>
44 #include <fs/puffs/puffs_sys.h>
45
46 /*
47 * waitq data structures
48 */
49
50 /*
51 * While a request is going to userspace, park the caller within the
52 * kernel. This is the kernel counterpart of "struct puffs_req".
53 */
54 struct puffs_park {
55 struct puffs_req *park_preq; /* req followed by buf */
56 uint64_t park_id; /* duplicate of preq_id */
57
58 size_t park_copylen; /* userspace copylength */
59 size_t park_maxlen; /* max size in comeback */
60
61 parkdone_fn park_done;
62 void *park_donearg;
63
64 int park_flags;
65 int park_refcount;
66
67 kcondvar_t park_cv;
68 kmutex_t park_mtx;
69
70 TAILQ_ENTRY(puffs_park) park_entries;
71 };
72 #define PARKFLAG_WAITERGONE 0x01
73 #define PARKFLAG_DONE 0x02
74 #define PARKFLAG_ONQUEUE1 0x04
75 #define PARKFLAG_ONQUEUE2 0x08
76 #define PARKFLAG_CALL 0x10
77 #define PARKFLAG_WANTREPLY 0x20
78
79 static struct pool_cache parkpc;
80 static struct pool parkpool;
81
82 static int
83 makepark(void *arg, void *obj, int flags)
84 {
85 struct puffs_park *park = obj;
86
87 mutex_init(&park->park_mtx, MUTEX_DEFAULT, IPL_NONE);
88 cv_init(&park->park_cv, "puffsrpl");
89
90 return 0;
91 }
92
93 static void
94 nukepark(void *arg, void *obj)
95 {
96 struct puffs_park *park = obj;
97
98 cv_destroy(&park->park_cv);
99 mutex_destroy(&park->park_mtx);
100 }
101
102 void
103 puffs_msgif_init()
104 {
105
106 pool_init(&parkpool, sizeof(struct puffs_park), 0, 0, 0,
107 "puffprkl", &pool_allocator_nointr, IPL_NONE);
108 pool_cache_init(&parkpc, &parkpool, makepark, nukepark, NULL);
109 }
110
111 void
112 puffs_msgif_destroy()
113 {
114
115 pool_cache_destroy(&parkpc);
116 pool_destroy(&parkpool);
117 }
118
119 void *
120 puffs_park_alloc(int waitok)
121 {
122 struct puffs_park *park;
123
124 park = pool_cache_get(&parkpc, waitok ? PR_WAITOK : PR_NOWAIT);
125 if (park) {
126 park->park_refcount = 1;
127 mutex_enter(&park->park_mtx);
128 }
129
130 return park;
131 }
132
133 static void
134 puffs_park_reference(struct puffs_park *park)
135 {
136
137 mutex_enter(&park->park_mtx);
138 park->park_refcount++;
139 }
140
141 void
142 puffs_park_release(void *arg, int fullnuke)
143 {
	struct puffs_park *park = arg;
	int release;

	KASSERT(mutex_owned(&park->park_mtx));

	/*
	 * Decide whether to free while still holding park_mtx; once the
	 * mutex is dropped another thread may drop the last reference
	 * and free the park under us.
	 */
	release = (--park->park_refcount == 0) || fullnuke;

	mutex_exit(&park->park_mtx);
	if (release)
		pool_cache_put(&parkpc, park);
152 }
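
/*
 * An illustrative sketch of how the rest of this file uses these
 * helpers (field assignments abbreviated; this is not a new interface):
 *
 *	park = puffs_park_alloc(1);	returns with park_mtx held,
 *					refcount 1
 *	park->park_preq = ...;		fill in request, lengths, flags
 *	touser(pmp, park, reqid);	queue the request for the
 *					file server
 *
 * puffs_park_reference() enters park_mtx and gains one reference;
 * puffs_park_release() must be called with park_mtx held: it drops
 * one reference plus the mutex and returns the park to the pool
 * cache on the last reference (or unconditionally if fullnuke != 0).
 */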
153
154 #ifdef PUFFSDEBUG
155 static void
156 parkdump(struct puffs_park *park)
157 {
158
159 DPRINTF(("park %p, preq %p, id %" PRIu64 "\n"
160 "\tcopy %zu, max %zu - done: %p/%p\n"
161 "\tflags 0x%08x, refcount %d, cv/mtx: %p/%p\n",
162 park, park->park_preq, park->park_id,
163 park->park_copylen, park->park_maxlen,
164 park->park_done, park->park_donearg,
165 park->park_flags, park->park_refcount,
166 &park->park_cv, &park->park_mtx));
167 }
168
169 static void
170 parkqdump(struct puffs_wq *q, int dumpall)
171 {
172 struct puffs_park *park;
173 int total = 0;
174
175 TAILQ_FOREACH(park, q, park_entries) {
176 if (dumpall)
177 parkdump(park);
178 total++;
179 }
180 DPRINTF(("puffs waitqueue at %p dumped, %d total\n", q, total));
181
182 }
183 #endif /* PUFFSDEBUG */
184
/*
 * Converts a non-FAF op to a FAF. This simply involves making a copy
 * of the request structure and tagging the copy as a FAF.
 * It is safe to block here, since the original op is not a FAF.
 */
190 static void
191 puffs_reqtofaf(struct puffs_park *park)
192 {
193 struct puffs_req *newpreq;
194
195 KASSERT((park->park_preq->preq_opclass & PUFFSOPFLAG_FAF) == 0);
196
197 MALLOC(newpreq, struct puffs_req *, park->park_copylen,
198 M_PUFFS, M_ZERO | M_WAITOK);
199
200 memcpy(newpreq, park->park_preq, park->park_copylen);
201
202 park->park_preq = newpreq;
203 park->park_preq->preq_opclass |= PUFFSOPFLAG_FAF;
204 park->park_flags &= ~PARKFLAG_WANTREPLY;
205 }
206
207
208 /*
209 * kernel-user-kernel waitqueues
210 */
211
212 static int touser(struct puffs_mount *, struct puffs_park *, uint64_t);
213
214 uint64_t
215 puffs_getreqid(struct puffs_mount *pmp)
216 {
217 uint64_t rv;
218
219 mutex_enter(&pmp->pmp_lock);
220 rv = pmp->pmp_nextreq++;
221 mutex_exit(&pmp->pmp_lock);
222
223 return rv;
224 }
225
226 /* vfs request */
227 int
228 puffs_vfstouser(struct puffs_mount *pmp, int optype, void *kbuf, size_t buflen)
229 {
230 struct puffs_park *park;
231
232 park = puffs_park_alloc(1);
233 park->park_preq = kbuf;
234
235 park->park_preq->preq_opclass = PUFFSOP_VFS;
236 park->park_preq->preq_optype = optype;
237
238 park->park_maxlen = park->park_copylen = buflen;
239 park->park_flags = 0;
240
241 return touser(pmp, park, puffs_getreqid(pmp));
242 }
243
244 void
245 puffs_suspendtouser(struct puffs_mount *pmp, int status)
246 {
247 struct puffs_vfsreq_suspend *pvfsr_susp;
248 struct puffs_park *park;
249
250 pvfsr_susp = malloc(sizeof(struct puffs_vfsreq_suspend),
251 M_PUFFS, M_WAITOK | M_ZERO);
252 park = puffs_park_alloc(1);
253
254 pvfsr_susp->pvfsr_status = status;
255 park->park_preq = (struct puffs_req *)pvfsr_susp;
256
257 park->park_preq->preq_opclass = PUFFSOP_VFS | PUFFSOPFLAG_FAF;
258 park->park_preq->preq_optype = PUFFS_VFS_SUSPEND;
259
260 park->park_maxlen = park->park_copylen
261 = sizeof(struct puffs_vfsreq_suspend);
262 park->park_flags = 0;
263
264 (void)touser(pmp, park, 0);
265 }
266
267 /*
268 * vnode level request
269 */
270 int
271 puffs_vntouser(struct puffs_mount *pmp, int optype,
272 void *kbuf, size_t buflen, size_t maxdelta,
273 struct vnode *vp_opc, struct vnode *vp_aux)
274 {
275 struct puffs_park *park;
276 struct puffs_req *preq;
277 void *cookie = VPTOPNC(vp_opc);
278 struct puffs_node *pnode;
279 int rv;
280
281 park = puffs_park_alloc(1);
282 park->park_preq = kbuf;
283
284 park->park_preq->preq_opclass = PUFFSOP_VN;
285 park->park_preq->preq_optype = optype;
286 park->park_preq->preq_cookie = cookie;
287
288 park->park_copylen = buflen;
289 park->park_maxlen = buflen + maxdelta;
290 park->park_flags = 0;
291
292 rv = touser(pmp, park, puffs_getreqid(pmp));
293
294 /*
295 * Check if the user server requests that inactive be called
296 * when the time is right.
297 */
298 preq = park->park_preq;
299 if (preq->preq_setbacks & PUFFS_SETBACK_INACT_N1) {
300 pnode = vp_opc->v_data;
301 pnode->pn_stat |= PNODE_DOINACT;
302 }
303 if (preq->preq_setbacks & PUFFS_SETBACK_INACT_N2) {
304 /* if no vp_aux, just ignore */
305 if (vp_aux) {
306 pnode = vp_aux->v_data;
307 pnode->pn_stat |= PNODE_DOINACT;
308 }
309 }
310 if (preq->preq_setbacks & PUFFS_SETBACK_NOREF_N1) {
311 pnode = vp_opc->v_data;
312 pnode->pn_stat |= PNODE_NOREFS;
313 }
314 if (preq->preq_setbacks & PUFFS_SETBACK_NOREF_N2) {
315 /* if no vp_aux, just ignore */
316 if (vp_aux) {
317 pnode = vp_aux->v_data;
318 pnode->pn_stat |= PNODE_NOREFS;
319 }
320 }
321
322 return rv;
323 }
324
/*
 * vnode level request, caller-controlled req id
 */
328 int
329 puffs_vntouser_req(struct puffs_mount *pmp, int optype,
330 void *kbuf, size_t buflen, size_t maxdelta,
331 uint64_t reqid, struct vnode *vp_opc, struct vnode *vp_aux)
332 {
333 struct puffs_park *park;
334 void *cookie = VPTOPNC(vp_opc);
335
336 park = puffs_park_alloc(1);
337 park->park_preq = kbuf;
338
339 park->park_preq->preq_opclass = PUFFSOP_VN;
340 park->park_preq->preq_optype = optype;
341 park->park_preq->preq_cookie = cookie;
342
343 park->park_copylen = buflen;
344 park->park_maxlen = buflen + maxdelta;
345 park->park_flags = 0;
346
347 return touser(pmp, park, reqid);
348 }
349
350 void
351 puffs_vntouser_call(struct puffs_mount *pmp, int optype,
352 void *kbuf, size_t buflen, size_t maxdelta,
353 parkdone_fn donefn, void *donearg,
354 struct vnode *vp_opc, struct vnode *vp_aux)
355 {
356 struct puffs_park *park;
357 void *cookie = VPTOPNC(vp_opc);
358
359 park = puffs_park_alloc(1);
360 park->park_preq = kbuf;
361
362 park->park_preq->preq_opclass = PUFFSOP_VN;
363 park->park_preq->preq_optype = optype;
364 park->park_preq->preq_cookie = cookie;
365
366 park->park_copylen = buflen;
367 park->park_maxlen = buflen + maxdelta;
368 park->park_done = donefn;
369 park->park_donearg = donearg;
370 park->park_flags = PARKFLAG_CALL;
371
372 (void) touser(pmp, park, puffs_getreqid(pmp));
373 }
374
/*
 * Notice: kbuf will be free'd later.  It must be allocated from the
 * kernel heap and its ownership is shifted to this function from
 * now on, i.e. the caller is not allowed to use it anymore!
 */
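/*
 * An illustrative (hypothetical) caller, to show the ownership rule;
 * "optype", "buflen" and the request contents are placeholders:
 *
 *	void *kbuf;
 *
 *	kbuf = malloc(buflen, M_PUFFS, M_WAITOK | M_ZERO);
 *	... fill in the op-specific arguments ...
 *	puffs_vntouser_faf(pmp, optype, kbuf, buflen, vp);
 *	... kbuf may no longer be touched or freed by the caller ...
 */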
380 void
381 puffs_vntouser_faf(struct puffs_mount *pmp, int optype,
382 void *kbuf, size_t buflen, struct vnode *vp_opc)
383 {
384 struct puffs_park *park;
385 void *cookie = VPTOPNC(vp_opc);
386
387 /* XXX: is it allowable to sleep here? */
388 park = puffs_park_alloc(0);
389 if (park == NULL)
390 return; /* 2bad */
391
392 park->park_preq = kbuf;
393
394 park->park_preq->preq_opclass = PUFFSOP_VN | PUFFSOPFLAG_FAF;
395 park->park_preq->preq_optype = optype;
396 park->park_preq->preq_cookie = cookie;
397
398 park->park_maxlen = park->park_copylen = buflen;
399 park->park_flags = 0;
400
401 (void)touser(pmp, park, 0);
402 }
403
404 void
405 puffs_cacheop(struct puffs_mount *pmp, struct puffs_park *park,
406 struct puffs_cacheinfo *pcinfo, size_t pcilen, void *cookie)
407 {
408
409 park->park_preq = (struct puffs_req *)pcinfo;
410 park->park_preq->preq_opclass = PUFFSOP_CACHE | PUFFSOPFLAG_FAF;
411 park->park_preq->preq_optype = PCACHE_TYPE_WRITE; /* XXX */
412 park->park_preq->preq_cookie = cookie;
413
414 park->park_maxlen = park->park_copylen = pcilen;
415 park->park_flags = 0;
416
417 (void)touser(pmp, park, 0);
418 }
419
420 void
421 puffs_errnotify(struct puffs_mount *pmp, uint8_t type, int error, void *cookie)
422 {
423 struct puffs_park *park;
424 struct puffs_error *perr;
425
426 park = puffs_park_alloc(1);
427 MALLOC(perr, struct puffs_error *, sizeof(struct puffs_error),
428 M_PUFFS, M_ZERO | M_WAITOK);
429
430 perr->perr_error = error;
431
432 park->park_preq = (struct puffs_req *)perr;
433 park->park_preq->preq_opclass = PUFFSOP_ERROR | PUFFSOPFLAG_FAF;
434 park->park_preq->preq_optype = type;
435 park->park_preq->preq_cookie = cookie;
436
437 park->park_maxlen = park->park_copylen = sizeof(struct puffs_error);
438 park->park_flags = 0;
439
440 (void)touser(pmp, park, 0);
441 }
442
443 /*
444 * Wait for the userspace ping-pong game in calling process context.
445 *
446 * This unlocks vnodes if they are supplied. vp1 is the vnode
447 * before in the locking order, i.e. the one which must be locked
448 * before accessing vp2. This is done here so that operations are
449 * already ordered in the queue when vnodes are unlocked (I'm not
450 * sure if that's really necessary, but it can't hurt). Okok, maybe
451 * there's a slight ugly-factor also, but let's not worry about that.
452 */
453 static int
454 touser(struct puffs_mount *pmp, struct puffs_park *park, uint64_t reqid)
455 {
456 struct lwp *l = curlwp;
457 struct mount *mp;
458 struct puffs_req *preq;
459 int rv = 0;
460
461 mp = PMPTOMP(pmp);
462 preq = park->park_preq;
463 preq->preq_id = park->park_id = reqid;
464 preq->preq_buflen = ALIGN(park->park_maxlen);
465
466 if (PUFFSOP_WANTREPLY(preq->preq_opclass))
467 park->park_flags |= PARKFLAG_WANTREPLY;
468
	/*
	 * To support PCATCH, yet another movie: check if there are signals
	 * pending and we are issuing a non-FAF.  If so, return an error
	 * directly UNLESS we are issuing INACTIVE.  In that case, convert
	 * it to a FAF, fire off to the file server and return an error.
	 * Yes, this is bordering on disgusting.  Barfbags are on me.
	 */
476 if ((park->park_flags & PARKFLAG_WANTREPLY)
477 && (park->park_flags & PARKFLAG_CALL) == 0
478 && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0)) {
479 if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
480 && preq->preq_optype == PUFFS_VN_INACTIVE) {
481 puffs_reqtofaf(park);
482 DPRINTF(("puffs touser: converted to FAF %p\n", park));
483 rv = EINTR;
484 } else {
485 puffs_park_release(park, 0);
486 return EINTR;
487 }
488 }
489
490 /*
491 * test for suspension lock.
492 *
493 * Note that we *DO NOT* keep the lock, since that might block
494 * lock acquiring PLUS it would give userlandia control over
495 * the lock. The operation queue enforces a strict ordering:
496 * when the fs server gets in the op stream, it knows things
497 * are in order. The kernel locks can't guarantee that for
498 * userspace, in any case.
499 *
500 * BUT: this presents a problem for ops which have a consistency
501 * clause based on more than one operation. Unfortunately such
502 * operations (read, write) do not reliably work yet.
503 *
504 * Ya, Ya, it's wrong wong wrong, me be fixink this someday.
505 *
506 * XXX: and there is one more problem. We sometimes need to
507 * take a lazy lock in case the fs is suspending and we are
508 * executing as the fs server context. This might happen
509 * e.g. in the case that the user server triggers a reclaim
510 * in the kernel while the fs is suspending. It's not a very
511 * likely event, but it needs to be fixed some day.
512 */
513
514 /*
515 * MOREXXX: once PUFFS_WCACHEINFO is enabled, we can't take
516 * the mutex here, since getpages() might be called locked.
517 */
518 fstrans_start(mp, FSTRANS_NORMAL);
519 mutex_enter(&pmp->pmp_lock);
520 fstrans_done(mp);
521
522 if (pmp->pmp_status != PUFFSTAT_RUNNING) {
523 mutex_exit(&pmp->pmp_lock);
524 puffs_park_release(park, 0);
525 return ENXIO;
526 }
527
528 #ifdef PUFFSDEBUG
529 parkqdump(&pmp->pmp_req_touser, puffsdebug > 1);
530 parkqdump(&pmp->pmp_req_replywait, puffsdebug > 1);
531 #endif
532
533 TAILQ_INSERT_TAIL(&pmp->pmp_req_touser, park, park_entries);
534 park->park_flags |= PARKFLAG_ONQUEUE1;
535 puffs_mp_reference(pmp);
536 pmp->pmp_req_touser_count++;
537 mutex_exit(&pmp->pmp_lock);
538
539 DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
540 "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, park,
541 preq->preq_opclass, preq->preq_optype, park->park_flags));
542
543 cv_broadcast(&pmp->pmp_req_waiter_cv);
544 selnotify(pmp->pmp_sel, 0);
545
546 if ((park->park_flags & PARKFLAG_WANTREPLY)
547 && (park->park_flags & PARKFLAG_CALL) == 0) {
548 int error;
549
550 error = cv_wait_sig(&park->park_cv, &park->park_mtx);
551 if (error) {
552 park->park_flags |= PARKFLAG_WAITERGONE;
553 if (park->park_flags & PARKFLAG_DONE) {
554 rv = preq->preq_rv;
555 puffs_park_release(park, 0);
556 } else {
				/*
				 * ok, we marked it as going away, but
				 * still need to do queue ops.  take locks
				 * in correct order.
				 *
				 * We don't want to release our reference
				 * if it's still on the replywait queue;
				 * that would cause an error for the file
				 * server when it replies.  putop() will DTRT.
				 */
566 KASSERT(park->park_flags &
567 (PARKFLAG_ONQUEUE1 | PARKFLAG_ONQUEUE2));
568 mutex_exit(&park->park_mtx);
569
570 mutex_enter(&pmp->pmp_lock);
571 mutex_enter(&park->park_mtx);
572 if (park->park_flags & PARKFLAG_ONQUEUE1) {
573 TAILQ_REMOVE(&pmp->pmp_req_touser,
574 park, park_entries);
575 pmp->pmp_req_touser_count--;
576 park->park_flags &= ~PARKFLAG_ONQUEUE1;
577 }
578 if ((park->park_flags & PARKFLAG_ONQUEUE2) == 0)
579 puffs_park_release(park, 0);
580 else
581 mutex_exit(&park->park_mtx);
582 mutex_exit(&pmp->pmp_lock);
583
584 rv = error;
585 }
586 } else {
587 rv = preq->preq_rv;
588 puffs_park_release(park, 0);
589 }
590
		/*
		 * retake the lock and release.  This makes sure (haha,
		 * I'm humorous) that we don't process the same vnode in
		 * multiple threads due to the lock hacks we have in
		 * puffs_lock().  In reality this is well protected by
		 * the biglock, but once that's gone, well, hopefully
		 * this will be fixed for real.  (and when you read this
		 * comment in 2017 and subsequently barf, my condolences ;).
		 */
600 if (rv == 0 && !fstrans_is_owner(mp)) {
601 fstrans_start(mp, FSTRANS_NORMAL);
602 fstrans_done(mp);
603 }
604 } else {
605 mutex_exit(&park->park_mtx);
606 }
607
608 mutex_enter(&pmp->pmp_lock);
609 puffs_mp_release(pmp);
610 mutex_exit(&pmp->pmp_lock);
611
612 return rv;
613 }
614
615
616 /*
617 * getop: scan through queued requests until:
618 * 1) max number of requests satisfied
619 * OR
620 * 2) buffer runs out of space
621 * OR
622 * 3) nonblocking is set AND there are no operations available
623 * OR
624 * 4) at least one operation was transferred AND there are no more waiting
625 */
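/*
 * A rough sketch of the file server's side of the exchange (only the
 * phg_ fields below come from this file; how &phg reaches
 * puffs_getop() through the transport is abbreviated):
 *
 *	struct puffs_reqh_get phg;
 *
 *	phg.phg_buf = buf;		buffer to receive requests
 *	phg.phg_buflen = buflen;	size of that buffer
 *	phg.phg_nops = 0;		0 = as many as fit
 *	... hand &phg to the kernel, which calls puffs_getop() ...
 *
 * On return phg_nops holds the number of operations fetched and
 * phg_more the number still queued in the kernel.
 */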
626 int
627 puffs_getop(struct puffs_mount *pmp, struct puffs_reqh_get *phg, int nonblock)
628 {
629 struct puffs_park *park;
630 struct puffs_req *preq;
631 uint8_t *bufpos;
632 int error, donesome;
633
634 donesome = error = 0;
635 bufpos = phg->phg_buf;
636
637 mutex_enter(&pmp->pmp_lock);
638 while (phg->phg_nops == 0 || donesome != phg->phg_nops) {
639 again:
640 if (pmp->pmp_status != PUFFSTAT_RUNNING) {
641 /* if we got some, they don't really matter anymore */
642 error = ENXIO;
643 goto out;
644 }
645 if (TAILQ_EMPTY(&pmp->pmp_req_touser)) {
646 if (donesome)
647 goto out;
648
649 if (nonblock) {
650 error = EWOULDBLOCK;
651 goto out;
652 }
653
654 error = cv_wait_sig(&pmp->pmp_req_waiter_cv,
655 &pmp->pmp_lock);
656 if (error)
657 goto out;
658 else
659 goto again;
660 }
661
662 park = TAILQ_FIRST(&pmp->pmp_req_touser);
663 puffs_park_reference(park);
664
		/* If it's a goner, don't process any further */
666 if (park->park_flags & PARKFLAG_WAITERGONE) {
667 puffs_park_release(park, 0);
668 continue;
669 }
670
671 preq = park->park_preq;
672 if (phg->phg_buflen < preq->preq_buflen) {
673 if (!donesome)
674 error = E2BIG;
675 puffs_park_release(park, 0);
676 goto out;
677 }
678
679 TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
680 KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
681 park->park_flags &= ~PARKFLAG_ONQUEUE1;
682 pmp->pmp_req_touser_count--;
683 KASSERT(pmp->pmp_req_touser_count >= 0);
684 mutex_exit(&pmp->pmp_lock);
685
686 DPRINTF(("puffsgetop: get op %" PRIu64 " (%d.), from %p "
687 "len %zu (buflen %zu), target %p\n", preq->preq_id,
688 donesome, preq, park->park_copylen, preq->preq_buflen,
689 bufpos));
690
691 if ((error = copyout(preq, bufpos, park->park_copylen)) != 0) {
692 DPRINTF(("puffs_getop: copyout failed: %d\n", error));
693 /*
694 * ok, user server is probably trying to cheat.
695 * stuff op back & return error to user. We need
696 * to take locks in the correct order.
697 */
698 mutex_exit(&park->park_mtx);
699
700 /*
701 * XXX: ONQUEUE1 | ONQUEUE2 invariant doesn't
702 * hold here
703 */
704
705 mutex_enter(&pmp->pmp_lock);
706 mutex_enter(&park->park_mtx);
707 if ((park->park_flags & PARKFLAG_WAITERGONE) == 0) {
708 TAILQ_INSERT_HEAD(&pmp->pmp_req_touser, park,
709 park_entries);
710 park->park_flags |= PARKFLAG_ONQUEUE1;
711 pmp->pmp_req_touser_count++;
712 }
713
714 if (donesome)
715 error = 0;
716 puffs_park_release(park, 0);
717 goto out;
718 }
719 bufpos += preq->preq_buflen;
720 phg->phg_buflen -= preq->preq_buflen;
721 donesome++;
722
723 /* XXXfixme: taking this lock in the wrong order */
724 mutex_enter(&pmp->pmp_lock);
725
726 if (park->park_flags & PARKFLAG_WANTREPLY) {
727 TAILQ_INSERT_TAIL(&pmp->pmp_req_replywait, park,
728 park_entries);
729 park->park_flags |= PARKFLAG_ONQUEUE2;
730 puffs_park_release(park, 0);
731 } else {
732 free(preq, M_PUFFS);
733 puffs_park_release(park, 1);
734 }
735 }
736
737 out:
738 phg->phg_more = pmp->pmp_req_touser_count;
739 mutex_exit(&pmp->pmp_lock);
740
741 phg->phg_nops = donesome;
742
743 return error;
744 }
745
746 int
747 puffs_putop(struct puffs_mount *pmp, struct puffs_reqh_put *php)
748 {
749 struct puffs_park *park;
750 struct puffs_req tmpreq;
751 struct puffs_req *nextpreq;
752 void *userbuf;
753 uint64_t id;
754 size_t reqlen;
755 int donesome, error, wgone, release;
756
757 donesome = error = wgone = 0;
758
759 id = php->php_id;
760 userbuf = php->php_buf;
761 reqlen = php->php_buflen;
762
763 mutex_enter(&pmp->pmp_lock);
764 while (donesome != php->php_nops) {
765 release = 0;
766 #ifdef PUFFSDEBUG
767 DPRINTF(("puffsputop: searching for %" PRIu64 ", ubuf: %p, "
768 "len %zu\n", id, userbuf, reqlen));
769 #endif
770 TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
771 if (park->park_id == id)
772 break;
773 }
774
775 if (park == NULL) {
776 DPRINTF(("puffsputop: no request: %" PRIu64 "\n", id));
777 error = EINVAL;
778 break;
779 }
780
781 puffs_park_reference(park);
782 if (reqlen == 0 || reqlen > park->park_maxlen) {
783 DPRINTF(("puffsputop: invalid buffer length: "
784 "%zu\n", reqlen));
785 error = E2BIG;
786 puffs_park_release(park, 0);
787 break;
788 }
789 wgone = park->park_flags & PARKFLAG_WAITERGONE;
790
791 /* check if it's still on the queue after acquiring lock */
792 if (park->park_flags & PARKFLAG_ONQUEUE2) {
793 TAILQ_REMOVE(&pmp->pmp_req_replywait, park,
794 park_entries);
795 park->park_flags &= ~PARKFLAG_ONQUEUE2;
796 }
797
798 mutex_exit(&pmp->pmp_lock);
799
		/*
		 * If the caller has gone south, go to next, collect
		 * $200 and free the structure there instead of waking up.
		 * We also need to copyin the header info.  Flag the
		 * structure release for total and utter destruction.
		 */
806 if (wgone) {
807 DPRINTF(("puffs_putop: bad service - waiter gone for "
808 "park %p\n", park));
809 error = copyin(userbuf, &tmpreq,
810 sizeof(struct puffs_req));
811 release = 1;
812 if (error)
813 goto loopout;
814 nextpreq = &tmpreq;
815 goto next;
816 }
817
		DPRINTF(("puffsputop: copyin from %p to %p, len %zu\n",
		    userbuf, park->park_preq, reqlen));
820 error = copyin(userbuf, park->park_preq, reqlen);
821 if (error)
822 goto loopout;
823 nextpreq = park->park_preq;
824
825 next:
826 /* all's well, prepare for next op */
827 id = nextpreq->preq_id;
828 reqlen = nextpreq->preq_buflen;
829 userbuf = nextpreq->preq_nextbuf;
830 donesome++;
831
832 loopout:
833 if (error && !wgone)
834 park->park_preq->preq_rv = error;
835
836 if (park->park_flags & PARKFLAG_CALL) {
			DPRINTF(("puffsputop: call for %p, arg %p\n",
			    park->park_preq, park->park_donearg));
839 park->park_done(park->park_preq, park->park_donearg);
840 release = 1;
841 }
842
843 if (!wgone) {
844 DPRINTF(("puffs_putop: flagging done for "
845 "park %p\n", park));
846
847 cv_signal(&park->park_cv);
848 }
849 park->park_flags |= PARKFLAG_DONE;
850 puffs_park_release(park, release);
851
852 mutex_enter(&pmp->pmp_lock);
853 if (error)
854 break;
855 wgone = 0;
856 }
857
858 mutex_exit(&pmp->pmp_lock);
859 php->php_nops -= donesome;
860
861 return error;
862 }
863
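/*
 * And the reply direction, sketched from the loop above (only the
 * php_ and preq_ fields come from this file; the transport step that
 * reaches puffs_putop() is abbreviated):
 *
 *	struct puffs_reqh_put php;
 *
 *	php.php_id = preq->preq_id;	id of the first op replied to
 *	php.php_buf = preq;		the reply, with preq_rv filled in
 *	php.php_buflen = len;		length of the reply
 *	php.php_nops = n;		number of replies; further ones
 *					are chained through preq_id,
 *					preq_buflen and preq_nextbuf of
 *					each copied-in request
 *	... hand &php to the kernel, which calls puffs_putop() ...
 *
 * On return php_nops has been decremented by the number of replies
 * actually processed.
 */
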
864 /*
865 * We're dead, kaput, RIP, slightly more than merely pining for the
866 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
867 * our maker, ceased to be, etcetc. YASD. It's a dead FS!
868 *
869 * Caller must hold puffs mutex.
870 */
871 void
872 puffs_userdead(struct puffs_mount *pmp)
873 {
874 struct puffs_park *park, *park_next;
875
876 /*
877 * Mark filesystem status as dying so that operations don't
878 * attempt to march to userspace any longer.
879 */
880 pmp->pmp_status = PUFFSTAT_DYING;
881
882 /* signal waiters on REQUEST TO file server queue */
883 for (park = TAILQ_FIRST(&pmp->pmp_req_touser); park; park = park_next) {
884 uint8_t opclass;
885
886 puffs_park_reference(park);
887 park_next = TAILQ_NEXT(park, park_entries);
888
889 KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
890 TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
891 park->park_flags &= ~PARKFLAG_ONQUEUE1;
892 pmp->pmp_req_touser_count--;
893
894 /*
895 * If the waiter is gone, we may *NOT* access preq anymore.
896 */
897 if (park->park_flags & PARKFLAG_WAITERGONE) {
898 KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
899 KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
900 puffs_park_release(park, 0);
901 } else {
902 opclass = park->park_preq->preq_opclass;
903 park->park_preq->preq_rv = ENXIO;
904
905 if (park->park_flags & PARKFLAG_CALL) {
906 park->park_done(park->park_preq,
907 park->park_donearg);
908 puffs_park_release(park, 1);
909 } else if ((park->park_flags & PARKFLAG_WANTREPLY)==0) {
910 free(park->park_preq, M_PUFFS);
911 puffs_park_release(park, 1);
912 } else {
913 park->park_preq->preq_rv = ENXIO;
914 cv_signal(&park->park_cv);
915 puffs_park_release(park, 0);
916 }
917 }
918 }
919
920 /* signal waiters on RESPONSE FROM file server queue */
921 for (park=TAILQ_FIRST(&pmp->pmp_req_replywait); park; park=park_next) {
922 puffs_park_reference(park);
923 park_next = TAILQ_NEXT(park, park_entries);
924
925 KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
926 KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
927
928 TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);
929 park->park_flags &= ~PARKFLAG_ONQUEUE2;
930
931 /*
932 * If the waiter is gone, we may *NOT* access preq anymore.
933 */
934 if (park->park_flags & PARKFLAG_WAITERGONE) {
935 KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
936 puffs_park_release(park, 0);
937 } else {
938 park->park_preq->preq_rv = ENXIO;
939 if (park->park_flags & PARKFLAG_CALL) {
940 park->park_done(park->park_preq,
941 park->park_donearg);
942 puffs_park_release(park, 1);
943 } else {
944 cv_signal(&park->park_cv);
945 puffs_park_release(park, 0);
946 }
947 }
948 }
949 }
950
/*
 * This is probably going to die away at some point.
 * XXX: currently bitrotted
 */
955 #if 0
956 static int
957 puffssizeop(struct puffs_mount *pmp, struct puffs_sizeop *psop_user)
958 {
959 struct puffs_sizepark *pspark;
960 void *kernbuf;
961 size_t copylen;
962 int error;
963
964 /* locate correct op */
965 mutex_enter(&pmp->pmp_lock);
966 TAILQ_FOREACH(pspark, &pmp->pmp_req_sizepark, pkso_entries) {
967 if (pspark->pkso_reqid == psop_user->pso_reqid) {
968 TAILQ_REMOVE(&pmp->pmp_req_sizepark, pspark,
969 pkso_entries);
970 break;
971 }
972 }
973 mutex_exit(&pmp->pmp_lock);
974
975 if (pspark == NULL)
976 return EINVAL;
977
978 error = 0;
979 copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);
980
981 /*
982 * XXX: uvm stuff to avoid bouncy-bouncy copying?
983 */
984 if (PUFFS_SIZEOP_UIO(pspark->pkso_reqtype)) {
985 kernbuf = malloc(copylen, M_PUFFS, M_WAITOK | M_ZERO);
986 if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_IN) {
987 error = copyin(psop_user->pso_userbuf,
988 kernbuf, copylen);
989 if (error) {
990 printf("psop ERROR1 %d\n", error);
991 goto escape;
992 }
993 }
994 error = uiomove(kernbuf, copylen, pspark->pkso_uio);
995 if (error) {
996 printf("uiomove from kernel %p, len %d failed: %d\n",
997 kernbuf, (int)copylen, error);
998 goto escape;
999 }
1000
1001 if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_OUT) {
1002 error = copyout(kernbuf,
1003 psop_user->pso_userbuf, copylen);
1004 if (error) {
1005 printf("psop ERROR2 %d\n", error);
1006 goto escape;
1007 }
1008 }
1009 escape:
1010 free(kernbuf, M_PUFFS);
1011 } else if (PUFFS_SIZEOP_BUF(pspark->pkso_reqtype)) {
1012 copylen = MAX(pspark->pkso_bufsize, psop_user->pso_bufsize);
1013 if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_BUF_IN) {
1014 error = copyin(psop_user->pso_userbuf,
1015 pspark->pkso_copybuf, copylen);
1016 } else {
1017 error = copyout(pspark->pkso_copybuf,
1018 psop_user->pso_userbuf, copylen);
1019 }
1020 }
1021 #ifdef DIAGNOSTIC
1022 else
1023 panic("puffssizeop: invalid reqtype %d\n",
1024 pspark->pkso_reqtype);
1025 #endif /* DIAGNOSTIC */
1026
1027 return error;
1028 }
1029 #endif
1030