puffs_msgif.c revision 1.37 1 /* $NetBSD: puffs_msgif.c,v 1.37 2007/05/18 15:46:09 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2005, 2006, 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by the
7 * Google Summer of Code program and the Ulla Tuominen Foundation.
8 * The Google SoC project was mentored by Bill Studenmund.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the company nor the name of the author may be used to
19 * endorse or promote products derived from this software without specific
20 * prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
23 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.37 2007/05/18 15:46:09 pooka Exp $");
37
38 #include <sys/param.h>
39 #include <sys/fstrans.h>
40 #include <sys/malloc.h>
41 #include <sys/mount.h>
42 #include <sys/vnode.h>
43 #include <sys/lock.h>
44
45 #include <fs/puffs/puffs_msgif.h>
46 #include <fs/puffs/puffs_sys.h>
47
48 /*
49 * waitq data structures
50 */
51
52 /*
53 * While a request is going to userspace, park the caller within the
54 * kernel. This is the kernel counterpart of "struct puffs_req".
55 */
56 struct puffs_park {
57 struct puffs_req *park_preq; /* req followed by buf */
58 uint64_t park_id; /* duplicate of preq_id */
59
60 size_t park_copylen; /* userspace copylength */
61 size_t park_maxlen; /* max size in comeback */
62
63 parkdone_fn park_done;
64 void *park_donearg;
65
66 int park_flags;
67 int park_refcount;
68
69 kcondvar_t park_cv;
70 kmutex_t park_mtx;
71
72 TAILQ_ENTRY(puffs_park) park_entries;
73 };
74 #define PARKFLAG_WAITERGONE 0x01
75 #define PARKFLAG_DONE 0x02
76 #define PARKFLAG_ONQUEUE1 0x04
77 #define PARKFLAG_ONQUEUE2 0x08
78 #define PARKFLAG_CALL 0x10
79 #define PARKFLAG_WANTREPLY 0x20
80
81 static struct pool_cache parkpc;
82 static struct pool parkpool;
83
84 static int
85 makepark(void *arg, void *obj, int flags)
86 {
87 struct puffs_park *park = obj;
88
89 mutex_init(&park->park_mtx, MUTEX_DEFAULT, IPL_NONE);
90 cv_init(&park->park_cv, "puffsrpl");
91
92 return 0;
93 }
94
95 static void
96 nukepark(void *arg, void *obj)
97 {
98 struct puffs_park *park = obj;
99
100 cv_destroy(&park->park_cv);
101 mutex_destroy(&park->park_mtx);
102 }
103
104 void
105 puffs_msgif_init()
106 {
107
108 pool_init(&parkpool, sizeof(struct puffs_park), 0, 0, 0,
109 "puffprkl", &pool_allocator_nointr, IPL_NONE);
110 pool_cache_init(&parkpc, &parkpool, makepark, nukepark, NULL);
111 }
112
113 void
114 puffs_msgif_destroy()
115 {
116
117 pool_cache_destroy(&parkpc);
118 pool_destroy(&parkpool);
119 }
120
121 void *
122 puffs_park_alloc(int waitok)
123 {
124 struct puffs_park *park;
125
126 park = pool_cache_get(&parkpc, waitok ? PR_WAITOK : PR_NOWAIT);
127 if (park) {
128 park->park_refcount = 1;
129 mutex_enter(&park->park_mtx);
130 }
131
132 return park;
133 }
134
135 static void
136 puffs_park_reference(struct puffs_park *park)
137 {
138
139 mutex_enter(&park->park_mtx);
140 park->park_refcount++;
141 }
142
143 void
144 puffs_park_release(void *arg, int fullnuke)
145 {
146 struct puffs_park *park = arg;
147
148 KASSERT(mutex_owned(&park->park_mtx));
149 --park->park_refcount;
150
151 mutex_exit(&park->park_mtx);
152 if (park->park_refcount == 0 || fullnuke)
153 pool_cache_put(&parkpc, park);
154 }
155
156 #ifdef PUFFSDEBUG
/* Dump the full contents of a single park for debugging. */
static void
parkdump(struct puffs_park *park)
{

	DPRINTF(("park %p, preq %p, id %" PRIu64 "\n"
	    "\tcopy %zu, max %zu - done: %p/%p\n"
	    "\tflags 0x%08x, refcount %d, cv/mtx: %p/%p\n",
	    park, park->park_preq, park->park_id,
	    park->park_copylen, park->park_maxlen,
	    park->park_done, park->park_donearg,
	    park->park_flags, park->park_refcount,
	    &park->park_cv, &park->park_mtx));
}
170
171 static void
172 parkqdump(struct puffs_wq *q, int dumpall)
173 {
174 struct puffs_park *park;
175 int total = 0;
176
177 TAILQ_FOREACH(park, q, park_entries) {
178 if (dumpall)
179 parkdump(park);
180 total++;
181 }
182 DPRINTF(("puffs waitqueue at %p dumped, %d total\n", q, total));
183
184 }
185 #endif /* PUFFSDEBUG */
186
/*
 * Converts a non-FAF op to a FAF.  This simply involves making a
 * copy of the request structure, tagging the copy as a FAF and
 * clearing the want-reply flag on the park.  It is safe to block
 * here, since the original op is not a FAF.
 */
192 static void
193 puffs_reqtofaf(struct puffs_park *park)
194 {
195 struct puffs_req *newpreq;
196
197 KASSERT((park->park_preq->preq_opclass & PUFFSOPFLAG_FAF) == 0);
198
199 MALLOC(newpreq, struct puffs_req *, park->park_copylen,
200 M_PUFFS, M_ZERO | M_WAITOK);
201
202 memcpy(newpreq, park->park_preq, park->park_copylen);
203
204 park->park_preq = newpreq;
205 park->park_preq->preq_opclass |= PUFFSOPFLAG_FAF;
206 park->park_flags &= ~PARKFLAG_WANTREPLY;
207 }
208
209
210 /*
211 * kernel-user-kernel waitqueues
212 */
213
214 static int touser(struct puffs_mount *, struct puffs_park *, uint64_t);
215
216 uint64_t
217 puffs_getreqid(struct puffs_mount *pmp)
218 {
219 uint64_t rv;
220
221 mutex_enter(&pmp->pmp_lock);
222 rv = pmp->pmp_nextreq++;
223 mutex_exit(&pmp->pmp_lock);
224
225 return rv;
226 }
227
228 /* vfs request */
229 int
230 puffs_vfstouser(struct puffs_mount *pmp, int optype, void *kbuf, size_t buflen)
231 {
232 struct puffs_park *park;
233
234 park = puffs_park_alloc(1);
235 park->park_preq = kbuf;
236
237 park->park_preq->preq_opclass = PUFFSOP_VFS;
238 park->park_preq->preq_optype = optype;
239
240 park->park_maxlen = park->park_copylen = buflen;
241 park->park_flags = 0;
242
243 return touser(pmp, park, puffs_getreqid(pmp));
244 }
245
246 void
247 puffs_suspendtouser(struct puffs_mount *pmp, int status)
248 {
249 struct puffs_vfsreq_suspend *pvfsr_susp;
250 struct puffs_park *park;
251
252 pvfsr_susp = malloc(sizeof(struct puffs_vfsreq_suspend),
253 M_PUFFS, M_WAITOK | M_ZERO);
254 park = puffs_park_alloc(1);
255
256 pvfsr_susp->pvfsr_status = status;
257 park->park_preq = (struct puffs_req *)pvfsr_susp;
258
259 park->park_preq->preq_opclass = PUFFSOP_VFS | PUFFSOPFLAG_FAF;
260 park->park_preq->preq_optype = PUFFS_VFS_SUSPEND;
261
262 park->park_maxlen = park->park_copylen
263 = sizeof(struct puffs_vfsreq_suspend);
264 park->park_flags = 0;
265
266 (void)touser(pmp, park, 0);
267 }
268
269 /*
270 * vnode level request
271 */
272 int
273 puffs_vntouser(struct puffs_mount *pmp, int optype,
274 void *kbuf, size_t buflen, size_t maxdelta,
275 struct vnode *vp_opc, struct vnode *vp_aux)
276 {
277 struct puffs_park *park;
278 struct puffs_req *preq;
279 void *cookie = VPTOPNC(vp_opc);
280 struct puffs_node *pnode;
281 int rv;
282
283 park = puffs_park_alloc(1);
284 park->park_preq = kbuf;
285
286 park->park_preq->preq_opclass = PUFFSOP_VN;
287 park->park_preq->preq_optype = optype;
288 park->park_preq->preq_cookie = cookie;
289
290 park->park_copylen = buflen;
291 park->park_maxlen = buflen + maxdelta;
292 park->park_flags = 0;
293
294 rv = touser(pmp, park, puffs_getreqid(pmp));
295
296 /*
297 * Check if the user server requests that inactive be called
298 * when the time is right.
299 */
300 preq = park->park_preq;
301 if (preq->preq_setbacks & PUFFS_SETBACK_INACT_N1) {
302 pnode = vp_opc->v_data;
303 pnode->pn_stat |= PNODE_DOINACT;
304 }
305 if (preq->preq_setbacks & PUFFS_SETBACK_INACT_N2) {
306 /* if no vp_aux, just ignore */
307 if (vp_aux) {
308 pnode = vp_aux->v_data;
309 pnode->pn_stat |= PNODE_DOINACT;
310 }
311 }
312 if (preq->preq_setbacks & PUFFS_SETBACK_NOREF_N1) {
313 pnode = vp_opc->v_data;
314 pnode->pn_stat |= PNODE_NOREFS;
315 }
316 if (preq->preq_setbacks & PUFFS_SETBACK_NOREF_N2) {
317 /* if no vp_aux, just ignore */
318 if (vp_aux) {
319 pnode = vp_aux->v_data;
320 pnode->pn_stat |= PNODE_NOREFS;
321 }
322 }
323
324 return rv;
325 }
326
327 /*
328 * vnode level request, caller-controller req id
329 */
330 int
331 puffs_vntouser_req(struct puffs_mount *pmp, int optype,
332 void *kbuf, size_t buflen, size_t maxdelta,
333 uint64_t reqid, struct vnode *vp_opc, struct vnode *vp_aux)
334 {
335 struct puffs_park *park;
336 void *cookie = VPTOPNC(vp_opc);
337
338 park = puffs_park_alloc(1);
339 park->park_preq = kbuf;
340
341 park->park_preq->preq_opclass = PUFFSOP_VN;
342 park->park_preq->preq_optype = optype;
343 park->park_preq->preq_cookie = cookie;
344
345 park->park_copylen = buflen;
346 park->park_maxlen = buflen + maxdelta;
347 park->park_flags = 0;
348
349 return touser(pmp, park, reqid);
350 }
351
352 void
353 puffs_vntouser_call(struct puffs_mount *pmp, int optype,
354 void *kbuf, size_t buflen, size_t maxdelta,
355 parkdone_fn donefn, void *donearg,
356 struct vnode *vp_opc, struct vnode *vp_aux)
357 {
358 struct puffs_park *park;
359 void *cookie = VPTOPNC(vp_opc);
360
361 park = puffs_park_alloc(1);
362 park->park_preq = kbuf;
363
364 park->park_preq->preq_opclass = PUFFSOP_VN;
365 park->park_preq->preq_optype = optype;
366 park->park_preq->preq_cookie = cookie;
367
368 park->park_copylen = buflen;
369 park->park_maxlen = buflen + maxdelta;
370 park->park_done = donefn;
371 park->park_donearg = donearg;
372 park->park_flags = PARKFLAG_CALL;
373
374 (void) touser(pmp, park, puffs_getreqid(pmp));
375 }
376
/*
 * Notice: kbuf will be free'd later.  It must be allocated from the
 * kernel heap and its ownership is shifted to this function from
 * now on, i.e. the caller is not allowed to use it anymore!
 */
382 void
383 puffs_vntouser_faf(struct puffs_mount *pmp, int optype,
384 void *kbuf, size_t buflen, struct vnode *vp_opc)
385 {
386 struct puffs_park *park;
387 void *cookie = VPTOPNC(vp_opc);
388
389 /* XXX: is it allowable to sleep here? */
390 park = puffs_park_alloc(0);
391 if (park == NULL)
392 return; /* 2bad */
393
394 park->park_preq = kbuf;
395
396 park->park_preq->preq_opclass = PUFFSOP_VN | PUFFSOPFLAG_FAF;
397 park->park_preq->preq_optype = optype;
398 park->park_preq->preq_cookie = cookie;
399
400 park->park_maxlen = park->park_copylen = buflen;
401 park->park_flags = 0;
402
403 (void)touser(pmp, park, 0);
404 }
405
406 void
407 puffs_cacheop(struct puffs_mount *pmp, struct puffs_park *park,
408 struct puffs_cacheinfo *pcinfo, size_t pcilen, void *cookie)
409 {
410
411 park->park_preq = (struct puffs_req *)pcinfo;
412 park->park_preq->preq_opclass = PUFFSOP_CACHE | PUFFSOPFLAG_FAF;
413 park->park_preq->preq_optype = PCACHE_TYPE_WRITE; /* XXX */
414 park->park_preq->preq_cookie = cookie;
415
416 park->park_maxlen = park->park_copylen = pcilen;
417 park->park_flags = 0;
418
419 (void)touser(pmp, park, 0);
420 }
421
422 /*
423 * Wait for the userspace ping-pong game in calling process context.
424 *
425 * This unlocks vnodes if they are supplied. vp1 is the vnode
426 * before in the locking order, i.e. the one which must be locked
427 * before accessing vp2. This is done here so that operations are
428 * already ordered in the queue when vnodes are unlocked (I'm not
429 * sure if that's really necessary, but it can't hurt). Okok, maybe
430 * there's a slight ugly-factor also, but let's not worry about that.
431 */
/*
 * Queue a request for the file server and, unless the op is a FAF
 * or a callback-style op, sleep until the answer arrives.
 *
 * Called with park->park_mtx held (taken in puffs_park_alloc())
 * and with the park holding a single reference; both are consumed
 * before this function returns, except in the FAF/call case where
 * ownership of the park passes to the request queue.
 */
static int
touser(struct puffs_mount *pmp, struct puffs_park *park, uint64_t reqid)
{
	struct lwp *l = curlwp;
	struct mount *mp;
	struct puffs_req *preq;
	int rv = 0;

	mp = PMPTOMP(pmp);
	preq = park->park_preq;
	preq->preq_id = park->park_id = reqid;
	preq->preq_buflen = ALIGN(park->park_maxlen);

	if (PUFFSOP_WANTREPLY(preq->preq_opclass))
		park->park_flags |= PARKFLAG_WANTREPLY;

	/*
	 * To support PCATCH, yet another movie: check if there are signals
	 * pending and we are issuing a non-FAF.  If so, return an error
	 * directly UNLESS we are issuing INACTIVE.  In that case, convert
	 * it to a FAF, fire off to the file server and return an error.
	 * Yes, this is bordering disgusting.  Barfbags are on me.
	 */
	if ((park->park_flags & PARKFLAG_WANTREPLY)
	    && (park->park_flags & PARKFLAG_CALL) == 0
	    && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0)) {
		if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
		    && preq->preq_optype == PUFFS_VN_INACTIVE) {
			/* INACTIVE must reach the server; send as FAF */
			puffs_reqtofaf(park);
			DPRINTF(("puffs touser: converted to FAF %p\n", park));
			rv = EINTR;
		} else {
			puffs_park_release(park, 0);
			return EINTR;
		}
	}

	/*
	 * test for suspension lock.
	 *
	 * Note that we *DO NOT* keep the lock, since that might block
	 * lock acquiring PLUS it would give userlandia control over
	 * the lock.  The operation queue enforces a strict ordering:
	 * when the fs server gets in the op stream, it knows things
	 * are in order.  The kernel locks can't guarantee that for
	 * userspace, in any case.
	 *
	 * BUT: this presents a problem for ops which have a consistency
	 * clause based on more than one operation.  Unfortunately such
	 * operations (read, write) do not reliably work yet.
	 *
	 * Ya, Ya, it's wrong wrong wrong, me be fixink this someday.
	 *
	 * XXX: and there is one more problem.  We sometimes need to
	 * take a lazy lock in case the fs is suspending and we are
	 * executing as the fs server context.  This might happen
	 * e.g. in the case that the user server triggers a reclaim
	 * in the kernel while the fs is suspending.  It's not a very
	 * likely event, but it needs to be fixed some day.
	 */

	/*
	 * MOREXXX: once PUFFS_WCACHEINFO is enabled, we can't take
	 * the mutex here, since getpages() might be called locked.
	 */
	fstrans_start(mp, FSTRANS_NORMAL);
	mutex_enter(&pmp->pmp_lock);
	fstrans_done(mp);

	/* dead fs: don't bother queueing, nobody will answer */
	if (pmp->pmp_status != PUFFSTAT_RUNNING) {
		mutex_exit(&pmp->pmp_lock);
		puffs_park_release(park, 0);
		return ENXIO;
	}

#ifdef PUFFSDEBUG
	parkqdump(&pmp->pmp_req_touser, puffsdebug > 1);
	parkqdump(&pmp->pmp_req_replywait, puffsdebug > 1);
#endif

	/* enqueue for the server; hold the mount so it can't go away */
	TAILQ_INSERT_TAIL(&pmp->pmp_req_touser, park, park_entries);
	park->park_flags |= PARKFLAG_ONQUEUE1;
	puffs_mp_reference(pmp);
	pmp->pmp_req_touser_count++;
	mutex_exit(&pmp->pmp_lock);

	DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
	    "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, park,
	    preq->preq_opclass, preq->preq_optype, park->park_flags));

	/* wake up anyone in getop / poll */
	cv_broadcast(&pmp->pmp_req_waiter_cv);
	selnotify(pmp->pmp_sel, 0);

	if ((park->park_flags & PARKFLAG_WANTREPLY)
	    && (park->park_flags & PARKFLAG_CALL) == 0) {
		int error;

		/* wait for putop() to flag us done; interruptible */
		error = cv_wait_sig(&park->park_cv, &park->park_mtx);
		if (error) {
			park->park_flags |= PARKFLAG_WAITERGONE;
			if (park->park_flags & PARKFLAG_DONE) {
				/* reply raced in before the signal won */
				rv = preq->preq_rv;
				puffs_park_release(park, 0);
			} else {
				/*
				 * ok, we marked it as going away, but
				 * still need to do queue ops.  take locks
				 * in correct order.
				 *
				 * We don't want to release our reference
				 * if it's on replywait queue to avoid error
				 * to file server.  putop() code will DTRT.
				 */
				KASSERT(park->park_flags &
				    (PARKFLAG_ONQUEUE1 | PARKFLAG_ONQUEUE2));
				mutex_exit(&park->park_mtx);

				/* pmp_lock before park_mtx, always */
				mutex_enter(&pmp->pmp_lock);
				mutex_enter(&park->park_mtx);
				if (park->park_flags & PARKFLAG_ONQUEUE1) {
					TAILQ_REMOVE(&pmp->pmp_req_touser,
					    park, park_entries);
					pmp->pmp_req_touser_count--;
					park->park_flags &= ~PARKFLAG_ONQUEUE1;
				}
				if ((park->park_flags & PARKFLAG_ONQUEUE2) == 0)
					puffs_park_release(park, 0);
				else
					mutex_exit(&park->park_mtx);
				mutex_exit(&pmp->pmp_lock);

				rv = error;
			}
		} else {
			rv = preq->preq_rv;
			puffs_park_release(park, 0);
		}

		/*
		 * retake the lock and release.  This makes sure (haha,
		 * I'm humorous) that we don't process the same vnode in
		 * multiple threads due to the locks hacks we have in
		 * puffs_lock().  In reality this is well protected by
		 * the biglock, but once that's gone, well, hopefully
		 * this will be fixed for real.  (and when you read this
		 * comment in 2017 and subsequently barf, my condolences ;).
		 */
		if (rv == 0 && !fstrans_is_owner(mp)) {
			fstrans_start(mp, FSTRANS_NORMAL);
			fstrans_done(mp);
		}
	} else {
		/* FAF or callback op: the queue now owns the park */
		mutex_exit(&park->park_mtx);
	}

	mutex_enter(&pmp->pmp_lock);
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	return rv;
}
593
594
595 /*
596 * getop: scan through queued requests until:
597 * 1) max number of requests satisfied
598 * OR
599 * 2) buffer runs out of space
600 * OR
601 * 3) nonblocking is set AND there are no operations available
602 * OR
603 * 4) at least one operation was transferred AND there are no more waiting
604 */
/*
 * Transfer queued requests to the file server's buffer.  Holds
 * pmp_lock across the loop, dropping it only around copyout();
 * parks are pinned across the unlocked window with
 * puffs_park_reference().  See the termination conditions in the
 * comment above.
 */
int
puffs_getop(struct puffs_mount *pmp, struct puffs_reqh_get *phg, int nonblock)
{
	struct puffs_park *park;
	struct puffs_req *preq;
	uint8_t *bufpos;
	int error, donesome;

	donesome = error = 0;
	bufpos = phg->phg_buf;

	mutex_enter(&pmp->pmp_lock);
	while (phg->phg_nops == 0 || donesome != phg->phg_nops) {
 again:
		if (pmp->pmp_status != PUFFSTAT_RUNNING) {
			/* if we got some, they don't really matter anymore */
			error = ENXIO;
			goto out;
		}
		if (TAILQ_EMPTY(&pmp->pmp_req_touser)) {
			if (donesome)
				goto out;

			if (nonblock) {
				error = EWOULDBLOCK;
				goto out;
			}

			/* sleep until touser() queues something */
			error = cv_wait_sig(&pmp->pmp_req_waiter_cv,
			    &pmp->pmp_lock);
			if (error)
				goto out;
			else
				goto again;
		}

		park = TAILQ_FIRST(&pmp->pmp_req_touser);
		puffs_park_reference(park);

		/* If it's a goner, don't process any further */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			puffs_park_release(park, 0);
			continue;
		}

		preq = park->park_preq;
		if (phg->phg_buflen < preq->preq_buflen) {
			/* E2BIG only if nothing was transferred yet */
			if (!donesome)
				error = E2BIG;
			puffs_park_release(park, 0);
			goto out;
		}

		/* dequeue before dropping pmp_lock for copyout */
		TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		pmp->pmp_req_touser_count--;
		KASSERT(pmp->pmp_req_touser_count >= 0);
		mutex_exit(&pmp->pmp_lock);

		DPRINTF(("puffsgetop: get op %" PRIu64 " (%d.), from %p "
		    "len %zu (buflen %zu), target %p\n", preq->preq_id,
		    donesome, preq, park->park_copylen, preq->preq_buflen,
		    bufpos));

		if ((error = copyout(preq, bufpos, park->park_copylen)) != 0) {
			DPRINTF(("puffs_getop: copyout failed: %d\n", error));
			/*
			 * ok, user server is probably trying to cheat.
			 * stuff op back & return error to user.  We need
			 * to take locks in the correct order.
			 */
			mutex_exit(&park->park_mtx);

			/*
			 * XXX: ONQUEUE1 | ONQUEUE2 invariant doesn't
			 * hold here
			 */

			mutex_enter(&pmp->pmp_lock);
			mutex_enter(&park->park_mtx);
			/* requeue only if someone is still waiting for it */
			if ((park->park_flags & PARKFLAG_WAITERGONE) == 0) {
				TAILQ_INSERT_HEAD(&pmp->pmp_req_touser, park,
				    park_entries);
				park->park_flags |= PARKFLAG_ONQUEUE1;
				pmp->pmp_req_touser_count++;
			}

			/* earlier transfers still count as success */
			if (donesome)
				error = 0;
			puffs_park_release(park, 0);
			goto out;
		}
		bufpos += preq->preq_buflen;
		phg->phg_buflen -= preq->preq_buflen;
		donesome++;

		/* XXXfixme: taking this lock in the wrong order */
		mutex_enter(&pmp->pmp_lock);

		if (park->park_flags & PARKFLAG_WANTREPLY) {
			/* park waits on the replywait queue for putop() */
			TAILQ_INSERT_TAIL(&pmp->pmp_req_replywait, park,
			    park_entries);
			park->park_flags |= PARKFLAG_ONQUEUE2;
			puffs_park_release(park, 0);
		} else {
			/* FAF: nothing comes back, nuke req and park */
			free(preq, M_PUFFS);
			puffs_park_release(park, 1);
		}
	}

 out:
	phg->phg_more = pmp->pmp_req_touser_count;
	mutex_exit(&pmp->pmp_lock);

	phg->phg_nops = donesome;

	return error;
}
724
/*
 * Accept replies from the file server and wake up / call back the
 * in-kernel waiters.  Replies are chained: each copied-in request
 * header names the id, length and user buffer of the next one.
 * If the original waiter has given up (PARKFLAG_WAITERGONE), the
 * header is copied into a temporary so the chain can still be
 * followed, and the park is destroyed instead of signalled.
 */
int
puffs_putop(struct puffs_mount *pmp, struct puffs_reqh_put *php)
{
	struct puffs_park *park;
	struct puffs_req tmpreq;
	struct puffs_req *nextpreq;
	void *userbuf;
	uint64_t id;
	size_t reqlen;
	int donesome, error, wgone, release;

	donesome = error = wgone = 0;

	id = php->php_id;
	userbuf = php->php_buf;
	reqlen = php->php_buflen;

	mutex_enter(&pmp->pmp_lock);
	while (donesome != php->php_nops) {
		release = 0;
#ifdef PUFFSDEBUG
		DPRINTF(("puffsputop: searching for %" PRIu64 ", ubuf: %p, "
		    "len %zu\n", id, userbuf, reqlen));
#endif
		/* locate the park this reply belongs to */
		TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
			if (park->park_id == id)
				break;
		}

		if (park == NULL) {
			DPRINTF(("puffsputop: no request: %" PRIu64 "\n", id));
			error = EINVAL;
			break;
		}

		puffs_park_reference(park);
		/* reply must fit into the space reserved at send time */
		if (reqlen == 0 || reqlen > park->park_maxlen) {
			DPRINTF(("puffsputop: invalid buffer length: "
			    "%zu\n", reqlen));
			error = E2BIG;
			puffs_park_release(park, 0);
			break;
		}
		wgone = park->park_flags & PARKFLAG_WAITERGONE;

		/* check if it's still on the queue after acquiring lock */
		if (park->park_flags & PARKFLAG_ONQUEUE2) {
			TAILQ_REMOVE(&pmp->pmp_req_replywait, park,
			    park_entries);
			park->park_flags &= ~PARKFLAG_ONQUEUE2;
		}

		mutex_exit(&pmp->pmp_lock);

		/*
		 * If the caller has gone south, go to next, collect
		 * $200 and free the structure there instead of wakeup.
		 * We also need to copyin the header info.  Flag structure
		 * release to mode total and utter destruction.
		 */
		if (wgone) {
			DPRINTF(("puffs_putop: bad service - waiter gone for "
			    "park %p\n", park));
			error = copyin(userbuf, &tmpreq,
			    sizeof(struct puffs_req));
			release = 1;
			if (error)
				goto loopout;
			nextpreq = &tmpreq;
			goto next;
		}

		DPRINTF(("puffsputpop: copyin from %p to %p, len %zu\n",
		    userbuf, park->park_preq, reqlen));
		error = copyin(userbuf, park->park_preq, reqlen);
		if (error)
			goto loopout;
		nextpreq = park->park_preq;

 next:
		/* all's well, prepare for next op */
		id = nextpreq->preq_id;
		reqlen = nextpreq->preq_buflen;
		userbuf = nextpreq->preq_nextbuf;
		donesome++;

 loopout:
		if (error && !wgone)
			park->park_preq->preq_rv = error;

		/* callback-style op: deliver the reply, then destroy */
		if (park->park_flags & PARKFLAG_CALL) {
			park->park_done(park->park_preq, park->park_donearg);
			release = 1;
		}

		if (!wgone) {
			DPRINTF(("puffs_putop: flagging done for "
			    "park %p\n", park));

			cv_signal(&park->park_cv);
		}
		park->park_flags |= PARKFLAG_DONE;
		puffs_park_release(park, release);

		mutex_enter(&pmp->pmp_lock);
		if (error)
			break;
		wgone = 0;
	}

	mutex_exit(&pmp->pmp_lock);
	/* report how many replies were left unprocessed */
	php->php_nops -= donesome;

	return error;
}
840
841 /*
842 * We're dead, kaput, RIP, slightly more than merely pining for the
843 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
844 * our maker, ceased to be, etcetc. YASD. It's a dead FS!
845 *
846 * Caller must hold puffs mutex.
847 */
848 void
849 puffs_userdead(struct puffs_mount *pmp)
850 {
851 struct puffs_park *park, *park_next;
852
853 /*
854 * Mark filesystem status as dying so that operations don't
855 * attempt to march to userspace any longer.
856 */
857 pmp->pmp_status = PUFFSTAT_DYING;
858
859 /* signal waiters on REQUEST TO file server queue */
860 for (park = TAILQ_FIRST(&pmp->pmp_req_touser); park; park = park_next) {
861 uint8_t opclass;
862
863 puffs_park_reference(park);
864 park_next = TAILQ_NEXT(park, park_entries);
865
866 KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
867 TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
868 park->park_flags &= ~PARKFLAG_ONQUEUE1;
869 pmp->pmp_req_touser_count--;
870
871 /*
872 * If the waiter is gone, we may *NOT* access preq anymore.
873 */
874 if (park->park_flags & PARKFLAG_WAITERGONE) {
875 KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
876 KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
877 puffs_park_release(park, 0);
878 } else {
879 opclass = park->park_preq->preq_opclass;
880 park->park_preq->preq_rv = ENXIO;
881
882 if (park->park_flags & PARKFLAG_CALL) {
883 park->park_done(park->park_preq,
884 park->park_donearg);
885 puffs_park_release(park, 1);
886 } else if ((park->park_flags & PARKFLAG_WANTREPLY)==0) {
887 free(park->park_preq, M_PUFFS);
888 puffs_park_release(park, 1);
889 } else {
890 park->park_preq->preq_rv = ENXIO;
891 cv_signal(&park->park_cv);
892 puffs_park_release(park, 0);
893 }
894 }
895 }
896
897 /* signal waiters on RESPONSE FROM file server queue */
898 for (park=TAILQ_FIRST(&pmp->pmp_req_replywait); park; park=park_next) {
899 puffs_park_reference(park);
900 park_next = TAILQ_NEXT(park, park_entries);
901
902 KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
903 KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
904
905 TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);
906 park->park_flags &= ~PARKFLAG_ONQUEUE2;
907
908 /*
909 * If the waiter is gone, we may *NOT* access preq anymore.
910 */
911 if (park->park_flags & PARKFLAG_WAITERGONE) {
912 KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
913 puffs_park_release(park, 0);
914 } else {
915 park->park_preq->preq_rv = ENXIO;
916 if (park->park_flags & PARKFLAG_CALL) {
917 park->park_done(park->park_preq,
918 park->park_donearg);
919 puffs_park_release(park, 1);
920 } else {
921 cv_signal(&park->park_cv);
922 puffs_park_release(park, 0);
923 }
924 }
925 }
926 }
927
928 /* this is probably going to die away at some point? */
929 /*
930 * XXX: currently bitrotted
931 */
932 #if 0
933 static int
934 puffssizeop(struct puffs_mount *pmp, struct puffs_sizeop *psop_user)
935 {
936 struct puffs_sizepark *pspark;
937 void *kernbuf;
938 size_t copylen;
939 int error;
940
941 /* locate correct op */
942 mutex_enter(&pmp->pmp_lock);
943 TAILQ_FOREACH(pspark, &pmp->pmp_req_sizepark, pkso_entries) {
944 if (pspark->pkso_reqid == psop_user->pso_reqid) {
945 TAILQ_REMOVE(&pmp->pmp_req_sizepark, pspark,
946 pkso_entries);
947 break;
948 }
949 }
950 mutex_exit(&pmp->pmp_lock);
951
952 if (pspark == NULL)
953 return EINVAL;
954
955 error = 0;
956 copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);
957
958 /*
959 * XXX: uvm stuff to avoid bouncy-bouncy copying?
960 */
961 if (PUFFS_SIZEOP_UIO(pspark->pkso_reqtype)) {
962 kernbuf = malloc(copylen, M_PUFFS, M_WAITOK | M_ZERO);
963 if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_IN) {
964 error = copyin(psop_user->pso_userbuf,
965 kernbuf, copylen);
966 if (error) {
967 printf("psop ERROR1 %d\n", error);
968 goto escape;
969 }
970 }
971 error = uiomove(kernbuf, copylen, pspark->pkso_uio);
972 if (error) {
973 printf("uiomove from kernel %p, len %d failed: %d\n",
974 kernbuf, (int)copylen, error);
975 goto escape;
976 }
977
978 if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_OUT) {
979 error = copyout(kernbuf,
980 psop_user->pso_userbuf, copylen);
981 if (error) {
982 printf("psop ERROR2 %d\n", error);
983 goto escape;
984 }
985 }
986 escape:
987 free(kernbuf, M_PUFFS);
988 } else if (PUFFS_SIZEOP_BUF(pspark->pkso_reqtype)) {
989 copylen = MAX(pspark->pkso_bufsize, psop_user->pso_bufsize);
990 if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_BUF_IN) {
991 error = copyin(psop_user->pso_userbuf,
992 pspark->pkso_copybuf, copylen);
993 } else {
994 error = copyout(pspark->pkso_copybuf,
995 psop_user->pso_userbuf, copylen);
996 }
997 }
998 #ifdef DIAGNOSTIC
999 else
1000 panic("puffssizeop: invalid reqtype %d\n",
1001 pspark->pkso_reqtype);
1002 #endif /* DIAGNOSTIC */
1003
1004 return error;
1005 }
1006 #endif
1007