/*	$NetBSD: puffs_msgif.c,v 1.35 2007/05/07 17:14:54 pooka Exp $	*/
2
3 /*
4 * Copyright (c) 2005, 2006, 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by the
7 * Google Summer of Code program and the Ulla Tuominen Foundation.
8 * The Google SoC project was mentored by Bill Studenmund.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the company nor the name of the author may be used to
19 * endorse or promote products derived from this software without specific
20 * prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
23 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.35 2007/05/07 17:14:54 pooka Exp $");
37
38 #include <sys/param.h>
39 #include <sys/fstrans.h>
40 #include <sys/malloc.h>
41 #include <sys/mount.h>
42 #include <sys/vnode.h>
43 #include <sys/lock.h>
44
45 #include <fs/puffs/puffs_msgif.h>
46 #include <fs/puffs/puffs_sys.h>
47
48 /*
49 * waitq data structures
50 */
51
/*
 * While a request is going to userspace, park the caller within the
 * kernel.  This is the kernel counterpart of "struct puffs_req".
 */
struct puffs_park {
	struct puffs_req *park_preq;	/* req followed by buf		*/
	uint64_t park_id;		/* duplicate of preq_id		*/

	size_t park_copylen;		/* userspace copylength		*/
	size_t park_maxlen;		/* max size in comeback		*/

	parkdone_fn park_done;		/* completion callback (PARKFLAG_CALL) */
	void *park_donearg;		/* opaque argument for park_done */

	int park_flags;			/* PARKFLAG_*, see below	*/
	int park_refcount;		/* protected by park_mtx	*/

	kcondvar_t park_cv;		/* caller sleeps here for reply	*/
	kmutex_t park_mtx;		/* guards flags/refcount/cv	*/

	TAILQ_ENTRY(puffs_park) park_entries;
};
#define PARKFLAG_WAITERGONE 0x01	/* waiter aborted (e.g. signal)	*/
#define PARKFLAG_DONE 0x02		/* reply has been processed	*/
#define PARKFLAG_ONQUEUE1 0x04		/* linked on pmp_req_touser	*/
#define PARKFLAG_ONQUEUE2 0x08		/* linked on pmp_req_replywait	*/
#define PARKFLAG_CALL 0x10		/* completion via callback	*/
#define PARKFLAG_WANTREPLY 0x20		/* op is not fire-and-forget	*/
80
/* backing pool and constructed-object cache for struct puffs_park */
static struct pool_cache parkpc;
static struct pool parkpool;
83
84 static int
85 makepark(void *arg, void *obj, int flags)
86 {
87 struct puffs_park *park = obj;
88
89 mutex_init(&park->park_mtx, MUTEX_DEFAULT, IPL_NONE);
90 cv_init(&park->park_cv, "puffsrpl");
91
92 return 0;
93 }
94
95 static void
96 nukepark(void *arg, void *obj)
97 {
98 struct puffs_park *park = obj;
99
100 cv_destroy(&park->park_cv);
101 mutex_destroy(&park->park_mtx);
102 }
103
/*
 * One-time initialization of the park pool and its cache.
 * Counterpart of puffs_msgif_destroy().
 */
void
puffs_msgif_init()
{

	pool_init(&parkpool, sizeof(struct puffs_park), 0, 0, 0,
	    "puffprkl", &pool_allocator_nointr, IPL_NONE);
	pool_cache_init(&parkpc, &parkpool, makepark, nukepark, NULL);
}
112
/*
 * Tear down the park cache and pool.  Callers must guarantee no
 * parks are outstanding when this is invoked.
 */
void
puffs_msgif_destroy()
{

	pool_cache_destroy(&parkpc);
	pool_destroy(&parkpool);
}
120
121 void *
122 puffs_park_alloc(int waitok)
123 {
124 struct puffs_park *park;
125
126 park = pool_cache_get(&parkpc, waitok ? PR_WAITOK : PR_NOWAIT);
127 if (park) {
128 park->park_refcount = 1;
129 mutex_enter(&park->park_mtx);
130 }
131
132 return park;
133 }
134
/*
 * Acquire an additional reference on a park.  Returns with
 * park_mtx held; the caller pairs this with puffs_park_release().
 */
static void
puffs_park_reference(struct puffs_park *park)
{

	mutex_enter(&park->park_mtx);
	park->park_refcount++;
}
142
143 void
144 puffs_park_release(void *arg, int fullnuke)
145 {
146 struct puffs_park *park = arg;
147
148 KASSERT(mutex_owned(&park->park_mtx));
149 --park->park_refcount;
150
151 mutex_exit(&park->park_mtx);
152 if (park->park_refcount == 0 || fullnuke)
153 pool_cache_put(&parkpc, park);
154 }
155
#ifdef PUFFSDEBUG
/* Debug aid: print every field of a single park. */
static void
parkdump(struct puffs_park *park)
{

	DPRINTF(("park %p, preq %p, id %" PRIu64 "\n"
	    "\tcopy %zu, max %zu - done: %p/%p\n"
	    "\tflags 0x%08x, refcount %d, cv/mtx: %p/%p\n",
	    park, park->park_preq, park->park_id,
	    park->park_copylen, park->park_maxlen,
	    park->park_done, park->park_donearg,
	    park->park_flags, park->park_refcount,
	    &park->park_cv, &park->park_mtx));
}

/*
 * Debug aid: walk a wait queue, optionally dumping each park, and
 * report the total.  Caller is expected to hold the queue's lock.
 */
static void
parkqdump(struct puffs_wq *q, int dumpall)
{
	struct puffs_park *park;
	int total = 0;

	TAILQ_FOREACH(park, q, park_entries) {
		if (dumpall)
			parkdump(park);
		total++;
	}
	DPRINTF(("puffs waitqueue at %p dumped, %d total\n", q, total));

}
#endif /* PUFFSDEBUG */
186
/*
 * Converts a non-FAF op to a FAF.  This simply involves making copies
 * of the park and request structures and tagging the request as a FAF.
 * It is safe to block here, since the original op is not a FAF.
 *
 * NOTE(review): the FAF transport path frees the preq after shipping
 * it, so the request needs its own heap copy here; the original
 * buffer presumably remains owned by the issuing caller — confirm.
 */
static void
puffs_reqtofaf(struct puffs_park *park)
{
	struct puffs_req *newpreq;

	KASSERT((park->park_preq->preq_opclass & PUFFSOPFLAG_FAF) == 0);

	/* only park_copylen bytes travel to the server, so copy that much */
	MALLOC(newpreq, struct puffs_req *, park->park_copylen,
	    M_PUFFS, M_ZERO | M_WAITOK);

	memcpy(newpreq, park->park_preq, park->park_copylen);

	park->park_preq = newpreq;
	park->park_preq->preq_opclass |= PUFFSOPFLAG_FAF;
}
207
208
209 /*
210 * kernel-user-kernel waitqueues
211 */
212
213 static int touser(struct puffs_mount *, struct puffs_park *, uint64_t);
214
215 uint64_t
216 puffs_getreqid(struct puffs_mount *pmp)
217 {
218 uint64_t rv;
219
220 mutex_enter(&pmp->pmp_lock);
221 rv = pmp->pmp_nextreq++;
222 mutex_exit(&pmp->pmp_lock);
223
224 return rv;
225 }
226
227 /* vfs request */
228 int
229 puffs_vfstouser(struct puffs_mount *pmp, int optype, void *kbuf, size_t buflen)
230 {
231 struct puffs_park *park;
232
233 park = puffs_park_alloc(1);
234 park->park_preq = kbuf;
235
236 park->park_preq->preq_opclass = PUFFSOP_VFS;
237 park->park_preq->preq_optype = optype;
238
239 park->park_maxlen = park->park_copylen = buflen;
240 park->park_flags = 0;
241
242 return touser(pmp, park, puffs_getreqid(pmp));
243 }
244
245 void
246 puffs_suspendtouser(struct puffs_mount *pmp, int status)
247 {
248 struct puffs_vfsreq_suspend *pvfsr_susp;
249 struct puffs_park *park;
250
251 pvfsr_susp = malloc(sizeof(struct puffs_vfsreq_suspend),
252 M_PUFFS, M_WAITOK | M_ZERO);
253 park = puffs_park_alloc(1);
254
255 pvfsr_susp->pvfsr_status = status;
256 park->park_preq = (struct puffs_req *)pvfsr_susp;
257
258 park->park_preq->preq_opclass = PUFFSOP_VFS | PUFFSOPFLAG_FAF;
259 park->park_preq->preq_optype = PUFFS_VFS_SUSPEND;
260
261 park->park_maxlen = park->park_copylen
262 = sizeof(struct puffs_vfsreq_suspend);
263 park->park_flags = 0;
264
265 (void)touser(pmp, park, 0);
266 }
267
268 /*
269 * vnode level request
270 */
271 int
272 puffs_vntouser(struct puffs_mount *pmp, int optype,
273 void *kbuf, size_t buflen, size_t maxdelta,
274 struct vnode *vp_opc, struct vnode *vp_aux)
275 {
276 struct puffs_park *park;
277 struct puffs_req *preq;
278 void *cookie = VPTOPNC(vp_opc);
279 struct puffs_node *pnode;
280 int rv;
281
282 park = puffs_park_alloc(1);
283 park->park_preq = kbuf;
284
285 park->park_preq->preq_opclass = PUFFSOP_VN;
286 park->park_preq->preq_optype = optype;
287 park->park_preq->preq_cookie = cookie;
288
289 park->park_copylen = buflen;
290 park->park_maxlen = buflen + maxdelta;
291 park->park_flags = 0;
292
293 rv = touser(pmp, park, puffs_getreqid(pmp));
294
295 /*
296 * Check if the user server requests that inactive be called
297 * when the time is right.
298 */
299 preq = park->park_preq;
300 if (preq->preq_setbacks & PUFFS_SETBACK_INACT_N1) {
301 pnode = vp_opc->v_data;
302 pnode->pn_stat |= PNODE_DOINACT;
303 }
304 if (preq->preq_setbacks & PUFFS_SETBACK_INACT_N2) {
305 /* if no vp_aux, just ignore */
306 if (vp_aux) {
307 pnode = vp_aux->v_data;
308 pnode->pn_stat |= PNODE_DOINACT;
309 }
310 }
311
312 return rv;
313 }
314
315 /*
316 * vnode level request, caller-controller req id
317 */
318 int
319 puffs_vntouser_req(struct puffs_mount *pmp, int optype,
320 void *kbuf, size_t buflen, size_t maxdelta,
321 uint64_t reqid, struct vnode *vp_opc, struct vnode *vp_aux)
322 {
323 struct puffs_park *park;
324 void *cookie = VPTOPNC(vp_opc);
325
326 park = puffs_park_alloc(1);
327 park->park_preq = kbuf;
328
329 park->park_preq->preq_opclass = PUFFSOP_VN;
330 park->park_preq->preq_optype = optype;
331 park->park_preq->preq_cookie = cookie;
332
333 park->park_copylen = buflen;
334 park->park_maxlen = buflen + maxdelta;
335 park->park_flags = 0;
336
337 return touser(pmp, park, reqid);
338 }
339
340 void
341 puffs_vntouser_call(struct puffs_mount *pmp, int optype,
342 void *kbuf, size_t buflen, size_t maxdelta,
343 parkdone_fn donefn, void *donearg,
344 struct vnode *vp_opc, struct vnode *vp_aux)
345 {
346 struct puffs_park *park;
347 void *cookie = VPTOPNC(vp_opc);
348
349 park = puffs_park_alloc(1);
350 park->park_preq = kbuf;
351
352 park->park_preq->preq_opclass = PUFFSOP_VN;
353 park->park_preq->preq_optype = optype;
354 park->park_preq->preq_cookie = cookie;
355
356 park->park_copylen = buflen;
357 park->park_maxlen = buflen + maxdelta;
358 park->park_done = donefn;
359 park->park_donearg = donearg;
360 park->park_flags = PARKFLAG_CALL;
361
362 (void) touser(pmp, park, puffs_getreqid(pmp));
363 }
364
365 /*
366 * Notice: kbuf will be free'd later. I must be allocated from the
367 * kernel heap and it's ownership is shifted to this function from
368 * now on, i.e. the caller is not allowed to use it anymore!
369 */
370 void
371 puffs_vntouser_faf(struct puffs_mount *pmp, int optype,
372 void *kbuf, size_t buflen, struct vnode *vp_opc)
373 {
374 struct puffs_park *park;
375 void *cookie = VPTOPNC(vp_opc);
376
377 /* XXX: is it allowable to sleep here? */
378 park = puffs_park_alloc(0);
379 if (park == NULL)
380 return; /* 2bad */
381
382 park->park_preq = kbuf;
383
384 park->park_preq->preq_opclass = PUFFSOP_VN | PUFFSOPFLAG_FAF;
385 park->park_preq->preq_optype = optype;
386 park->park_preq->preq_cookie = cookie;
387
388 park->park_maxlen = park->park_copylen = buflen;
389 park->park_flags = 0;
390
391 (void)touser(pmp, park, 0);
392 }
393
394 void
395 puffs_cacheop(struct puffs_mount *pmp, struct puffs_park *park,
396 struct puffs_cacheinfo *pcinfo, size_t pcilen, void *cookie)
397 {
398
399 park->park_preq = (struct puffs_req *)pcinfo;
400 park->park_preq->preq_opclass = PUFFSOP_CACHE | PUFFSOPFLAG_FAF;
401 park->park_preq->preq_optype = PCACHE_TYPE_WRITE; /* XXX */
402 park->park_preq->preq_cookie = cookie;
403
404 park->park_maxlen = park->park_copylen = pcilen;
405 park->park_flags = 0;
406
407 (void)touser(pmp, park, 0);
408 }
409
/*
 * Wait for the userspace ping-pong game in calling process context.
 *
 * This unlocks vnodes if they are supplied.  vp1 is the vnode
 * before in the locking order, i.e. the one which must be locked
 * before accessing vp2.  This is done here so that operations are
 * already ordered in the queue when vnodes are unlocked (I'm not
 * sure if that's really necessary, but it can't hurt).  Okok, maybe
 * there's a slight ugly-factor also, but let's not worry about that.
 *
 * NOTE(review): entered with park_mtx held (puffs_park_alloc()
 * returns with it locked) — the cv_wait_sig() below relies on that.
 * The caller's reference to the park is consumed here, so the park
 * may not be dereferenced after this returns.
 */
static int
touser(struct puffs_mount *pmp, struct puffs_park *park, uint64_t reqid)
{
	struct lwp *l = curlwp;
	struct mount *mp;
	struct puffs_req *preq;
	int rv = 0;

	mp = PMPTOMP(pmp);
	preq = park->park_preq;
	preq->preq_id = park->park_id = reqid;
	/* tell userspace how much buffer it gets for the comeback */
	preq->preq_buflen = ALIGN(park->park_maxlen);

	if (PUFFSOP_WANTREPLY(preq->preq_opclass))
		park->park_flags |= PARKFLAG_WANTREPLY;

	/*
	 * To support PCATCH, yet another movie: check if there are signals
	 * pending and we are issueing a non-FAF.  If so, return an error
	 * directly UNLESS we are issueing INACTIVE.  In that case, convert
	 * it to a FAF, fire off to the file server and return an error.
	 * Yes, this is bordering disgusting.  Barfbags are on me.
	 */
	if ((park->park_flags & PARKFLAG_WANTREPLY)
	   && (park->park_flags & PARKFLAG_CALL) == 0
	   && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0)) {
		if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
		    && preq->preq_optype == PUFFS_VN_INACTIVE) {
			/* INACTIVE must reach the server: convert to FAF */
			puffs_reqtofaf(park);
			DPRINTF(("puffs touser: converted to FAF %p\n", park));
			rv = EINTR;
		} else {
			puffs_park_release(park, 0);
			return EINTR;
		}
	}

	/*
	 * test for suspension lock.
	 *
	 * Note that we *DO NOT* keep the lock, since that might block
	 * lock acquiring PLUS it would give userlandia control over
	 * the lock.  The operation queue enforces a strict ordering:
	 * when the fs server gets in the op stream, it knows things
	 * are in order.  The kernel locks can't guarantee that for
	 * userspace, in any case.
	 *
	 * BUT: this presents a problem for ops which have a consistency
	 * clause based on more than one operation.  Unfortunately such
	 * operations (read, write) do not reliably work yet.
	 *
	 * Ya, Ya, it's wrong wong wrong, me be fixink this someday.
	 *
	 * XXX: and there is one more problem.  We sometimes need to
	 * take a lazy lock in case the fs is suspending and we are
	 * executing as the fs server context.  This might happen
	 * e.g. in the case that the user server triggers a reclaim
	 * in the kernel while the fs is suspending.  It's not a very
	 * likely event, but it needs to be fixed some day.
	 */

	/*
	 * MOREXXX: once PUFFS_WCACHEINFO is enabled, we can't take
	 * the mutex here, since getpages() might be called locked.
	 */
	fstrans_start(mp, FSTRANS_NORMAL);
	mutex_enter(&pmp->pmp_lock);
	fstrans_done(mp);

	if (pmp->pmp_status != PUFFSTAT_RUNNING) {
		/* file server is dead or dying: don't queue anything */
		mutex_exit(&pmp->pmp_lock);
		puffs_park_release(park, 0);
		return ENXIO;
	}

#ifdef PUFFSDEBUG
	parkqdump(&pmp->pmp_req_touser, puffsdebug > 1);
	parkqdump(&pmp->pmp_req_replywait, puffsdebug > 1);
#endif

	/* enqueue for the server and account for it */
	TAILQ_INSERT_TAIL(&pmp->pmp_req_touser, park, park_entries);
	park->park_flags |= PARKFLAG_ONQUEUE1;
	puffs_mp_reference(pmp);
	pmp->pmp_req_touser_count++;
	mutex_exit(&pmp->pmp_lock);

	DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
	    "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, park,
	    preq->preq_opclass, preq->preq_optype, park->park_flags));

	/* wake up both in-kernel getop waiters and select/poll sleepers */
	cv_broadcast(&pmp->pmp_req_waiter_cv);
	selnotify(pmp->pmp_sel, 0);

	if ((park->park_flags & PARKFLAG_WANTREPLY)
	    && (park->park_flags & PARKFLAG_CALL) == 0) {
		int error;

		/* interruptible sleep until putop() signals us done */
		error = cv_wait_sig(&park->park_cv, &park->park_mtx);
		if (error) {
			park->park_flags |= PARKFLAG_WAITERGONE;
			if (park->park_flags & PARKFLAG_DONE) {
				rv = preq->preq_rv;
				puffs_park_release(park, 0);
			} else {
				/*
				 * ok, we marked it as going away, but
				 * still need to do queue ops.  take locks
				 * in correct order.
				 *
				 * We don't want to release our reference
				 * if it's on replywait queue to avoid error
				 * to file server.  putop() code will DTRT.
				 */
				KASSERT(park->park_flags &
				    (PARKFLAG_ONQUEUE1 | PARKFLAG_ONQUEUE2));
				mutex_exit(&park->park_mtx);

				/* lock order: pmp_lock before park_mtx */
				mutex_enter(&pmp->pmp_lock);
				mutex_enter(&park->park_mtx);
				if (park->park_flags & PARKFLAG_ONQUEUE1)
					TAILQ_REMOVE(&pmp->pmp_req_touser,
					    park, park_entries);
				park->park_flags &= ~PARKFLAG_ONQUEUE1;
				pmp->pmp_req_touser_count--;
				if ((park->park_flags & PARKFLAG_ONQUEUE2) == 0)
					puffs_park_release(park, 0);
				else
					mutex_exit(&park->park_mtx);
				mutex_exit(&pmp->pmp_lock);

				rv = error;
			}
		} else {
			rv = preq->preq_rv;
			puffs_park_release(park, 0);
		}

		/*
		 * retake the lock and release.  This makes sure (haha,
		 * I'm humorous) that we don't process the same vnode in
		 * multiple threads due to the locks hacks we have in
		 * puffs_lock().  In reality this is well protected by
		 * the biglock, but once that's gone, well, hopefully
		 * this will be fixed for real.  (and when you read this
		 * comment in 2017 and subsequently barf, my condolences ;).
		 */
		if (rv == 0 && !fstrans_is_owner(mp)) {
			fstrans_start(mp, FSTRANS_NORMAL);
			fstrans_done(mp);
		}
	} else {
		/*
		 * FAF or callback op: don't wait here.  The reference
		 * is dropped later by getop()/putop(); just unlock.
		 */
		mutex_exit(&park->park_mtx);
	}

	mutex_enter(&pmp->pmp_lock);
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	return rv;
}
580
581
/*
 * getop: scan through queued requests until:
 * 1) max number of requests satisfied
 *    OR
 * 2) buffer runs out of space
 *    OR
 * 3) nonblocking is set AND there are no operations available
 *    OR
 * 4) at least one operation was transferred AND there are no more waiting
 *
 * Called on behalf of the file server to fetch pending requests into
 * its userspace buffer (phg->phg_buf).  WANTREPLY ops migrate from
 * the touser queue to the replywait queue; FAF ops are freed here.
 */
int
puffs_getop(struct puffs_mount *pmp, struct puffs_reqh_get *phg, int nonblock)
{
	struct puffs_park *park;
	struct puffs_req *preq;
	uint8_t *bufpos;
	int error, donesome;

	donesome = error = 0;
	bufpos = phg->phg_buf;

	mutex_enter(&pmp->pmp_lock);
	/* phg_nops == 0 means "as many as fit" */
	while (phg->phg_nops == 0 || donesome != phg->phg_nops) {
 again:
		if (pmp->pmp_status != PUFFSTAT_RUNNING) {
			/* if we got some, they don't really matter anymore */
			error = ENXIO;
			goto out;
		}
		if (TAILQ_EMPTY(&pmp->pmp_req_touser)) {
			if (donesome)
				goto out;

			if (nonblock) {
				error = EWOULDBLOCK;
				goto out;
			}

			/* sleep until touser() queues something */
			error = cv_wait_sig(&pmp->pmp_req_waiter_cv,
			    &pmp->pmp_lock);
			if (error)
				goto out;
			else
				goto again;
		}

		park = TAILQ_FIRST(&pmp->pmp_req_touser);
		puffs_park_reference(park);

		/* If it's a goner, don't process any further */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			puffs_park_release(park, 0);
			continue;
		}

		preq = park->park_preq;
		if (phg->phg_buflen < preq->preq_buflen) {
			/* E2BIG only if nothing was transferred yet */
			if (!donesome)
				error = E2BIG;
			puffs_park_release(park, 0);
			goto out;
		}

		TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		pmp->pmp_req_touser_count--;
		KASSERT(pmp->pmp_req_touser_count >= 0);
		mutex_exit(&pmp->pmp_lock);

		DPRINTF(("puffsgetop: get op %" PRIu64 " (%d.), from %p "
		    "len %zu (buflen %zu), target %p\n", preq->preq_id,
		    donesome, preq, park->park_copylen, preq->preq_buflen,
		    bufpos));

		if ((error = copyout(preq, bufpos, park->park_copylen)) != 0) {
			DPRINTF(("puffs_getop: copyout failed: %d\n", error));
			/*
			 * ok, user server is probably trying to cheat.
			 * stuff op back & return error to user.  We need
			 * to take locks in the correct order.
			 */
			mutex_exit(&park->park_mtx);

			/*
			 * XXX: ONQUEUE1 | ONQUEUE2 invariant doesn't
			 * hold here
			 */

			mutex_enter(&pmp->pmp_lock);
			mutex_enter(&park->park_mtx);
			if ((park->park_flags & PARKFLAG_WAITERGONE) == 0) {
				TAILQ_INSERT_HEAD(&pmp->pmp_req_touser, park,
				    park_entries);
				park->park_flags |= PARKFLAG_ONQUEUE1;
				pmp->pmp_req_touser_count++;
			}

			/* partial success trumps the copyout error */
			if (donesome)
				error = 0;
			puffs_park_release(park, 0);
			goto out;
		}
		bufpos += preq->preq_buflen;
		phg->phg_buflen -= preq->preq_buflen;
		donesome++;

		/* XXXfixme: taking this lock in the wrong order */
		mutex_enter(&pmp->pmp_lock);

		if (park->park_flags & PARKFLAG_WANTREPLY) {
			/* park waits for the server's reply via putop() */
			TAILQ_INSERT_TAIL(&pmp->pmp_req_replywait, park,
			    park_entries);
			park->park_flags |= PARKFLAG_ONQUEUE2;
			puffs_park_release(park, 0);
		} else {
			/* FAF: nobody will reply, destroy req and park */
			free(preq, M_PUFFS);
			puffs_park_release(park, 1);
		}
	}

 out:
	phg->phg_more = pmp->pmp_req_touser_count;
	mutex_exit(&pmp->pmp_lock);

	phg->phg_nops = donesome;

	return error;
}
711
/*
 * putop: process a chain of replies from the file server.  Each reply
 * identifies its park by request id on the replywait queue; the reply
 * payload is copied over the original request buffer and the sleeping
 * kernel caller (or PARKFLAG_CALL callback) is notified.  The replies
 * are chained via preq_nextbuf in the server's buffer.
 */
int
puffs_putop(struct puffs_mount *pmp, struct puffs_reqh_put *php)
{
	struct puffs_park *park;
	struct puffs_req tmpreq;
	struct puffs_req *nextpreq;
	void *userbuf;
	uint64_t id;
	size_t reqlen;
	int donesome, error, wgone, release;

	donesome = error = wgone = 0;

	id = php->php_id;
	userbuf = php->php_buf;
	reqlen = php->php_buflen;

	mutex_enter(&pmp->pmp_lock);
	while (donesome != php->php_nops) {
		release = 0;
#ifdef PUFFSDEBUG
		DPRINTF(("puffsputop: searching for %" PRIu64 ", ubuf: %p, "
		    "len %zu\n", id, userbuf, reqlen));
#endif
		TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
			if (park->park_id == id)
				break;
		}

		if (park == NULL) {
			DPRINTF(("puffsputop: no request: %" PRIu64 "\n", id));
			error = EINVAL;
			break;
		}

		puffs_park_reference(park);
		/* server-supplied length must fit the original buffer */
		if (reqlen == 0 || reqlen > park->park_maxlen) {
			DPRINTF(("puffsputop: invalid buffer length: "
			    "%zu\n", reqlen));
			error = E2BIG;
			puffs_park_release(park, 0);
			break;
		}
		wgone = park->park_flags & PARKFLAG_WAITERGONE;

		/* check if it's still on the queue after acquiring lock */
		if (park->park_flags & PARKFLAG_ONQUEUE2) {
			TAILQ_REMOVE(&pmp->pmp_req_replywait, park,
			    park_entries);
			park->park_flags &= ~PARKFLAG_ONQUEUE2;
		}

		mutex_exit(&pmp->pmp_lock);

		/*
		 * If the caller has gone south, go to next, collect
		 * $200 and free the structure there instead of wakeup.
		 * We also need to copyin the header info.  Flag structure
		 * release to mode total and utter destruction.
		 */
		if (wgone) {
			DPRINTF(("puffs_putop: bad service - waiter gone for "
			    "park %p\n", park));
			/* header only: just enough to follow the chain */
			error = copyin(userbuf, &tmpreq,
			    sizeof(struct puffs_req));
			release = 1;
			if (error)
				goto loopout;
			nextpreq = &tmpreq;
			goto next;
		}

		DPRINTF(("puffsputpop: copyin from %p to %p, len %zu\n",
		    userbuf, park->park_preq, reqlen));
		error = copyin(userbuf, park->park_preq, reqlen);
		if (error)
			goto loopout;
		nextpreq = park->park_preq;

 next:
		/* all's well, prepare for next op */
		id = nextpreq->preq_id;
		reqlen = nextpreq->preq_buflen;
		userbuf = nextpreq->preq_nextbuf;
		donesome++;

 loopout:
		if (error && !wgone)
			park->park_preq->preq_rv = error;

		if (park->park_flags & PARKFLAG_CALL) {
			/* async op: run the completion callback here */
			park->park_done(park->park_preq, park->park_donearg);
			release = 1;
		}

		if (!wgone) {
			DPRINTF(("puffs_putop: flagging done for "
			    "park %p\n", park));

			cv_signal(&park->park_cv);
		}
		park->park_flags |= PARKFLAG_DONE;
		puffs_park_release(park, release);

		mutex_enter(&pmp->pmp_lock);
		if (error)
			break;
		wgone = 0;
	}

	mutex_exit(&pmp->pmp_lock);
	/* report back how many replies were NOT processed */
	php->php_nops -= donesome;

	return error;
}
827
/*
 * We're dead, kaput, RIP, slightly more than merely pining for the
 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
 * our maker, ceased to be, etcetc.  YASD.  It's a dead FS!
 *
 * Flushes both wait queues, completing every parked op with ENXIO
 * (or destroying it outright if nobody is waiting).
 *
 * Caller must hold puffs mutex.
 */
void
puffs_userdead(struct puffs_mount *pmp)
{
	struct puffs_park *park, *park_next;

	/*
	 * Mark filesystem status as dying so that operations don't
	 * attempt to march to userspace any longer.
	 */
	pmp->pmp_status = PUFFSTAT_DYING;

	/* signal waiters on REQUEST TO file server queue */
	for (park = TAILQ_FIRST(&pmp->pmp_req_touser); park; park = park_next) {
		uint8_t opclass;	/* NOTE(review): assigned below but never used */

		puffs_park_reference(park);
		park_next = TAILQ_NEXT(park, park_entries);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		pmp->pmp_req_touser_count--;

		/*
		 * If the waiter is gone, we may *NOT* access preq anymore.
		 */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
			KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
			puffs_park_release(park, 0);
		} else {
			opclass = park->park_preq->preq_opclass;
			park->park_preq->preq_rv = ENXIO;

			if (park->park_flags & PARKFLAG_CALL) {
				park->park_done(park->park_preq,
				    park->park_donearg);
				puffs_park_release(park, 1);
			} else if ((park->park_flags & PARKFLAG_WANTREPLY)==0) {
				/* FAF never shipped: destroy req and park */
				free(park->park_preq, M_PUFFS);
				puffs_park_release(park, 1);
			} else {
				/* NOTE(review): redundant, preq_rv set above */
				park->park_preq->preq_rv = ENXIO;
				cv_signal(&park->park_cv);
				puffs_park_release(park, 0);
			}
		}
	}

	/* signal waiters on RESPONSE FROM file server queue */
	for (park=TAILQ_FIRST(&pmp->pmp_req_replywait); park; park=park_next) {
		puffs_park_reference(park);
		park_next = TAILQ_NEXT(park, park_entries);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
		KASSERT(park->park_flags & PARKFLAG_WANTREPLY);

		TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE2;

		/*
		 * If the waiter is gone, we may *NOT* access preq anymore.
		 */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
			puffs_park_release(park, 0);
		} else {
			park->park_preq->preq_rv = ENXIO;
			if (park->park_flags & PARKFLAG_CALL) {
				park->park_done(park->park_preq,
				    park->park_donearg);
				puffs_park_release(park, 1);
			} else {
				cv_signal(&park->park_cv);
				puffs_park_release(park, 0);
			}
		}
	}
}
914
/* this is probably going to die away at some point? */
/*
 * XXX: currently bitrotted
 *
 * NOTE(review): dead code, compiled out via #if 0.  It no longer
 * matches the current request interfaces and is kept for reference
 * only; verify against the sizeop headers before ever re-enabling.
 */
#if 0
static int
puffssizeop(struct puffs_mount *pmp, struct puffs_sizeop *psop_user)
{
	struct puffs_sizepark *pspark;
	void *kernbuf;
	size_t copylen;
	int error;

	/* locate correct op */
	mutex_enter(&pmp->pmp_lock);
	TAILQ_FOREACH(pspark, &pmp->pmp_req_sizepark, pkso_entries) {
		if (pspark->pkso_reqid == psop_user->pso_reqid) {
			TAILQ_REMOVE(&pmp->pmp_req_sizepark, pspark,
			    pkso_entries);
			break;
		}
	}
	mutex_exit(&pmp->pmp_lock);

	if (pspark == NULL)
		return EINVAL;

	error = 0;
	copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);

	/*
	 * XXX: uvm stuff to avoid bouncy-bouncy copying?
	 */
	if (PUFFS_SIZEOP_UIO(pspark->pkso_reqtype)) {
		kernbuf = malloc(copylen, M_PUFFS, M_WAITOK | M_ZERO);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_IN) {
			error = copyin(psop_user->pso_userbuf,
			    kernbuf, copylen);
			if (error) {
				printf("psop ERROR1 %d\n", error);
				goto escape;
			}
		}
		error = uiomove(kernbuf, copylen, pspark->pkso_uio);
		if (error) {
			printf("uiomove from kernel %p, len %d failed: %d\n",
			    kernbuf, (int)copylen, error);
			goto escape;
		}

		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_OUT) {
			error = copyout(kernbuf,
			    psop_user->pso_userbuf, copylen);
			if (error) {
				printf("psop ERROR2 %d\n", error);
				goto escape;
			}
		}
 escape:
		free(kernbuf, M_PUFFS);
	} else if (PUFFS_SIZEOP_BUF(pspark->pkso_reqtype)) {
		copylen = MAX(pspark->pkso_bufsize, psop_user->pso_bufsize);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_BUF_IN) {
			error = copyin(psop_user->pso_userbuf,
			    pspark->pkso_copybuf, copylen);
		} else {
			error = copyout(pspark->pkso_copybuf,
			    psop_user->pso_userbuf, copylen);
		}
	}
#ifdef DIAGNOSTIC
	else
		panic("puffssizeop: invalid reqtype %d\n",
		    pspark->pkso_reqtype);
#endif /* DIAGNOSTIC */

	return error;
}
#endif
994