/* $NetBSD: puffs_msgif.c,v 1.20 2007/03/14 12:13:58 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2005, 2006, 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by the
7 * Google Summer of Code program and the Ulla Tuominen Foundation.
8 * The Google SoC project was mentored by Bill Studenmund.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the company nor the name of the author may be used to
19 * endorse or promote products derived from this software without specific
20 * prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
23 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.20 2007/03/14 12:13:58 pooka Exp $");
37
38 #include <sys/param.h>
39 #include <sys/fstrans.h>
40 #include <sys/malloc.h>
41 #include <sys/mount.h>
42 #include <sys/vnode.h>
43 #include <sys/lock.h>
44
45 #include <fs/puffs/puffs_msgif.h>
46 #include <fs/puffs/puffs_sys.h>
47
48
49 /*
50 * kernel-user-kernel waitqueues
51 */
52
53 static int touser(struct puffs_mount *, struct puffs_park *, uint64_t,
54 struct vnode *, struct vnode *);
55
56 uint64_t
57 puffs_getreqid(struct puffs_mount *pmp)
58 {
59 uint64_t rv;
60
61 simple_lock(&pmp->pmp_lock);
62 rv = pmp->pmp_nextreq++;
63 simple_unlock(&pmp->pmp_lock);
64
65 return rv;
66 }
67
68 /* vfs request */
69 int
70 puffs_vfstouser(struct puffs_mount *pmp, int optype, void *kbuf, size_t buflen)
71 {
72 struct puffs_park park;
73
74 park.park_preq = kbuf;
75
76 park.park_preq->preq_opclass = PUFFSOP_VFS;
77 park.park_preq->preq_optype = optype;
78
79 park.park_maxlen = park.park_copylen = buflen;
80 park.park_flags = 0;
81
82 return touser(pmp, &park, puffs_getreqid(pmp), NULL, NULL);
83 }
84
85 void
86 puffs_suspendtouser(struct puffs_mount *pmp, int status)
87 {
88 struct puffs_vfsreq_suspend *pvfsr_susp;
89 struct puffs_park *ppark;
90
91 pvfsr_susp = malloc(sizeof(struct puffs_vfsreq_suspend),
92 M_PUFFS, M_WAITOK | M_ZERO);
93 ppark = malloc(sizeof(struct puffs_park), M_PUFFS, M_WAITOK | M_ZERO);
94
95 pvfsr_susp->pvfsr_status = status;
96 ppark->park_preq = (struct puffs_req *)pvfsr_susp;
97
98 ppark->park_preq->preq_opclass = PUFFSOP_VFS | PUFFSOPFLAG_FAF;
99 ppark->park_preq->preq_optype = PUFFS_VFS_SUSPEND;
100
101 ppark->park_maxlen = ppark->park_copylen
102 = sizeof(struct puffs_vfsreq_suspend);
103 ppark->park_flags = 0;
104
105 (void)touser(pmp, ppark, 0, NULL, NULL);
106 }
107
108 /*
109 * vnode level request
110 */
111 int
112 puffs_vntouser(struct puffs_mount *pmp, int optype,
113 void *kbuf, size_t buflen, void *cookie,
114 struct vnode *vp1, struct vnode *vp2)
115 {
116 struct puffs_park park;
117
118 park.park_preq = kbuf;
119
120 park.park_preq->preq_opclass = PUFFSOP_VN;
121 park.park_preq->preq_optype = optype;
122 park.park_preq->preq_cookie = cookie;
123
124 park.park_maxlen = park.park_copylen = buflen;
125 park.park_flags = 0;
126
127 return touser(pmp, &park, puffs_getreqid(pmp), vp1, vp2);
128 }
129
130 /*
131 * vnode level request, caller-controller req id
132 */
133 int
134 puffs_vntouser_req(struct puffs_mount *pmp, int optype,
135 void *kbuf, size_t buflen, void *cookie, uint64_t reqid,
136 struct vnode *vp1, struct vnode *vp2)
137 {
138 struct puffs_park park;
139
140 park.park_preq = kbuf;
141
142 park.park_preq->preq_opclass = PUFFSOP_VN;
143 park.park_preq->preq_optype = optype;
144 park.park_preq->preq_cookie = cookie;
145
146 park.park_maxlen = park.park_copylen = buflen;
147 park.park_flags = 0;
148
149 return touser(pmp, &park, reqid, vp1, vp2);
150 }
151
152 /*
153 * vnode level request, copy routines can adjust "kernbuf".
154 */
155 int
156 puffs_vntouser_adjbuf(struct puffs_mount *pmp, int optype,
157 void **kbuf, size_t *buflen, size_t maxdelta,
158 void *cookie, struct vnode *vp1, struct vnode *vp2)
159 {
160 struct puffs_park park;
161 int error;
162
163 park.park_preq = *kbuf;
164
165 park.park_preq->preq_opclass = PUFFSOP_VN;
166 park.park_preq->preq_optype = optype;
167 park.park_preq->preq_cookie = cookie;
168
169 park.park_copylen = *buflen;
170 park.park_maxlen = maxdelta + *buflen;
171 park.park_flags = PUFFS_PARKFLAG_ADJUSTABLE;
172
173 error = touser(pmp, &park, puffs_getreqid(pmp), vp1, vp2);
174
175 *kbuf = park.park_preq;
176 *buflen = park.park_copylen;
177
178 return error;
179 }
180
181 /*
182 * File server interaction is async from caller perspective.
183 * biodone(bp)J is signalled in putop.
184 */
185 void
186 puffs_vntouser_bioread_async(struct puffs_mount *pmp, void *cookie,
187 size_t tomove, off_t offset, struct buf *bp,
188 struct vnode *vp1, struct vnode *vp2)
189 {
190 struct puffs_park *ppark;
191 struct puffs_vnreq_read *read_argp;
192
193 MALLOC(ppark, struct puffs_park *, sizeof(struct puffs_park),
194 M_PUFFS, M_WAITOK);
195 MALLOC(read_argp, struct puffs_vnreq_read *,
196 sizeof(struct puffs_vnreq_read), M_PUFFS, M_WAITOK | M_ZERO);
197
198 read_argp->pvnr_ioflag = 0;
199 read_argp->pvnr_resid = tomove;
200 read_argp->pvnr_offset = offset;
201 puffs_credcvt(&read_argp->pvnr_cred, FSCRED);
202
203 ppark->park_preq = (void *)read_argp;
204 ppark->park_preq->preq_opclass = PUFFSOP_VN;
205 ppark->park_preq->preq_optype = PUFFS_VN_READ;
206 ppark->park_preq->preq_cookie = cookie;
207
208 ppark->park_copylen = sizeof(struct puffs_vnreq_read);
209 ppark->park_maxlen = sizeof(struct puffs_vnreq_read) + tomove;
210 ppark->park_bp = bp;
211 ppark->park_flags
212 = PUFFS_PARKFLAG_ADJUSTABLE | PUFFS_PARKFLAG_ASYNCBIOREAD;
213
214 (void)touser(pmp, ppark, puffs_getreqid(pmp), vp1, vp2);
215 }
216
217 /*
218 * Notice: kbuf will be free'd later. I must be allocated from the
219 * kernel heap and it's ownership is shifted to this function from
220 * now on, i.e. the caller is not allowed to use it anymore!
221 */
222 void
223 puffs_vntouser_faf(struct puffs_mount *pmp, int optype,
224 void *kbuf, size_t buflen, void *cookie)
225 {
226 struct puffs_park *ppark;
227
228 /* XXX: is it allowable to sleep here? */
229 ppark = malloc(sizeof(struct puffs_park), M_PUFFS, M_NOWAIT | M_ZERO);
230 if (ppark == NULL)
231 return; /* 2bad */
232
233 ppark->park_preq = kbuf;
234
235 ppark->park_preq->preq_opclass = PUFFSOP_VN | PUFFSOPFLAG_FAF;
236 ppark->park_preq->preq_optype = optype;
237 ppark->park_preq->preq_cookie = cookie;
238
239 ppark->park_maxlen = ppark->park_copylen = buflen;
240 ppark->park_flags = 0;
241
242 (void)touser(pmp, ppark, 0, NULL, NULL);
243 }
244
245 /*
246 * Wait for the userspace ping-pong game in calling process context.
247 *
248 * This unlocks vnodes if they are supplied. vp1 is the vnode
249 * before in the locking order, i.e. the one which must be locked
250 * before accessing vp2. This is done here so that operations are
251 * already ordered in the queue when vnodes are unlocked (I'm not
252 * sure if that's really necessary, but it can't hurt). Okok, maybe
253 * there's a slight ugly-factor also, but let's not worry about that.
254 */
255 static int
256 touser(struct puffs_mount *pmp, struct puffs_park *ppark, uint64_t reqid,
257 struct vnode *vp1, struct vnode *vp2)
258 {
259 struct lwp *l = curlwp;
260 struct mount *mp;
261 struct puffs_req *preq;
262 int rv = 0;
263
264 mp = PMPTOMP(pmp);
265 preq = ppark->park_preq;
266 preq->preq_id = ppark->park_id = reqid;
267 preq->preq_buflen = ALIGN(ppark->park_maxlen);
268
269 /*
270 * To support PCATCH, yet another movie: check if there are signals
271 * pending and we are issueing a non-FAF. If so, return an error
272 * directly UNLESS we are issueing INACTIVE. In that case, convert
273 * it to a FAF, fire off to the file server and return an error.
274 * Yes, this is bordering disgusting. Barfbags are on me.
275 */
276 if (PUFFSOP_WANTREPLY(ppark->park_preq->preq_opclass)
277 && (ppark->park_flags & PUFFS_PARKFLAG_ASYNCBIOREAD) == 0
278 && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0)) {
279 if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
280 && preq->preq_optype == PUFFS_VN_INACTIVE) {
281 struct puffs_park *newpark;
282
283 newpark = puffs_reqtofaf(ppark);
284 DPRINTF(("puffs touser: converted to FAF, old %p, "
285 "new %p\n", ppark, newpark));
286 ppark = newpark;
287 rv = EINTR;
288 } else {
289 return EINTR;
290 }
291 }
292
293 /*
294 * test for suspension lock.
295 *
296 * Note that we *DO NOT* keep the lock, since that might block
297 * lock acquiring PLUS it would give userlandia control over
298 * the lock. The operation queue enforces a strict ordering:
299 * when the fs server gets in the op stream, it knows things
300 * are in order. The kernel locks can't guarantee that for
301 * userspace, in any case.
302 *
303 * BUT: this presents a problem for ops which have a consistency
304 * clause based on more than one operation. Unfortunately such
305 * operations (read, write) do not reliably work yet.
306 *
307 * Ya, Ya, it's wrong wong wrong, me be fixink this someday.
308 *
309 * XXX: and there is one more problem. We sometimes need to
310 * take a lazy lock in case the fs is suspending and we are
311 * executing as the fs server context. This might happen
312 * e.g. in the case that the user server triggers a reclaim
313 * in the kernel while the fs is suspending. It's not a very
314 * likely event, but it needs to be fixed some day.
315 */
316 fstrans_start(mp, FSTRANS_NORMAL);
317 simple_lock(&pmp->pmp_lock);
318 fstrans_done(mp);
319
320 if (pmp->pmp_status != PUFFSTAT_RUNNING) {
321 simple_unlock(&pmp->pmp_lock);
322 return ENXIO;
323 }
324
325 TAILQ_INSERT_TAIL(&pmp->pmp_req_touser, ppark, park_entries);
326 pmp->pmp_req_touser_waiters++;
327
328 /*
329 * Don't do unlock-relock dance yet. There are a couple of
330 * unsolved issues with it. If we don't unlock, we can have
331 * processes wanting vn_lock in case userspace hangs. But
332 * that can be "solved" by killing the userspace process. It
333 * would of course be nicer to have antilocking in the userspace
334 * interface protocol itself.. your patience will be rewarded.
335 */
336 #if 0
337 /* unlock */
338 if (vp2)
339 VOP_UNLOCK(vp2, 0);
340 if (vp1)
341 VOP_UNLOCK(vp1, 0);
342 #endif
343
344 /*
345 * XXX: does releasing the lock here cause trouble? Can't hold
346 * it, because otherwise the below would cause locking against
347 * oneself-problems in the kqueue stuff. yes, it is a
348 * theoretical race, so it must be solved
349 */
350 simple_unlock(&pmp->pmp_lock);
351
352 DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
353 "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, ppark,
354 preq->preq_opclass, preq->preq_optype, ppark->park_flags));
355
356 wakeup(&pmp->pmp_req_touser);
357 selnotify(pmp->pmp_sel, 0);
358
359 if (PUFFSOP_WANTREPLY(ppark->park_preq->preq_opclass)
360 && (ppark->park_flags & PUFFS_PARKFLAG_ASYNCBIOREAD) == 0) {
361 struct puffs_park *valetpark = NULL;
362 int error;
363
364 error = ltsleep(ppark, PUSER | PCATCH, "puffs1", 0, NULL);
365 rv = ppark->park_preq->preq_rv;
366
367 /*
368 * Ok, so it gets a bit tricky around here once again.
369 * We want to give interruptibility to the sleep to work
370 * around all kinds of locking-against-oneself problems
371 * and the file system recursing into itself and so forth.
372 * So if we break out of the ltsleep() for anything except
373 * natural causes, we need to caution ourselves.
374 *
375 * The stages at which we can break out are:
376 * 1) operation waiting to be fetched by file server
377 * 2) operation being copied to userspace, not on either queue
378 * 3) file server operating on .. err .. operation
379 * 4) putop: locate the correct park structure from the queue
380 * 5) putop: copy response from userspace
381 * 6) putop: wakeup waiter
382 *
383 * If we are still at stage 1, no problem, just remove
384 * ourselves from the queue to userspace. If we are at
385 * the stage before 4 has completed, replace the park structure
386 * with a park structure indicating that the caller is
387 * no more and no proper reply is required. If the server
388 * is already copying data from userspace to the kernel,
389 * wait for it to finish and return the real return value to
390 * the caller.
391 */
392 checkagain:
393 if (valetpark) {
394 FREE(valetpark, M_PUFFS);
395 valetpark = NULL;
396 }
397
398 if (error) {
399 DPRINTF(("puffs touser: got %d from ltsleep, "
400 "(unlocked) flags 0x%x (park %p)\n",
401 error, ppark->park_flags, ppark));
402 rv = error;
403
404 MALLOC(valetpark, struct puffs_park *,
405 sizeof(struct puffs_park), M_PUFFS,
406 M_ZERO | M_WAITOK);
407
408 simple_lock(&pmp->pmp_lock);
409
410 /*
411 * The order here for the clauses, per description
412 * in comment above, is:
413 * 1, after 6, after 4, 2-3.
414 */
415 if ((ppark->park_flags&PUFFS_PARKFLAG_PROCESSING)==0) {
416 TAILQ_REMOVE(&pmp->pmp_req_touser, ppark,
417 park_entries);
418 simple_unlock(&pmp->pmp_lock);
419 FREE(valetpark, M_PUFFS);
420 DPRINTF(("puffs touser: park %p removed "
421 "from queue one\n", ppark));
422 } else if
423 (ppark->park_flags & PUFFS_PARKFLAG_RECVREPLY) {
424 if (ppark->park_flags & PUFFS_PARKFLAG_DONE) {
425 rv = ppark->park_preq->preq_rv;
426 simple_unlock(&pmp->pmp_lock);
427 FREE(valetpark, M_PUFFS);
428 } else {
429 error = ltsleep(ppark,
430 PUSER | PCATCH | PNORELOCK,
431 "puffsre1", 0, &pmp->pmp_lock);
432 goto checkagain;
433 }
434 } else {
435 valetpark->park_flags
436 = PUFFS_PARKFLAG_WAITERGONE;
437 ppark->park_flags |= PUFFS_PARKFLAG_WAITERGONE;
438 valetpark->park_id = ppark->park_id;
439
440 if (ppark->park_flags & PUFFS_PARKFLAG_RQUEUE) {
441 TAILQ_INSERT_BEFORE(ppark, valetpark,
442 park_entries);
443 TAILQ_REMOVE(&pmp->pmp_req_replywait,
444 ppark, park_entries);
445 } else {
446 TAILQ_INSERT_TAIL(
447 &pmp->pmp_req_replywait,
448 valetpark, park_entries);
449 }
450
451 simple_unlock(&pmp->pmp_lock);
452 DPRINTF(("puffs touser: replaced park %p "
453 "with valet park %p\n", ppark, valetpark));
454 }
455 }
456
457 /*
458 * retake the lock and release. This makes sure (haha,
459 * I'm humorous) that we don't process the same vnode in
460 * multiple threads due to the locks hacks we have in
461 * puffs_lock(). In reality this is well protected by
462 * the biglock, but once that's gone, well, hopefully
463 * this will be fixed for real. (and when you read this
464 * comment in 2017 and subsequently barf, my condolences ;).
465 */
466 if (rv == 0 && !fstrans_is_owner(mp)) {
467 fstrans_start(mp, FSTRANS_NORMAL);
468 fstrans_done(mp);
469 }
470 }
471
472 #if 0
473 /* relock */
474 if (vp1)
475 KASSERT(vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY) == 0);
476 if (vp2)
477 KASSERT(vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY) == 0);
478 #endif
479
480 simple_lock(&pmp->pmp_lock);
481 if (--pmp->pmp_req_touser_waiters == 0)
482 wakeup(&pmp->pmp_req_touser_waiters);
483 simple_unlock(&pmp->pmp_lock);
484
485 return rv;
486 }
487
488
489 /*
490 * getop: scan through queued requests until:
491 * 1) max number of requests satisfied
492 * OR
493 * 2) buffer runs out of space
494 * OR
495 * 3) nonblocking is set AND there are no operations available
496 * OR
497 * 4) at least one operation was transferred AND there are no more waiting
498 */
499 int
500 puffs_getop(struct puffs_mount *pmp, struct puffs_reqh_get *phg, int nonblock)
501 {
502 struct puffs_park *park;
503 struct puffs_req *preq;
504 uint8_t *bufpos;
505 int error, donesome;
506
507 donesome = error = 0;
508 bufpos = phg->phg_buf;
509
510 simple_lock(&pmp->pmp_lock);
511 while (phg->phg_nops == 0 || donesome != phg->phg_nops) {
512 again:
513 if (pmp->pmp_status != PUFFSTAT_RUNNING) {
514 /* if we got some, they don't really matter anymore */
515 error = ENXIO;
516 goto out;
517 }
518 if (TAILQ_EMPTY(&pmp->pmp_req_touser)) {
519 if (donesome)
520 goto out;
521
522 if (nonblock) {
523 error = EWOULDBLOCK;
524 goto out;
525 }
526
527 error = ltsleep(&pmp->pmp_req_touser, PUSER | PCATCH,
528 "puffs2", 0, &pmp->pmp_lock);
529 if (error)
530 goto out;
531 else
532 goto again;
533 }
534
535 park = TAILQ_FIRST(&pmp->pmp_req_touser);
536 preq = park->park_preq;
537
538 if (phg->phg_buflen < preq->preq_buflen) {
539 if (!donesome)
540 error = E2BIG;
541 goto out;
542 }
543 TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
544 park->park_flags |= PUFFS_PARKFLAG_PROCESSING;
545 simple_unlock(&pmp->pmp_lock);
546
547 DPRINTF(("puffsgetop: get op %" PRIu64 " (%d.), from %p "
548 "len %zu (buflen %zu), target %p\n", preq->preq_id,
549 donesome, preq, park->park_copylen, preq->preq_buflen,
550 bufpos));
551
552 if ((error = copyout(preq, bufpos, park->park_copylen)) != 0) {
553 DPRINTF((" FAILED %d\n", error));
554 /*
555 * ok, user server is probably trying to cheat.
556 * stuff op back & return error to user
557 */
558 simple_lock(&pmp->pmp_lock);
559 TAILQ_INSERT_HEAD(&pmp->pmp_req_touser, park,
560 park_entries);
561
562 if (donesome)
563 error = 0;
564 goto out;
565 }
566 bufpos += preq->preq_buflen;
567 phg->phg_buflen -= preq->preq_buflen;
568 donesome++;
569
570 simple_lock(&pmp->pmp_lock);
571 if (PUFFSOP_WANTREPLY(preq->preq_opclass)) {
572 if ((park->park_flags & PUFFS_PARKFLAG_WAITERGONE)==0) {
573 TAILQ_INSERT_TAIL(&pmp->pmp_req_replywait, park,
574 park_entries);
575 park->park_flags |= PUFFS_PARKFLAG_RQUEUE;
576 }
577 } else {
578 simple_unlock(&pmp->pmp_lock);
579 free(preq, M_PUFFS);
580 free(park, M_PUFFS);
581 simple_lock(&pmp->pmp_lock);
582 }
583 }
584
585 out:
586 phg->phg_more = pmp->pmp_req_touser_waiters;
587 simple_unlock(&pmp->pmp_lock);
588
589 phg->phg_nops = donesome;
590
591 return error;
592 }
593
594 /*
595 * urgh, too complex, be very very careful while editing, i.e. NEEDS CLEANUP
596 */
597 int
598 puffs_putop(struct puffs_mount *pmp, struct puffs_reqh_put *php)
599 {
600 struct puffs_park *park;
601 struct puffs_req tmpreq;
602 struct puffs_req *nextpreq;
603 struct buf *bp;
604 void *userbuf;
605 uint64_t id;
606 size_t reqlen;
607 int donesome, error, wgone;
608
609 donesome = error = wgone = 0;
610
611 id = php->php_id;
612 userbuf = php->php_buf;
613 reqlen = php->php_buflen;
614
615 simple_lock(&pmp->pmp_lock);
616 while (donesome != php->php_nops) {
617 #ifdef PUFFSDEBUG
618 simple_unlock(&pmp->pmp_lock);
619 DPRINTF(("puffsputop: searching for %" PRIu64 ", ubuf: %p, "
620 "len %zu\n", id, userbuf, reqlen));
621 simple_lock(&pmp->pmp_lock);
622 #endif
623 TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
624 if (park->park_id == id)
625 break;
626 }
627
628 if (park == NULL) {
629 error = EINVAL;
630 break;
631 }
632 TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);
633 park->park_flags |= PUFFS_PARKFLAG_RECVREPLY;
634 simple_unlock(&pmp->pmp_lock);
635
636 /*
637 * If the caller has gone south, go to next, collect
638 * $200 and free the structure there instead of wakeup.
639 * We also need to copyin the
640 */
641 if (park->park_flags & PUFFS_PARKFLAG_WAITERGONE) {
642 DPRINTF(("puffs_putop: bad service - waiter gone for "
643 "park %p\n", park));
644 wgone = 1;
645 error = copyin(userbuf, &tmpreq,
646 sizeof(struct puffs_req));
647 if (error)
648 goto loopout;
649 nextpreq = &tmpreq;
650 goto next;
651 }
652
653 if (park->park_flags & PUFFS_PARKFLAG_ADJUSTABLE) {
654 /* sanitycheck size of incoming transmission. */
655 if (reqlen > pmp->pmp_req_maxsize) {
656 DPRINTF(("puffsputop: outrageous user buf "
657 "size: %zu\n", reqlen));
658 error = EINVAL;
659 goto loopout;
660 }
661
662 if (reqlen > park->park_copylen) {
663 if (reqlen > park->park_maxlen) {
664 DPRINTF(("puffsputop: adj copysize "
665 "> max size, %zu vs %zu\n",
666 reqlen, park->park_maxlen));
667 error = EINVAL;
668 goto loopout;
669 }
670 free(park->park_preq, M_PUFFS);
671 park->park_preq = malloc(reqlen,
672 M_PUFFS, M_WAITOK);
673
674 park->park_copylen = reqlen;
675 DPRINTF(("puffsputop: adjbuf, new addr %p, "
676 "len %zu\n", park->park_preq, reqlen));
677 }
678 } else {
679 if (reqlen == 0 || reqlen > park->park_copylen) {
680 reqlen = park->park_copylen;
681 DPRINTF(("puffsputop: kernel bufsize override: "
682 "%zu\n", reqlen));
683 }
684 }
685
686 DPRINTF(("puffsputpop: copyin from %p to %p, len %zu\n",
687 userbuf, park->park_preq, reqlen));
688 error = copyin(userbuf, park->park_preq, reqlen);
689 if (error)
690 goto loopout;
691 nextpreq = park->park_preq;
692 bp = park->park_bp;
693
694 if (park->park_flags & PUFFS_PARKFLAG_ASYNCBIOREAD) {
695 struct puffs_vnreq_read *read_argp;
696 size_t moved;
697
698 bp->b_error = park->park_preq->preq_rv;
699
700 DPRINTF(("puffs_putop: async bioread for park %p, "
701 "bp %p, error %d\n", park, bp, bp->b_error));
702
703 if (bp->b_error == 0) {
704 read_argp = (void *)park->park_preq;
705 moved = park->park_maxlen
706 - sizeof(struct puffs_vnreq_read)
707 - read_argp->pvnr_resid;
708 memcpy(bp->b_data, read_argp->pvnr_data, moved);
709 bp->b_resid = bp->b_bcount - moved;
710 biodone(bp);
711 }
712 }
713
714 next:
715 /* all's well, prepare for next op */
716 id = nextpreq->preq_id;
717 reqlen = nextpreq->preq_buflen;
718 userbuf = nextpreq->preq_nextbuf;
719 donesome++;
720
721 loopout:
722 if (error && park->park_preq) {
723 park->park_preq->preq_rv = error;
724 if (park->park_flags & PUFFS_PARKFLAG_ASYNCBIOREAD) {
725 bp = park->park_bp;
726 bp->b_error = error;
727 bp->b_flags |= B_ERROR;
728 biodone(bp);
729 }
730 }
731
732 if (wgone) {
733 FREE(park, M_PUFFS);
734 simple_lock(&pmp->pmp_lock);
735 } else if (park->park_flags & PUFFS_PARKFLAG_ASYNCBIOREAD) {
736 free(park->park_preq, M_PUFFS);
737 FREE(park, M_PUFFS);
738 simple_lock(&pmp->pmp_lock);
739 } else {
740 DPRINTF(("puffs_putop: flagging done for park %p\n",
741 park));
742 simple_lock(&pmp->pmp_lock);
743 park->park_flags |= PUFFS_PARKFLAG_DONE;
744 wakeup(park);
745 }
746
747 if (error)
748 break;
749 wgone = 0;
750 }
751
752 simple_unlock(&pmp->pmp_lock);
753 php->php_nops -= donesome;
754
755 return error;
756 }
757
758 /* this is probably going to die away at some point? */
759 /*
760 * XXX: currently bitrotted
761 */
762 #if 0
763 static int
764 puffssizeop(struct puffs_mount *pmp, struct puffs_sizeop *psop_user)
765 {
766 struct puffs_sizepark *pspark;
767 void *kernbuf;
768 size_t copylen;
769 int error;
770
771 /* locate correct op */
772 simple_lock(&pmp->pmp_lock);
773 TAILQ_FOREACH(pspark, &pmp->pmp_req_sizepark, pkso_entries) {
774 if (pspark->pkso_reqid == psop_user->pso_reqid) {
775 TAILQ_REMOVE(&pmp->pmp_req_sizepark, pspark,
776 pkso_entries);
777 break;
778 }
779 }
780 simple_unlock(&pmp->pmp_lock);
781
782 if (pspark == NULL)
783 return EINVAL;
784
785 error = 0;
786 copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);
787
788 /*
789 * XXX: uvm stuff to avoid bouncy-bouncy copying?
790 */
791 if (PUFFS_SIZEOP_UIO(pspark->pkso_reqtype)) {
792 kernbuf = malloc(copylen, M_PUFFS, M_WAITOK | M_ZERO);
793 if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_IN) {
794 error = copyin(psop_user->pso_userbuf,
795 kernbuf, copylen);
796 if (error) {
797 printf("psop ERROR1 %d\n", error);
798 goto escape;
799 }
800 }
801 error = uiomove(kernbuf, copylen, pspark->pkso_uio);
802 if (error) {
803 printf("uiomove from kernel %p, len %d failed: %d\n",
804 kernbuf, (int)copylen, error);
805 goto escape;
806 }
807
808 if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_OUT) {
809 error = copyout(kernbuf,
810 psop_user->pso_userbuf, copylen);
811 if (error) {
812 printf("psop ERROR2 %d\n", error);
813 goto escape;
814 }
815 }
816 escape:
817 free(kernbuf, M_PUFFS);
818 } else if (PUFFS_SIZEOP_BUF(pspark->pkso_reqtype)) {
819 copylen = MAX(pspark->pkso_bufsize, psop_user->pso_bufsize);
820 if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_BUF_IN) {
821 error = copyin(psop_user->pso_userbuf,
822 pspark->pkso_copybuf, copylen);
823 } else {
824 error = copyout(pspark->pkso_copybuf,
825 psop_user->pso_userbuf, copylen);
826 }
827 }
828 #ifdef DIAGNOSTIC
829 else
830 panic("puffssizeop: invalid reqtype %d\n",
831 pspark->pkso_reqtype);
832 #endif /* DIAGNOSTIC */
833
834 return error;
835 }
836 #endif
837