/*	$NetBSD: puffs_msgif.c,v 1.21 2007/03/20 10:21:58 pooka Exp $	*/
2
3 /*
4 * Copyright (c) 2005, 2006, 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by the
7 * Google Summer of Code program and the Ulla Tuominen Foundation.
8 * The Google SoC project was mentored by Bill Studenmund.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the company nor the name of the author may be used to
19 * endorse or promote products derived from this software without specific
20 * prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
23 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.21 2007/03/20 10:21:58 pooka Exp $");
37
38 #include <sys/param.h>
39 #include <sys/fstrans.h>
40 #include <sys/malloc.h>
41 #include <sys/mount.h>
42 #include <sys/vnode.h>
43 #include <sys/lock.h>
44
45 #include <fs/puffs/puffs_msgif.h>
46 #include <fs/puffs/puffs_sys.h>
47
48
49 /*
50 * kernel-user-kernel waitqueues
51 */
52
53 static int touser(struct puffs_mount *, struct puffs_park *, uint64_t,
54 struct vnode *, struct vnode *);
55
56 uint64_t
57 puffs_getreqid(struct puffs_mount *pmp)
58 {
59 uint64_t rv;
60
61 simple_lock(&pmp->pmp_lock);
62 rv = pmp->pmp_nextreq++;
63 simple_unlock(&pmp->pmp_lock);
64
65 return rv;
66 }
67
68 /* vfs request */
69 int
70 puffs_vfstouser(struct puffs_mount *pmp, int optype, void *kbuf, size_t buflen)
71 {
72 struct puffs_park park;
73
74 park.park_preq = kbuf;
75
76 park.park_preq->preq_opclass = PUFFSOP_VFS;
77 park.park_preq->preq_optype = optype;
78
79 park.park_maxlen = park.park_copylen = buflen;
80 park.park_flags = 0;
81
82 return touser(pmp, &park, puffs_getreqid(pmp), NULL, NULL);
83 }
84
85 void
86 puffs_suspendtouser(struct puffs_mount *pmp, int status)
87 {
88 struct puffs_vfsreq_suspend *pvfsr_susp;
89 struct puffs_park *ppark;
90
91 pvfsr_susp = malloc(sizeof(struct puffs_vfsreq_suspend),
92 M_PUFFS, M_WAITOK | M_ZERO);
93 ppark = malloc(sizeof(struct puffs_park), M_PUFFS, M_WAITOK | M_ZERO);
94
95 pvfsr_susp->pvfsr_status = status;
96 ppark->park_preq = (struct puffs_req *)pvfsr_susp;
97
98 ppark->park_preq->preq_opclass = PUFFSOP_VFS | PUFFSOPFLAG_FAF;
99 ppark->park_preq->preq_optype = PUFFS_VFS_SUSPEND;
100
101 ppark->park_maxlen = ppark->park_copylen
102 = sizeof(struct puffs_vfsreq_suspend);
103 ppark->park_flags = 0;
104
105 (void)touser(pmp, ppark, 0, NULL, NULL);
106 }
107
108 /*
109 * vnode level request
110 */
111 int
112 puffs_vntouser(struct puffs_mount *pmp, int optype,
113 void *kbuf, size_t buflen, void *cookie,
114 struct vnode *vp1, struct vnode *vp2)
115 {
116 struct puffs_park park;
117
118 park.park_preq = kbuf;
119
120 park.park_preq->preq_opclass = PUFFSOP_VN;
121 park.park_preq->preq_optype = optype;
122 park.park_preq->preq_cookie = cookie;
123
124 park.park_maxlen = park.park_copylen = buflen;
125 park.park_flags = 0;
126
127 return touser(pmp, &park, puffs_getreqid(pmp), vp1, vp2);
128 }
129
130 /*
131 * vnode level request, caller-controller req id
132 */
133 int
134 puffs_vntouser_req(struct puffs_mount *pmp, int optype,
135 void *kbuf, size_t buflen, void *cookie, uint64_t reqid,
136 struct vnode *vp1, struct vnode *vp2)
137 {
138 struct puffs_park park;
139
140 park.park_preq = kbuf;
141
142 park.park_preq->preq_opclass = PUFFSOP_VN;
143 park.park_preq->preq_optype = optype;
144 park.park_preq->preq_cookie = cookie;
145
146 park.park_maxlen = park.park_copylen = buflen;
147 park.park_flags = 0;
148
149 return touser(pmp, &park, reqid, vp1, vp2);
150 }
151
152 /*
153 * vnode level request, copy routines can adjust "kernbuf".
154 */
155 int
156 puffs_vntouser_adjbuf(struct puffs_mount *pmp, int optype,
157 void **kbuf, size_t *buflen, size_t maxdelta,
158 void *cookie, struct vnode *vp1, struct vnode *vp2)
159 {
160 struct puffs_park park;
161 int error;
162
163 park.park_preq = *kbuf;
164
165 park.park_preq->preq_opclass = PUFFSOP_VN;
166 park.park_preq->preq_optype = optype;
167 park.park_preq->preq_cookie = cookie;
168
169 park.park_copylen = *buflen;
170 park.park_maxlen = maxdelta + *buflen;
171 park.park_flags = PUFFS_PARKFLAG_ADJUSTABLE;
172
173 error = touser(pmp, &park, puffs_getreqid(pmp), vp1, vp2);
174
175 *kbuf = park.park_preq;
176 *buflen = park.park_copylen;
177
178 return error;
179 }
180
181 /*
182 * File server interaction is async from caller perspective.
183 * biodone(bp)J is signalled in putop.
184 */
185 void
186 puffs_vntouser_bioread_async(struct puffs_mount *pmp, void *cookie,
187 size_t tomove, off_t offset, struct buf *bp,
188 struct vnode *vp1, struct vnode *vp2)
189 {
190 struct puffs_park *ppark;
191 struct puffs_vnreq_read *read_argp;
192
193 MALLOC(ppark, struct puffs_park *, sizeof(struct puffs_park),
194 M_PUFFS, M_WAITOK);
195 MALLOC(read_argp, struct puffs_vnreq_read *,
196 sizeof(struct puffs_vnreq_read), M_PUFFS, M_WAITOK | M_ZERO);
197
198 read_argp->pvnr_ioflag = 0;
199 read_argp->pvnr_resid = tomove;
200 read_argp->pvnr_offset = offset;
201 puffs_credcvt(&read_argp->pvnr_cred, FSCRED);
202
203 ppark->park_preq = (void *)read_argp;
204 ppark->park_preq->preq_opclass = PUFFSOP_VN;
205 ppark->park_preq->preq_optype = PUFFS_VN_READ;
206 ppark->park_preq->preq_cookie = cookie;
207
208 ppark->park_copylen = sizeof(struct puffs_vnreq_read);
209 ppark->park_maxlen = sizeof(struct puffs_vnreq_read) + tomove;
210 ppark->park_bp = bp;
211 ppark->park_flags
212 = PUFFS_PARKFLAG_ADJUSTABLE | PUFFS_PARKFLAG_ASYNCBIOREAD;
213
214 (void)touser(pmp, ppark, puffs_getreqid(pmp), vp1, vp2);
215 }
216
217 /*
218 * Notice: kbuf will be free'd later. I must be allocated from the
219 * kernel heap and it's ownership is shifted to this function from
220 * now on, i.e. the caller is not allowed to use it anymore!
221 */
222 void
223 puffs_vntouser_faf(struct puffs_mount *pmp, int optype,
224 void *kbuf, size_t buflen, void *cookie)
225 {
226 struct puffs_park *ppark;
227
228 /* XXX: is it allowable to sleep here? */
229 ppark = malloc(sizeof(struct puffs_park), M_PUFFS, M_NOWAIT | M_ZERO);
230 if (ppark == NULL)
231 return; /* 2bad */
232
233 ppark->park_preq = kbuf;
234
235 ppark->park_preq->preq_opclass = PUFFSOP_VN | PUFFSOPFLAG_FAF;
236 ppark->park_preq->preq_optype = optype;
237 ppark->park_preq->preq_cookie = cookie;
238
239 ppark->park_maxlen = ppark->park_copylen = buflen;
240 ppark->park_flags = 0;
241
242 (void)touser(pmp, ppark, 0, NULL, NULL);
243 }
244
245 void
246 puffs_cacheop(struct puffs_mount *pmp, struct puffs_park *ppark,
247 struct puffs_cacheinfo *pcinfo, size_t pcilen, void *cookie)
248 {
249
250 ppark->park_preq = (struct puffs_req *)pcinfo;
251 ppark->park_preq->preq_opclass = PUFFSOP_CACHE | PUFFSOPFLAG_FAF;
252 ppark->park_preq->preq_optype = PCACHE_TYPE_WRITE; /* XXX */
253 ppark->park_preq->preq_cookie = cookie;
254
255 ppark->park_maxlen = ppark->park_copylen = pcilen;
256
257 (void)touser(pmp, ppark, 0, NULL, NULL);
258 }
259
/*
 * Wait for the userspace ping-pong game in calling process context.
 *
 * This unlocks vnodes if they are supplied.  vp1 is the vnode
 * before in the locking order, i.e. the one which must be locked
 * before accessing vp2.  This is done here so that operations are
 * already ordered in the queue when vnodes are unlocked (I'm not
 * sure if that's really necessary, but it can't hurt).  Okok, maybe
 * there's a slight ugly-factor also, but let's not worry about that.
 *
 * Returns 0 or the operation's/interruption's error code.  For FAF
 * and async-bioread ops no reply is waited for.
 */
static int
touser(struct puffs_mount *pmp, struct puffs_park *ppark, uint64_t reqid,
	struct vnode *vp1, struct vnode *vp2)
{
	struct lwp *l = curlwp;
	struct mount *mp;
	struct puffs_req *preq;
	int rv = 0;

	mp = PMPTOMP(pmp);
	preq = ppark->park_preq;
	preq->preq_id = ppark->park_id = reqid;
	preq->preq_buflen = ALIGN(ppark->park_maxlen);

	/*
	 * To support PCATCH, yet another movie: check if there are signals
	 * pending and we are issueing a non-FAF.  If so, return an error
	 * directly UNLESS we are issueing INACTIVE.  In that case, convert
	 * it to a FAF, fire off to the file server and return an error.
	 * Yes, this is bordering disgusting.  Barfbags are on me.
	 */
	if (PUFFSOP_WANTREPLY(ppark->park_preq->preq_opclass)
	    && (ppark->park_flags & PUFFS_PARKFLAG_ASYNCBIOREAD) == 0
	    && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0)) {
		if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
		    && preq->preq_optype == PUFFS_VN_INACTIVE) {
			struct puffs_park *newpark;

			/* INACTIVE must reach the server: demote to FAF */
			newpark = puffs_reqtofaf(ppark);
			DPRINTF(("puffs touser: converted to FAF, old %p, "
			    "new %p\n", ppark, newpark));
			ppark = newpark;
			rv = EINTR;
		} else {
			return EINTR;
		}
	}

	/*
	 * test for suspension lock.
	 *
	 * Note that we *DO NOT* keep the lock, since that might block
	 * lock acquiring PLUS it would give userlandia control over
	 * the lock.  The operation queue enforces a strict ordering:
	 * when the fs server gets in the op stream, it knows things
	 * are in order.  The kernel locks can't guarantee that for
	 * userspace, in any case.
	 *
	 * BUT: this presents a problem for ops which have a consistency
	 * clause based on more than one operation.  Unfortunately such
	 * operations (read, write) do not reliably work yet.
	 *
	 * Ya, Ya, it's wrong wong wrong, me be fixink this someday.
	 *
	 * XXX: and there is one more problem.  We sometimes need to
	 * take a lazy lock in case the fs is suspending and we are
	 * executing as the fs server context.  This might happen
	 * e.g. in the case that the user server triggers a reclaim
	 * in the kernel while the fs is suspending.  It's not a very
	 * likely event, but it needs to be fixed some day.
	 */
	fstrans_start(mp, FSTRANS_NORMAL);
	simple_lock(&pmp->pmp_lock);
	fstrans_done(mp);

	if (pmp->pmp_status != PUFFSTAT_RUNNING) {
		simple_unlock(&pmp->pmp_lock);
		return ENXIO;
	}

	/* enqueue for the file server to fetch via getop */
	TAILQ_INSERT_TAIL(&pmp->pmp_req_touser, ppark, park_entries);
	pmp->pmp_req_touser_waiters++;

	/*
	 * Don't do unlock-relock dance yet.  There are a couple of
	 * unsolved issues with it.  If we don't unlock, we can have
	 * processes wanting vn_lock in case userspace hangs.  But
	 * that can be "solved" by killing the userspace process.  It
	 * would of course be nicer to have antilocking in the userspace
	 * interface protocol itself.. your patience will be rewarded.
	 */
#if 0
	/* unlock */
	if (vp2)
		VOP_UNLOCK(vp2, 0);
	if (vp1)
		VOP_UNLOCK(vp1, 0);
#endif

	/*
	 * XXX: does releasing the lock here cause trouble?  Can't hold
	 * it, because otherwise the below would cause locking against
	 * oneself-problems in the kqueue stuff.  yes, it is a
	 * theoretical race, so it must be solved
	 */
	simple_unlock(&pmp->pmp_lock);

	DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
	    "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, ppark,
	    preq->preq_opclass, preq->preq_optype, ppark->park_flags));

	/* poke the file server: there is work on the queue */
	wakeup(&pmp->pmp_req_touser);
	selnotify(pmp->pmp_sel, 0);

	if (PUFFSOP_WANTREPLY(ppark->park_preq->preq_opclass)
	    && (ppark->park_flags & PUFFS_PARKFLAG_ASYNCBIOREAD) == 0) {
		struct puffs_park *valetpark = NULL;
		int error;

		error = ltsleep(ppark, PUSER | PCATCH, "puffs1", 0, NULL);
		/* server's return value; authoritative only if error == 0 */
		rv = ppark->park_preq->preq_rv;

		/*
		 * Ok, so it gets a bit tricky around here once again.
		 * We want to give interruptibility to the sleep to work
		 * around all kinds of locking-against-oneself problems
		 * and the file system recursing into itself and so forth.
		 * So if we break out of the ltsleep() for anything except
		 * natural causes, we need to caution ourselves.
		 *
		 * The stages at which we can break out are:
		 * 1) operation waiting to be fetched by file server
		 * 2) operation being copied to userspace, not on either queue
		 * 3) file server operating on .. err .. operation
		 * 4) putop: locate the correct park structure from the queue
		 * 5) putop: copy response from userspace
		 * 6) putop: wakeup waiter
		 *
		 * If we are still at stage 1, no problem, just remove
		 * ourselves from the queue to userspace.  If we are at
		 * the stage before 4 has completed, replace the park structure
		 * with a park structure indicating that the caller is
		 * no more and no proper reply is required.  If the server
		 * is already copying data from userspace to the kernel,
		 * wait for it to finish and return the real return value to
		 * the caller.
		 */
 checkagain:
		/* left-over valet from a previous round of this loop */
		if (valetpark) {
			FREE(valetpark, M_PUFFS);
			valetpark = NULL;
		}

		if (error) {
			DPRINTF(("puffs touser: got %d from ltsleep, "
			    "(unlocked) flags 0x%x (park %p)\n",
			    error, ppark->park_flags, ppark));
			rv = error;

			/* allocate before taking the lock: M_WAITOK sleeps */
			MALLOC(valetpark, struct puffs_park *,
			    sizeof(struct puffs_park), M_PUFFS,
			    M_ZERO | M_WAITOK);

			simple_lock(&pmp->pmp_lock);

			/*
			 * The order here for the clauses, per description
			 * in comment above, is:
			 * 1, after 6, after 4, 2-3.
			 */
			if ((ppark->park_flags&PUFFS_PARKFLAG_PROCESSING)==0) {
				/* stage 1: still on the touser queue */
				TAILQ_REMOVE(&pmp->pmp_req_touser, ppark,
				    park_entries);
				simple_unlock(&pmp->pmp_lock);
				FREE(valetpark, M_PUFFS);
				DPRINTF(("puffs touser: park %p removed "
				    "from queue one\n", ppark));
			} else if
			    (ppark->park_flags & PUFFS_PARKFLAG_RECVREPLY) {
				if (ppark->park_flags & PUFFS_PARKFLAG_DONE) {
					/* after stage 6: real reply is in */
					rv = ppark->park_preq->preq_rv;
					simple_unlock(&pmp->pmp_lock);
					FREE(valetpark, M_PUFFS);
				} else {
					/* mid-copy (stage 5): wait it out */
					error = ltsleep(ppark,
					    PUSER | PCATCH | PNORELOCK,
					    "puffsre1", 0, &pmp->pmp_lock);
					goto checkagain;
				}
			} else {
				/*
				 * stages 2-3: leave a valet park behind
				 * so putop can match the id and discard
				 * the stale reply.
				 */
				valetpark->park_flags
				    = PUFFS_PARKFLAG_WAITERGONE;
				ppark->park_flags |= PUFFS_PARKFLAG_WAITERGONE;
				valetpark->park_id = ppark->park_id;

				if (ppark->park_flags & PUFFS_PARKFLAG_RQUEUE) {
					TAILQ_INSERT_BEFORE(ppark, valetpark,
					    park_entries);
					TAILQ_REMOVE(&pmp->pmp_req_replywait,
					    ppark, park_entries);
				} else {
					TAILQ_INSERT_TAIL(
					    &pmp->pmp_req_replywait,
					    valetpark, park_entries);
				}

				simple_unlock(&pmp->pmp_lock);
				DPRINTF(("puffs touser: replaced park %p "
				    "with valet park %p\n", ppark, valetpark));
			}
		}

		/*
		 * retake the lock and release.  This makes sure (haha,
		 * I'm humorous) that we don't process the same vnode in
		 * multiple threads due to the locks hacks we have in
		 * puffs_lock().  In reality this is well protected by
		 * the biglock, but once that's gone, well, hopefully
		 * this will be fixed for real.  (and when you read this
		 * comment in 2017 and subsequently barf, my condolences ;).
		 */
		if (rv == 0 && !fstrans_is_owner(mp)) {
			fstrans_start(mp, FSTRANS_NORMAL);
			fstrans_done(mp);
		}
	}

#if 0
	/* relock */
	if (vp1)
		KASSERT(vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY) == 0);
	if (vp2)
		KASSERT(vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY) == 0);
#endif

	simple_lock(&pmp->pmp_lock);
	if (--pmp->pmp_req_touser_waiters == 0)
		wakeup(&pmp->pmp_req_touser_waiters);
	simple_unlock(&pmp->pmp_lock);

	return rv;
}
502
503
/*
 * getop: scan through queued requests until:
 * 1) max number of requests satisfied
 *     OR
 * 2) buffer runs out of space
 *     OR
 * 3) nonblocking is set AND there are no operations available
 *     OR
 * 4) at least one operation was transferred AND there are no more waiting
 *
 * Called on behalf of the file server process; copies queued requests
 * out to the server's receive buffer.
 */
int
puffs_getop(struct puffs_mount *pmp, struct puffs_reqh_get *phg, int nonblock)
{
	struct puffs_park *park;
	struct puffs_req *preq;
	uint8_t *bufpos;
	int error, donesome;

	donesome = error = 0;
	bufpos = phg->phg_buf;

	simple_lock(&pmp->pmp_lock);
	/* phg_nops == 0 means "as many as fit in the buffer" */
	while (phg->phg_nops == 0 || donesome != phg->phg_nops) {
 again:
		if (pmp->pmp_status != PUFFSTAT_RUNNING) {
			/* if we got some, they don't really matter anymore */
			error = ENXIO;
			goto out;
		}
		if (TAILQ_EMPTY(&pmp->pmp_req_touser)) {
			if (donesome)
				goto out;

			if (nonblock) {
				error = EWOULDBLOCK;
				goto out;
			}

			/* drops pmp_lock while asleep, retakes it on wakeup */
			error = ltsleep(&pmp->pmp_req_touser, PUSER | PCATCH,
			    "puffs2", 0, &pmp->pmp_lock);
			if (error)
				goto out;
			else
				goto again;
		}

		park = TAILQ_FIRST(&pmp->pmp_req_touser);
		preq = park->park_preq;

		if (phg->phg_buflen < preq->preq_buflen) {
			if (!donesome)
				error = E2BIG;
			goto out;
		}
		TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
		/* mark off-queue so an interrupted waiter in touser() knows */
		park->park_flags |= PUFFS_PARKFLAG_PROCESSING;
		simple_unlock(&pmp->pmp_lock);

		DPRINTF(("puffsgetop: get op %" PRIu64 " (%d.), from %p "
		    "len %zu (buflen %zu), target %p\n", preq->preq_id,
		    donesome, preq, park->park_copylen, preq->preq_buflen,
		    bufpos));

		if ((error = copyout(preq, bufpos, park->park_copylen)) != 0) {
			DPRINTF((" FAILED %d\n", error));
			/*
			 * ok, user server is probably trying to cheat.
			 * stuff op back & return error to user
			 */
			simple_lock(&pmp->pmp_lock);
			TAILQ_INSERT_HEAD(&pmp->pmp_req_touser, park,
			    park_entries);

			if (donesome)
				error = 0;
			goto out;
		}
		bufpos += preq->preq_buflen;
		phg->phg_buflen -= preq->preq_buflen;
		donesome++;

		simple_lock(&pmp->pmp_lock);
		if (PUFFSOP_WANTREPLY(preq->preq_opclass)) {
			/*
			 * NOTE(review): when WAITERGONE is already set here
			 * the park is neither queued nor freed on this path —
			 * looks like it may be leaked.  Confirm against the
			 * touser() valet-park protocol before changing.
			 */
			if ((park->park_flags & PUFFS_PARKFLAG_WAITERGONE)==0) {
				TAILQ_INSERT_TAIL(&pmp->pmp_req_replywait, park,
				    park_entries);
				park->park_flags |= PUFFS_PARKFLAG_RQUEUE;
			}
		} else {
			/* FAF: nobody is waiting, free request and park now */
			simple_unlock(&pmp->pmp_lock);
			free(preq, M_PUFFS);
			free(park, M_PUFFS);
			simple_lock(&pmp->pmp_lock);
		}
	}

 out:
	phg->phg_more = pmp->pmp_req_touser_waiters;
	simple_unlock(&pmp->pmp_lock);

	phg->phg_nops = donesome;

	return error;
}
608
/*
 * putop: accept a chain of replies from the file server and wake up
 * the parked in-kernel waiters (or complete async bio reads).
 *
 * urgh, too complex, be very very careful while editing, i.e. NEEDS CLEANUP
 */
int
puffs_putop(struct puffs_mount *pmp, struct puffs_reqh_put *php)
{
	struct puffs_park *park;
	struct puffs_req tmpreq;
	struct puffs_req *nextpreq;
	struct buf *bp;
	void *userbuf;
	uint64_t id;
	size_t reqlen;
	int donesome, error, wgone;

	donesome = error = wgone = 0;

	id = php->php_id;
	userbuf = php->php_buf;
	reqlen = php->php_buflen;

	simple_lock(&pmp->pmp_lock);
	while (donesome != php->php_nops) {
#ifdef PUFFSDEBUG
		simple_unlock(&pmp->pmp_lock);
		DPRINTF(("puffsputop: searching for %" PRIu64 ", ubuf: %p, "
		    "len %zu\n", id, userbuf, reqlen));
		simple_lock(&pmp->pmp_lock);
#endif
		/* match the reply id against a parked waiter */
		TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
			if (park->park_id == id)
				break;
		}

		if (park == NULL) {
			error = EINVAL;
			break;
		}
		TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);
		park->park_flags |= PUFFS_PARKFLAG_RECVREPLY;
		simple_unlock(&pmp->pmp_lock);

		/*
		 * If the caller has gone south, go to next, collect
		 * $200 and free the structure there instead of wakeup.
		 * We also need to copyin the request header anyway, to
		 * recover the id/length/buffer of the next reply in the
		 * chain so the loop can continue.
		 */
		if (park->park_flags & PUFFS_PARKFLAG_WAITERGONE) {
			DPRINTF(("puffs_putop: bad service - waiter gone for "
			    "park %p\n", park));
			wgone = 1;
			error = copyin(userbuf, &tmpreq,
			    sizeof(struct puffs_req));
			if (error)
				goto loopout;
			nextpreq = &tmpreq;
			goto next;
		}

		if (park->park_flags & PUFFS_PARKFLAG_ADJUSTABLE) {
			/* sanitycheck size of incoming transmission. */
			if (reqlen > pmp->pmp_req_maxsize) {
				DPRINTF(("puffsputop: outrageous user buf "
				    "size: %zu\n", reqlen));
				error = EINVAL;
				goto loopout;
			}

			if (reqlen > park->park_copylen) {
				if (reqlen > park->park_maxlen) {
					DPRINTF(("puffsputop: adj copysize "
					    "> max size, %zu vs %zu\n",
					    reqlen, park->park_maxlen));
					error = EINVAL;
					goto loopout;
				}
				/* grow the kernel buffer to fit the reply */
				free(park->park_preq, M_PUFFS);
				park->park_preq = malloc(reqlen,
				    M_PUFFS, M_WAITOK);

				park->park_copylen = reqlen;
				DPRINTF(("puffsputop: adjbuf, new addr %p, "
				    "len %zu\n", park->park_preq, reqlen));
			}
		} else {
			/* fixed-size reply: clamp to the kernel's size */
			if (reqlen == 0 || reqlen > park->park_copylen) {
				reqlen = park->park_copylen;
				DPRINTF(("puffsputop: kernel bufsize override: "
				    "%zu\n", reqlen));
			}
		}

		DPRINTF(("puffsputpop: copyin from %p to %p, len %zu\n",
		    userbuf, park->park_preq, reqlen));
		error = copyin(userbuf, park->park_preq, reqlen);
		if (error)
			goto loopout;
		nextpreq = park->park_preq;
		bp = park->park_bp;

		if (park->park_flags & PUFFS_PARKFLAG_ASYNCBIOREAD) {
			struct puffs_vnreq_read *read_argp;
			size_t moved;

			bp->b_error = park->park_preq->preq_rv;

			DPRINTF(("puffs_putop: async bioread for park %p, "
			    "bp %p, error %d\n", park, bp, bp->b_error));

			if (bp->b_error == 0) {
				/* moved = maxlen - header - leftover resid */
				read_argp = (void *)park->park_preq;
				moved = park->park_maxlen
				    - sizeof(struct puffs_vnreq_read)
				    - read_argp->pvnr_resid;
				memcpy(bp->b_data, read_argp->pvnr_data, moved);
				bp->b_resid = bp->b_bcount - moved;
				biodone(bp);
			}
		}

 next:
		/* all's well, prepare for next op */
		id = nextpreq->preq_id;
		reqlen = nextpreq->preq_buflen;
		userbuf = nextpreq->preq_nextbuf;
		donesome++;

 loopout:
		if (error && park->park_preq) {
			park->park_preq->preq_rv = error;
			if (park->park_flags & PUFFS_PARKFLAG_ASYNCBIOREAD) {
				bp = park->park_bp;
				bp->b_error = error;
				bp->b_flags |= B_ERROR;
				biodone(bp);
			}
		}

		if (wgone) {
			/* waiter gone: this is the valet park, just free it */
			FREE(park, M_PUFFS);
			simple_lock(&pmp->pmp_lock);
		} else if (park->park_flags & PUFFS_PARKFLAG_ASYNCBIOREAD) {
			/* async bio: nobody sleeps on the park, free here */
			free(park->park_preq, M_PUFFS);
			FREE(park, M_PUFFS);
			simple_lock(&pmp->pmp_lock);
		} else {
			DPRINTF(("puffs_putop: flagging done for park %p\n",
			    park));
			simple_lock(&pmp->pmp_lock);
			park->park_flags |= PUFFS_PARKFLAG_DONE;
			wakeup(park);
		}

		if (error)
			break;
		wgone = 0;
	}

	simple_unlock(&pmp->pmp_lock);
	php->php_nops -= donesome;

	return error;
}
772
/* this is probably going to die away at some point? */
/*
 * XXX: currently bitrotted (compiled out below).
 */
#if 0
/*
 * Handle a size operation from the file server: move data between
 * the server's buffer and a parked size-op request, either through
 * a uio or a plain kernel buffer.
 *
 * NOTE(review): pspark is removed from the queue but never freed on
 * any path here — verify ownership before resurrecting this code.
 */
static int
puffssizeop(struct puffs_mount *pmp, struct puffs_sizeop *psop_user)
{
	struct puffs_sizepark *pspark;
	void *kernbuf;
	size_t copylen;
	int error;

	/* locate correct op */
	simple_lock(&pmp->pmp_lock);
	TAILQ_FOREACH(pspark, &pmp->pmp_req_sizepark, pkso_entries) {
		if (pspark->pkso_reqid == psop_user->pso_reqid) {
			TAILQ_REMOVE(&pmp->pmp_req_sizepark, pspark,
			    pkso_entries);
			break;
		}
	}
	simple_unlock(&pmp->pmp_lock);

	if (pspark == NULL)
		return EINVAL;

	error = 0;
	copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);

	/*
	 * XXX: uvm stuff to avoid bouncy-bouncy copying?
	 */
	if (PUFFS_SIZEOP_UIO(pspark->pkso_reqtype)) {
		kernbuf = malloc(copylen, M_PUFFS, M_WAITOK | M_ZERO);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_IN) {
			error = copyin(psop_user->pso_userbuf,
			    kernbuf, copylen);
			if (error) {
				printf("psop ERROR1 %d\n", error);
				goto escape;
			}
		}
		error = uiomove(kernbuf, copylen, pspark->pkso_uio);
		if (error) {
			printf("uiomove from kernel %p, len %d failed: %d\n",
			    kernbuf, (int)copylen, error);
			goto escape;
		}

		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_OUT) {
			error = copyout(kernbuf,
			    psop_user->pso_userbuf, copylen);
			if (error) {
				printf("psop ERROR2 %d\n", error);
				goto escape;
			}
		}
 escape:
		free(kernbuf, M_PUFFS);
	} else if (PUFFS_SIZEOP_BUF(pspark->pkso_reqtype)) {
		/*
		 * NOTE(review): MAX looks suspicious — the uio path above
		 * uses MIN, and copying the larger of the two sizes could
		 * overrun the smaller buffer.  Confirm intent before
		 * re-enabling.
		 */
		copylen = MAX(pspark->pkso_bufsize, psop_user->pso_bufsize);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_BUF_IN) {
			error = copyin(psop_user->pso_userbuf,
			    pspark->pkso_copybuf, copylen);
		} else {
			error = copyout(pspark->pkso_copybuf,
			    psop_user->pso_userbuf, copylen);
		}
	}
#ifdef DIAGNOSTIC
	else
		panic("puffssizeop: invalid reqtype %d\n",
		    pspark->pkso_reqtype);
#endif /* DIAGNOSTIC */

	return error;
}
#endif
852