/*	$NetBSD: puffs_msgif.c,v 1.6.2.5 2007/02/09 21:03:52 ad Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.6.2.5 2007/02/09 21:03:52 ad Exp $");

#include <sys/param.h>
#include <sys/fstrans.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/lock.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>


/*
 * kernel-user-kernel waitqueues
 */

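/*
 * Rough sketch of the kernel-user round trip implemented below:
 * touser() enqueues a park on pmp_req_touser and wakes up the file
 * server (wakeup + selnotify), then sleeps on the park if a reply is
 * wanted. puffs_getop() copies the request out to the server and moves
 * the park to pmp_req_replywait. Once the server is done, puffs_putop()
 * copies the reply back over park_preq and wakes up the sleeping kernel
 * thread, which returns preq_rv to its caller.
 */
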
static int touser(struct puffs_mount *, struct puffs_park *, uint64_t,
    struct vnode *, struct vnode *);

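/* produce a fresh request id (unique per mount) */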
uint64_t
puffs_getreqid(struct puffs_mount *pmp)
{
	uint64_t rv;

	simple_lock(&pmp->pmp_lock);
	rv = pmp->pmp_nextreq++;
	simple_unlock(&pmp->pmp_lock);

	return rv;
}

/* vfs request */
int
puffs_vfstouser(struct puffs_mount *pmp, int optype, void *kbuf, size_t buflen)
{
	struct puffs_park park;

	park.park_preq = kbuf;

	park.park_preq->preq_opclass = PUFFSOP_VFS;
	park.park_preq->preq_optype = optype;

	park.park_maxlen = park.park_copylen = buflen;

	return touser(pmp, &park, puffs_getreqid(pmp), NULL, NULL);
}

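/*
 * Note on the "kbuf" handed to the *touser() routines: the buffer must
 * begin with a struct puffs_req, since park_preq points at its start
 * and the preq_* fields are filled in through it. A caller therefore
 * does roughly the following (names below are invented purely for
 * illustration, this is not a real request type):
 *
 *	struct puffs_vfsreq_foo foo_arg;
 *
 *	memset(&foo_arg, 0, sizeof(foo_arg));
 *	foo_arg.pvfsr_foo = bar;
 *	error = puffs_vfstouser(pmp, PUFFS_VFS_FOO,
 *	    &foo_arg, sizeof(foo_arg));
 *
 * where struct puffs_vfsreq_foo embeds a struct puffs_req at offset 0.
 */

/*
 * Tell the file server that file system suspension has changed state.
 * This is fire-and-forget (PUFFSOPFLAG_FAF): nobody waits for an
 * answer, so the request and its park are allocated from the heap and
 * freed by puffs_getop() once they have been copied out to userspace.
 */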
void
puffs_suspendtouser(struct puffs_mount *pmp, int status)
{
	struct puffs_vfsreq_suspend *pvfsr_susp;
	struct puffs_park *ppark;

	pvfsr_susp = malloc(sizeof(struct puffs_vfsreq_suspend),
	    M_PUFFS, M_WAITOK | M_ZERO);
	ppark = malloc(sizeof(struct puffs_park), M_PUFFS, M_WAITOK | M_ZERO);

	pvfsr_susp->pvfsr_status = status;
	ppark->park_preq = (struct puffs_req *)pvfsr_susp;

	ppark->park_preq->preq_opclass = PUFFSOP_VFS | PUFFSOPFLAG_FAF;
	ppark->park_preq->preq_optype = PUFFS_VFS_SUSPEND;

	ppark->park_maxlen = ppark->park_copylen
	    = sizeof(struct puffs_vfsreq_suspend);

	(void)touser(pmp, ppark, 0, NULL, NULL);
}

/*
 * vnode level request
 */
int
puffs_vntouser(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, void *cookie,
	struct vnode *vp1, struct vnode *vp2)
{
	struct puffs_park park;

	park.park_preq = kbuf;

	park.park_preq->preq_opclass = PUFFSOP_VN;
	park.park_preq->preq_optype = optype;
	park.park_preq->preq_cookie = cookie;

	park.park_maxlen = park.park_copylen = buflen;

	return touser(pmp, &park, puffs_getreqid(pmp), vp1, vp2);
}

/*
 * vnode level request, caller-controlled req id
 */
int
puffs_vntouser_req(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, void *cookie, uint64_t reqid,
	struct vnode *vp1, struct vnode *vp2)
{
	struct puffs_park park;

	park.park_preq = kbuf;

	park.park_preq->preq_opclass = PUFFSOP_VN;
	park.park_preq->preq_optype = optype;
	park.park_preq->preq_cookie = cookie;

	park.park_maxlen = park.park_copylen = buflen;

	return touser(pmp, &park, reqid, vp1, vp2);
}

/*
 * vnode level request, copy routines can adjust "kernbuf".
 * We overload park_copylen != park_maxlen to signal that the park
 * in question is of adjusting type.
 */
int
puffs_vntouser_adjbuf(struct puffs_mount *pmp, int optype,
	void **kbuf, size_t *buflen, size_t maxdelta,
	void *cookie, struct vnode *vp1, struct vnode *vp2)
{
	struct puffs_park park;
	int error;

	park.park_preq = *kbuf;

	park.park_preq->preq_opclass = PUFFSOP_VN;
	park.park_preq->preq_optype = optype;
	park.park_preq->preq_cookie = cookie;

	park.park_copylen = *buflen;
	park.park_maxlen = maxdelta + *buflen;

	error = touser(pmp, &park, puffs_getreqid(pmp), vp1, vp2);

	*kbuf = park.park_preq;
	*buflen = park.park_copylen;

	return error;
}

/*
 * Notice: kbuf will be free'd later. It must be allocated from the
 * kernel heap and its ownership is shifted to this function from
 * now on, i.e. the caller is not allowed to use it anymore!
 */
void
puffs_vntouser_faf(struct puffs_mount *pmp, int optype,
	void *kbuf, size_t buflen, void *cookie)
{
	struct puffs_park *ppark;

	/* XXX: is it allowable to sleep here? */
	ppark = malloc(sizeof(struct puffs_park), M_PUFFS, M_NOWAIT | M_ZERO);
	if (ppark == NULL)
		return; /* 2bad */

	ppark->park_preq = kbuf;

	ppark->park_preq->preq_opclass = PUFFSOP_VN | PUFFSOPFLAG_FAF;
	ppark->park_preq->preq_optype = optype;
	ppark->park_preq->preq_cookie = cookie;

	ppark->park_maxlen = ppark->park_copylen = buflen;

	(void)touser(pmp, ppark, 0, NULL, NULL);
}

/*
 * Wait for the userspace ping-pong game in calling process context.
 *
 * This unlocks vnodes if they are supplied. vp1 is the vnode
 * before in the locking order, i.e. the one which must be locked
 * before accessing vp2. This is done here so that operations are
 * already ordered in the queue when vnodes are unlocked (I'm not
 * sure if that's really necessary, but it can't hurt). Okok, maybe
 * there's a slight ugly-factor also, but let's not worry about that.
 */
static int
touser(struct puffs_mount *pmp, struct puffs_park *ppark, uint64_t reqid,
	struct vnode *vp1, struct vnode *vp2)
{
	struct mount *mp;
	struct puffs_req *preq;

	mp = PMPTOMP(pmp);

	/*
	 * test for suspension lock.
	 *
	 * Note that we *DO NOT* keep the lock, since that might block
	 * lock acquiring PLUS it would give userlandia control over
	 * the lock. The operation queue enforces a strict ordering:
	 * when the fs server gets in the op stream, it knows things
	 * are in order. The kernel locks can't guarantee that for
	 * userspace, in any case.
	 *
	 * BUT: this presents a problem for ops which have a consistency
	 * clause based on more than one operation. Unfortunately such
	 * operations (read, write) do not reliably work yet.
	 *
	 * Ya, Ya, it's wrong wong wrong, me be fixink this someday.
	 *
	 * XXX: and there is one more problem. We sometimes need to
	 * take a lazy lock in case the fs is suspending and we are
	 * executing as the fs server context. This might happen
	 * e.g. in the case that the user server triggers a reclaim
	 * in the kernel while the fs is suspending. It's not a very
	 * likely event, but it needs to be fixed some day.
	 */
	fstrans_start(mp, FSTRANS_NORMAL);
	simple_lock(&pmp->pmp_lock);
	fstrans_done(mp);

	if (pmp->pmp_status != PUFFSTAT_RUNNING) {
		simple_unlock(&pmp->pmp_lock);
		return ENXIO;
	}

	preq = ppark->park_preq;
	preq->preq_id = reqid;
	preq->preq_buflen = ALIGN(ppark->park_maxlen);

	TAILQ_INSERT_TAIL(&pmp->pmp_req_touser, ppark, park_entries);
	pmp->pmp_req_touser_waiters++;

	/*
	 * Don't do unlock-relock dance yet. There are a couple of
	 * unsolved issues with it. If we don't unlock, we can have
	 * processes wanting vn_lock in case userspace hangs. But
	 * that can be "solved" by killing the userspace process. It
	 * would of course be nicer to have antilocking in the userspace
	 * interface protocol itself.. your patience will be rewarded.
	 */
#if 0
	/* unlock */
	if (vp2)
		VOP_UNLOCK(vp2, 0);
	if (vp1)
		VOP_UNLOCK(vp1, 0);
#endif

	/*
	 * XXX: does releasing the lock here cause trouble? We can't hold
	 * it, because otherwise the code below would cause
	 * locking-against-oneself problems in the kqueue stuff. Yes, it
	 * is a theoretical race, so it must be solved.
	 */
	simple_unlock(&pmp->pmp_lock);

	DPRINTF(("touser: enqueueing req %" PRIu64 ", preq: %p, park: %p, "
	    "c/t: 0x%x/0x%x\n", preq->preq_id, preq, ppark, preq->preq_opclass,
	    preq->preq_optype));

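	/*
	 * Wake up a file server thread sleeping in puffs_getop() and
	 * notify any select/poll/kqueue waiters that a request is ready.
	 */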
	wakeup(&pmp->pmp_req_touser);
	selnotify(pmp->pmp_sel, 0);

	if (PUFFSOP_WANTREPLY(ppark->park_preq->preq_opclass)) {
		ltsleep(ppark, PUSER, "puffs1", 0, NULL);

		/*
		 * retake the lock and release. This makes sure (haha,
		 * I'm humorous) that we don't process the same vnode in
		 * multiple threads due to the lock hacks we have in
		 * puffs_lock(). In reality this is well protected by
		 * the biglock, but once that's gone, well, hopefully
		 * this will be fixed for real. (and when you read this
		 * comment in 2017 and subsequently barf, my condolences ;).
		 */
		if (!fstrans_is_owner(mp)) {
			fstrans_start(mp, FSTRANS_NORMAL);
			fstrans_done(mp);
		}
	}

#if 0
	/* relock */
	if (vp1)
		KASSERT(vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY) == 0);
	if (vp2)
		KASSERT(vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY) == 0);
#endif

	simple_lock(&pmp->pmp_lock);
	if (--pmp->pmp_req_touser_waiters == 0)
		wakeup(&pmp->pmp_req_touser_waiters);
	simple_unlock(&pmp->pmp_lock);

	return ppark->park_preq->preq_rv;
}


/*
 * getop: scan through queued requests until:
 *  1) max number of requests satisfied
 *     OR
 *  2) buffer runs out of space
 *     OR
 *  3) nonblocking is set AND there are no operations available
 *     OR
 *  4) at least one operation was transferred AND there are no more waiting
 */
int
puffs_getop(struct puffs_mount *pmp, struct puffs_reqh_get *phg, int nonblock)
{
	struct puffs_park *park;
	struct puffs_req *preq;
	uint8_t *bufpos;
	int error, donesome;

	donesome = error = 0;
	bufpos = phg->phg_buf;

	simple_lock(&pmp->pmp_lock);
	while (phg->phg_nops == 0 || donesome != phg->phg_nops) {
 again:
		if (pmp->pmp_status != PUFFSTAT_RUNNING) {
			/* if we got some, they don't really matter anymore */
			error = ENXIO;
			goto out;
		}
		if (TAILQ_EMPTY(&pmp->pmp_req_touser)) {
			if (donesome)
				goto out;

			if (nonblock) {
				error = EWOULDBLOCK;
				goto out;
			}

			error = ltsleep(&pmp->pmp_req_touser, PUSER | PCATCH,
			    "puffs2", 0, &pmp->pmp_lock);
			if (error)
				goto out;
			else
				goto again;
		}

		park = TAILQ_FIRST(&pmp->pmp_req_touser);
		preq = park->park_preq;

		if (phg->phg_buflen < preq->preq_buflen) {
			if (!donesome)
				error = E2BIG;
			goto out;
		}
		TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);

		simple_unlock(&pmp->pmp_lock);
		DPRINTF(("puffsgetop: get op %" PRIu64 " (%d.), from %p "
		    "len %zu (buflen %zu), target %p\n", preq->preq_id,
		    donesome, preq, park->park_copylen, preq->preq_buflen,
		    bufpos));

		if ((error = copyout(preq, bufpos, park->park_copylen)) != 0) {
			DPRINTF((" FAILED %d\n", error));
			/*
			 * ok, user server is probably trying to cheat.
			 * stuff op back & return error to user
			 */
			simple_lock(&pmp->pmp_lock);
			TAILQ_INSERT_HEAD(&pmp->pmp_req_touser, park,
			    park_entries);

			if (donesome)
				error = 0;
			goto out;
		}
		bufpos += preq->preq_buflen;
		phg->phg_buflen -= preq->preq_buflen;
		donesome++;

		simple_lock(&pmp->pmp_lock);
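		/*
		 * Ops that expect a reply are parked on the replywait
		 * queue until puffs_putop() sees the answer. Ops that
		 * are fire-and-forget get no reply, so this was their
		 * last reference: free them now.
		 */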
		if (PUFFSOP_WANTREPLY(preq->preq_opclass)) {
			TAILQ_INSERT_TAIL(&pmp->pmp_req_replywait, park,
			    park_entries);
		} else {
			simple_unlock(&pmp->pmp_lock);
			free(preq, M_PUFFS);
			free(park, M_PUFFS);
			simple_lock(&pmp->pmp_lock);
		}
	}

 out:
	phg->phg_more = pmp->pmp_req_touser_waiters;
	simple_unlock(&pmp->pmp_lock);

	phg->phg_nops = donesome;

	return error;
}

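/*
 * putop: handle replies from the file server. Each reply is matched by
 * request id against a park on the replywait queue and copied in on top
 * of the original request, after which the kernel thread sleeping in
 * touser() is woken up.
 */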
int
puffs_putop(struct puffs_mount *pmp, struct puffs_reqh_put *php)
{
	struct puffs_park *park;
	void *userbuf;
	uint64_t id;
	size_t reqlen;
	int error;
	int donesome;

	donesome = error = 0;

	id = php->php_id;
	userbuf = php->php_buf;
	reqlen = php->php_buflen;

	simple_lock(&pmp->pmp_lock);
	while (donesome != php->php_nops) {
#ifdef DEBUG
		simple_unlock(&pmp->pmp_lock);
		DPRINTF(("puffsputop: searching for %" PRIu64 ", ubuf: %p, "
		    "len %zu\n", id, userbuf, reqlen));
		simple_lock(&pmp->pmp_lock);
#endif
		TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
			if (park->park_preq->preq_id == id)
				break;
		}

		if (park == NULL) {
			error = EINVAL;
			break;
		}
		TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);
		simple_unlock(&pmp->pmp_lock);

		if (park->park_maxlen != park->park_copylen) {
			/* sanitycheck size of incoming transmission. */
			if (reqlen > pmp->pmp_req_maxsize) {
				DPRINTF(("puffsputop: outrageous user buf "
				    "size: %zu\n", reqlen));
				error = EINVAL;
				goto loopout;
			}

			if (reqlen > park->park_copylen) {
				if (reqlen > park->park_maxlen) {
					DPRINTF(("puffsputop: adj copysize "
					    "> max size, %zu vs %zu\n",
					    reqlen, park->park_maxlen));
					error = EINVAL;
					goto loopout;
				}
				free(park->park_preq, M_PUFFS);
				park->park_preq = malloc(reqlen,
				    M_PUFFS, M_WAITOK);

				park->park_copylen = reqlen;
				DPRINTF(("puffsputop: adjbuf, new addr %p, "
				    "len %zu\n", park->park_preq, reqlen));
			}
		} else {
			if (reqlen == 0 || reqlen > park->park_copylen) {
				reqlen = park->park_copylen;
				DPRINTF(("puffsputop: kernel bufsize override: "
				    "%zu\n", reqlen));
			}
		}

		DPRINTF(("puffsputop: copyin from %p to %p, len %zu\n",
		    userbuf, park->park_preq, reqlen));
		error = copyin(userbuf, park->park_preq, reqlen);
		if (error)
			goto loopout;

		/* all's well, prepare for next op */
		id = park->park_preq->preq_id;
		reqlen = park->park_preq->preq_buflen;
		userbuf = park->park_preq->preq_nextbuf;
		donesome++;

 loopout:
		if (error)
			park->park_preq->preq_rv = error;
		wakeup(park);

		simple_lock(&pmp->pmp_lock);
		if (error)
			break;
	}

	simple_unlock(&pmp->pmp_lock);
	php->php_nops -= donesome;

	return error;
}

/* this is probably going to die away at some point? */
/*
 * XXX: currently bitrotted
 */
#if 0
static int
puffssizeop(struct puffs_mount *pmp, struct puffs_sizeop *psop_user)
{
	struct puffs_sizepark *pspark;
	void *kernbuf;
	size_t copylen;
	int error;

	/* locate correct op */
	simple_lock(&pmp->pmp_lock);
	TAILQ_FOREACH(pspark, &pmp->pmp_req_sizepark, pkso_entries) {
		if (pspark->pkso_reqid == psop_user->pso_reqid) {
			TAILQ_REMOVE(&pmp->pmp_req_sizepark, pspark,
			    pkso_entries);
			break;
		}
	}
	simple_unlock(&pmp->pmp_lock);

	if (pspark == NULL)
		return EINVAL;

	error = 0;
	copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);

	/*
	 * XXX: uvm stuff to avoid bouncy-bouncy copying?
	 */
	if (PUFFS_SIZEOP_UIO(pspark->pkso_reqtype)) {
		kernbuf = malloc(copylen, M_PUFFS, M_WAITOK | M_ZERO);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_IN) {
			error = copyin(psop_user->pso_userbuf,
			    kernbuf, copylen);
			if (error) {
				printf("psop ERROR1 %d\n", error);
				goto escape;
			}
		}
		error = uiomove(kernbuf, copylen, pspark->pkso_uio);
		if (error) {
			printf("uiomove from kernel %p, len %d failed: %d\n",
			    kernbuf, (int)copylen, error);
			goto escape;
		}

		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_OUT) {
			error = copyout(kernbuf,
			    psop_user->pso_userbuf, copylen);
			if (error) {
				printf("psop ERROR2 %d\n", error);
				goto escape;
			}
		}
 escape:
		free(kernbuf, M_PUFFS);
	} else if (PUFFS_SIZEOP_BUF(pspark->pkso_reqtype)) {
		copylen = MAX(pspark->pkso_bufsize, psop_user->pso_bufsize);
		if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_BUF_IN) {
			error = copyin(psop_user->pso_userbuf,
			    pspark->pkso_copybuf, copylen);
		} else {
			error = copyout(pspark->pkso_copybuf,
			    psop_user->pso_userbuf, copylen);
		}
	}
#ifdef DIAGNOSTIC
	else
		panic("puffssizeop: invalid reqtype %d\n",
		    pspark->pkso_reqtype);
#endif /* DIAGNOSTIC */

	return error;
}
#endif