/*	$NetBSD: puffs_msgif.c,v 1.1 2006/10/22 22:43:23 pooka Exp $	*/

/*
 * Copyright (c) 2005, 2006  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.1 2006/10/22 22:43:23 pooka Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/socketvar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/lock.h>
#include <sys/poll.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <miscfs/syncfs/syncfs.h> /* XXX: for syncer_lock reference */


/*
 * kernel-user-kernel waitqueues
 */

static int touser(struct puffs_mount *, struct puffs_park *, unsigned int,
    struct vnode *, struct vnode *);

unsigned int
puffs_getreqid(struct puffs_mount *pmp)
{
        unsigned int rv;

        simple_lock(&pmp->pmp_lock);
        rv = pmp->pmp_nextreq++;
        simple_unlock(&pmp->pmp_lock);

        return rv;
}

/* vfs request */
int
puffs_vfstouser(struct puffs_mount *pmp, int optype, void *kbuf, size_t buflen)
{
        struct puffs_req preq;
        struct puffs_park park;

        memset(&preq, 0, sizeof(struct puffs_req));

        preq.preq_opclass = PUFFSOP_VFS;
        preq.preq_optype = optype;

        park.park_kernbuf = kbuf;
        park.park_buflen = buflen;
        park.park_copylen = buflen;
        park.park_flags = 0;
        park.park_preq = &preq;

        return touser(pmp, &park, puffs_getreqid(pmp), NULL, NULL);
}
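
/*
 * An illustrative sketch (not part of the original code) of how a
 * VFS-level operation might use the above: the caller marshals its
 * arguments into a plain buffer and lets puffs_vfstouser() bounce it
 * to the userspace server.  "struct puffs_vfsreq_sync" and its field
 * stand in for whatever argument structure puffs_msgif.h defines for
 * the op; PUFFS_VFS_SYNC is assumed to be one of the PUFFSOP_VFS
 * optypes.
 *
 *        struct puffs_vfsreq_sync arg;
 *
 *        memset(&arg, 0, sizeof(arg));
 *        arg.pvfsr_waitfor = waitfor;
 *        error = puffs_vfstouser(pmp, PUFFS_VFS_SYNC, &arg, sizeof(arg));
 */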

/*
 * vnode level request
 */
int
puffs_vntouser(struct puffs_mount *pmp, int optype,
    void *kbuf, size_t buflen, void *cookie,
    struct vnode *vp1, struct vnode *vp2)
{
        struct puffs_req preq;
        struct puffs_park park;

        memset(&preq, 0, sizeof(struct puffs_req));

        preq.preq_opclass = PUFFSOP_VN;
        preq.preq_optype = optype;
        preq.preq_cookie = cookie;

        park.park_kernbuf = kbuf;
        park.park_buflen = buflen;
        park.park_copylen = buflen;
        park.park_flags = 0;
        park.park_preq = &preq;

        return touser(pmp, &park, puffs_getreqid(pmp), vp1, vp2);
}
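
/*
 * Sketch only (names are illustrative, not from this file): a vnode
 * op would pass the target node's userspace cookie plus the locked
 * vnode(s), e.g. for a getattr-style operation:
 *
 *        struct puffs_vnreq_getattr arg;
 *
 *        memset(&arg, 0, sizeof(arg));
 *        error = puffs_vntouser(pmp, PUFFS_VN_GETATTR,
 *            &arg, sizeof(arg), VPTOPNC(vp), vp, NULL);
 *
 * where VPTOPNC() is assumed to map a vnode to the cookie that
 * userspace knows the node by.
 */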

/*
 * vnode level request, caller-controlled req id
 */
int
puffs_vntouser_req(struct puffs_mount *pmp, int optype,
    void *kbuf, size_t buflen, void *cookie, unsigned int reqid,
    struct vnode *vp1, struct vnode *vp2)
{
        struct puffs_req preq;
        struct puffs_park park;

        memset(&preq, 0, sizeof(struct puffs_req));

        preq.preq_opclass = PUFFSOP_VN;
        preq.preq_optype = optype;
        preq.preq_cookie = cookie;

        park.park_kernbuf = kbuf;
        park.park_buflen = buflen;
        park.park_copylen = buflen;
        park.park_flags = 0;
        park.park_preq = &preq;

        return touser(pmp, &park, reqid, vp1, vp2);
}

/*
 * vnode level request, copy routines can adjust "kernbuf"
 */
int
puffs_vntouser_adjbuf(struct puffs_mount *pmp, int optype,
    void **kbuf, size_t *buflen, size_t copylen, void *cookie,
    struct vnode *vp1, struct vnode *vp2)
{
        struct puffs_req preq;
        struct puffs_park park;
        int error;

        memset(&preq, 0, sizeof(struct puffs_req));

        preq.preq_opclass = PUFFSOP_VN;
        preq.preq_optype = optype;
        preq.preq_cookie = cookie;

        park.park_kernbuf = *kbuf;
        park.park_buflen = *buflen;
        park.park_copylen = copylen;
        park.park_flags = PUFFS_REQFLAG_ADJBUF;
        park.park_preq = &preq;

        error = touser(pmp, &park, puffs_getreqid(pmp), vp1, vp2);
        *kbuf = park.park_kernbuf;
        *buflen = park.park_buflen;

        return error;
}
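
/*
 * A minimal sketch of the adjbuf contract (illustrative readdir-style
 * usage assumed): the caller allocates the buffer from M_PUFFS and
 * afterwards must use the buffer and length handed back, since
 * puffsputop() may have replaced the buffer with a larger one:
 *
 *        void *buf = malloc(len, M_PUFFS, M_WAITOK | M_ZERO);
 *        error = puffs_vntouser_adjbuf(pmp, PUFFS_VN_READDIR,
 *            &buf, &len, len, VPTOPNC(vp), vp, NULL);
 *        ... consume buf/len ...
 *        free(buf, M_PUFFS);
 */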

/*
 * Wait for the userspace ping-pong game in calling process context.
 *
 * This unlocks vnodes if they are supplied.  vp1 is the vnode
 * before in the locking order, i.e. the one which must be locked
 * before accessing vp2.  This is done here so that operations are
 * already ordered in the queue when vnodes are unlocked (I'm not
 * sure if that's really necessary, but it can't hurt).  Okok, maybe
 * there's a slight ugly-factor also, but let's not worry about that.
 */
static int
touser(struct puffs_mount *pmp, struct puffs_park *park, unsigned int reqid,
    struct vnode *vp1, struct vnode *vp2)
{

        simple_lock(&pmp->pmp_lock);
        if (pmp->pmp_status != PUFFSTAT_RUNNING
            && pmp->pmp_status != PUFFSTAT_MOUNTING) {
                simple_unlock(&pmp->pmp_lock);
                return ENXIO;
        }

        park->park_preq->preq_id = reqid;

        TAILQ_INSERT_TAIL(&pmp->pmp_req_touser, park, park_entries);
        pmp->pmp_req_touser_waiters++;

        /*
         * Don't do unlock-relock dance yet.  There are a couple of
         * unsolved issues with it.  If we don't unlock, we can have
         * processes wanting vn_lock in case userspace hangs.  But
         * that can be "solved" by killing the userspace process.  It
         * would of course be nicer to have antilocking in the userspace
         * interface protocol itself ... your patience will be rewarded.
         */
#if 0
        /* unlock */
        if (vp2)
                VOP_UNLOCK(vp2, 0);
        if (vp1)
                VOP_UNLOCK(vp1, 0);
#endif

        /*
         * XXX: does releasing the lock here cause trouble?  Can't hold
         * it, because otherwise the below would cause locking against
         * oneself-problems in the kqueue stuff
         */
        simple_unlock(&pmp->pmp_lock);

        wakeup(&pmp->pmp_req_touser);
        selnotify(pmp->pmp_sel, 0);

        ltsleep(park, PUSER, "puffs1", 0, NULL);

#if 0
        /* relock */
        if (vp1)
                KASSERT(vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY) == 0);
        if (vp2)
                KASSERT(vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY) == 0);
#endif

        return park->park_preq->preq_rv;
}
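
/*
 * To recap the rendezvous above: touser() queues the park and pokes
 * the userspace server awake via wakeup() and selnotify().  The
 * server picks the request up with the PUFFSGETOP ioctl, and the
 * calling context sleeps on the park until puffsputop() (or
 * puffs_userdead()) fills in preq_rv and issues the matching
 * wakeup().
 */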

/*
 * We're dead, kaput, RIP, slightly more than merely pining for the
 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
 * our maker, ceased to be, etcetc.  YASD.  It's a dead FS!
 */
void
puffs_userdead(struct puffs_mount *pmp)
{
        struct puffs_park *park;

        simple_lock(&pmp->pmp_lock);

        /*
         * Mark filesystem status as dying so that operations don't
         * attempt to march to userspace any longer.
         */
        pmp->pmp_status = PUFFSTAT_DYING;

        /*
         * and wakeup processes waiting for a reply from userspace.
         * Pull each park off the list before waking its owner instead
         * of removing inside TAILQ_FOREACH: the woken thread owns the
         * (typically stack-allocated) park once it runs.
         */
        while ((park = TAILQ_FIRST(&pmp->pmp_req_replywait)) != NULL) {
                TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);
                park->park_preq->preq_rv = ENXIO;
                wakeup(park);
        }

        /* wakeup waiters for completion of vfs/vnode requests */
        while ((park = TAILQ_FIRST(&pmp->pmp_req_touser)) != NULL) {
                TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
                park->park_preq->preq_rv = ENXIO;
                wakeup(park);
        }

        simple_unlock(&pmp->pmp_lock);
}


/*
 * Device routines
 */

dev_type_open(puffscdopen);
dev_type_close(puffscdclose);
dev_type_ioctl(puffscdioctl);

/* dev */
const struct cdevsw puffs_cdevsw = {
        puffscdopen,    puffscdclose,   noread,         nowrite,
        noioctl,        nostop,         notty,          nopoll,
        nommap,         nokqfilter,     D_OTHER
};

static int puffs_fop_read(struct file *, off_t *, struct uio *,
    kauth_cred_t, int);
static int puffs_fop_write(struct file *, off_t *, struct uio *,
    kauth_cred_t, int);
static int puffs_fop_ioctl(struct file *, u_long, void *, struct lwp *);
static int puffs_fop_poll(struct file *, int, struct lwp *);
static int puffs_fop_close(struct file *, struct lwp *);
static int puffs_fop_kqfilter(struct file *, struct knote *);


/* fd routines, for cloner */
static const struct fileops puffs_fileops = {
        puffs_fop_read,
        puffs_fop_write,
        puffs_fop_ioctl,
        fnullop_fcntl,
        puffs_fop_poll,
        fbadop_stat,
        puffs_fop_close,
        puffs_fop_kqfilter
};

/*
 * puffs instance structures.  these are always allocated and freed
 * from the context of the device node / fileop code.
 */
struct puffs_instance {
        pid_t pi_pid;
        int pi_idx;
        int pi_fd;
        struct puffs_mount *pi_pmp;
        struct selinfo pi_sel;

        TAILQ_ENTRY(puffs_instance) pi_entries;
};
#define PMP_EMBRYO ((struct puffs_mount *)-1)   /* before mount */
#define PMP_DEAD ((struct puffs_mount *)-2)     /* goner */
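
/*
 * pi_pmp lifecycle: PMP_EMBRYO from open until the mount registers
 * itself via puffs_setpmp(), then the real struct puffs_mount
 * pointer, and finally PMP_DEAD once puffs_nukebypmp() has run at
 * unmount time.
 */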

static TAILQ_HEAD(, puffs_instance) puffs_ilist
    = TAILQ_HEAD_INITIALIZER(puffs_ilist);

/* protects both the list and the contents of the list elements */
static struct simplelock pi_lock = SIMPLELOCK_INITIALIZER;

static int get_pi_idx(struct puffs_instance *);

/* search sorted list of instances for free minor, sorted insert arg */
static int
get_pi_idx(struct puffs_instance *pi_i)
{
        struct puffs_instance *pi;
        int i;

        i = 0;
        TAILQ_FOREACH(pi, &puffs_ilist, pi_entries) {
                if (i == PUFFS_CLONER)
                        return PUFFS_CLONER;
                if (i != pi->pi_idx)
                        break;
                i++;
        }

        pi_i->pi_pmp = PMP_EMBRYO;

        if (pi == NULL)
                TAILQ_INSERT_TAIL(&puffs_ilist, pi_i, pi_entries);
        else
                TAILQ_INSERT_BEFORE(pi, pi_i, pi_entries);

        return i;
}
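
/*
 * Worked example (illustration only): with instances at indices
 * 0, 1 and 3 on the list, the loop above stops at the element with
 * index 3 once i reaches 2, inserts the new instance before it, and
 * returns 2.  With indices 0, 1, 2 the loop runs off the end, the
 * new instance is appended to the tail, and 3 is returned.
 */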

int
puffscdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
        struct puffs_instance *pi;
        struct file *fp;
        int error, fd, idx;

        /*
         * XXX: decide on some security model and check permissions
         */

        if (minor(dev) != PUFFS_CLONER)
                return ENXIO;

        if ((error = falloc(l, &fp, &fd)) != 0)
                return error;

        MALLOC(pi, struct puffs_instance *, sizeof(struct puffs_instance),
            M_PUFFS, M_WAITOK | M_ZERO);

        simple_lock(&pi_lock);
        idx = get_pi_idx(pi);
        if (idx == PUFFS_CLONER) {
                simple_unlock(&pi_lock);
                FREE(pi, M_PUFFS);
                FILE_UNUSE(fp, l);
                ffree(fp);
                return EBUSY;
        }

        pi->pi_pid = l->l_proc->p_pid;
        pi->pi_idx = idx;
        simple_unlock(&pi_lock);

        DPRINTF(("puffscdopen: registered embryonic pmp for pid: %d\n",
            pi->pi_pid));

        return fdclone(l, fp, fd, FREAD|FWRITE, &puffs_fileops, pi);
}

int
puffscdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{

        /* never reached: close goes through puffs_fop_close() */
        panic("puffscdclose");

        return 0;
}

/*
 * Set the puffs_mount pointer.  Called from puffs_mount(), which is
 * the earliest place that knows about this.
 *
 * We only want to make sure that the caller had the right to open the
 * device; we don't much care which instance it gets in case the same
 * process opened several (they are all equivalent at this point).
 */
int
puffs_setpmp(pid_t pid, int fd, struct puffs_mount *pmp)
{
        struct puffs_instance *pi;
        int rv = 1;

        simple_lock(&pi_lock);
        TAILQ_FOREACH(pi, &puffs_ilist, pi_entries) {
                if (pi->pi_pid == pid && pi->pi_pmp == PMP_EMBRYO) {
                        pi->pi_pmp = pmp;
                        pi->pi_fd = fd;
                        pmp->pmp_sel = &pi->pi_sel;
                        rv = 0;
                        break;
                }
        }
        simple_unlock(&pi_lock);

        return rv;
}
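
/*
 * A sketch of the expected caller (assumed, not from this file):
 * puffs_mount() would register itself with something along the lines
 * of
 *
 *        if (puffs_setpmp(l->l_proc->p_pid, args->pa_fd, pmp) != 0)
 *                return ENOENT;
 *
 * where "args->pa_fd" stands for whatever field of the mount
 * arguments carries the cloner fd.
 */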

/*
 * Remove mount point from list of instances.  Called from unmount.
 */
void
puffs_nukebypmp(struct puffs_mount *pmp)
{
        struct puffs_instance *pi;

        simple_lock(&pi_lock);
        TAILQ_FOREACH(pi, &puffs_ilist, pi_entries) {
                if (pi->pi_pmp == pmp) {
                        TAILQ_REMOVE(&puffs_ilist, pi, pi_entries);
                        break;
                }
        }
        if (pi)
                pi->pi_pmp = PMP_DEAD;
#ifdef DIAGNOSTIC
        else
                panic("puffs_nukebypmp: invalid puffs_mount");
#endif /* DIAGNOSTIC */

        simple_unlock(&pi_lock);

        DPRINTF(("puffs_nukebypmp: nuked %p\n", pi));
}

static int
puffs_fop_read(struct file *fp, off_t *off, struct uio *uio,
    kauth_cred_t cred, int flags)
{

        printf("READ\n");
        return ENODEV;
}

static int
puffs_fop_write(struct file *fp, off_t *off, struct uio *uio,
    kauth_cred_t cred, int flags)
{

        printf("WRITE\n");
        return ENODEV;
}

/*
 * Poll query interface.  The question is only if an event
 * can be read from us (and by read I mean ioctl... ugh).
 */
#define PUFFPOLL_EVSET (POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI)
static int
puffs_fop_poll(struct file *fp, int events, struct lwp *l)
{
        struct puffs_mount *pmp = FPTOPMP(fp);
        int revents;

        if (pmp == PMP_EMBRYO || pmp == PMP_DEAD) {
                printf("puffs_fop_poll: puffs %p, not mounted\n", pmp);
                return ENOENT;
        }

        revents = events & (POLLOUT | POLLWRNORM | POLLWRBAND);
        if ((events & PUFFPOLL_EVSET) == 0)
                return revents;

        /* check queue */
        simple_lock(&pmp->pmp_lock);
        if (!TAILQ_EMPTY(&pmp->pmp_req_touser))
                revents |= PUFFPOLL_EVSET;
        else
                selrecord(l, pmp->pmp_sel);
        simple_unlock(&pmp->pmp_lock);

        return revents;
}
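
/*
 * The server side of this (userspace sketch, assuming the cloner
 * device fd and the PUFFSGETOP ioctl; error handling omitted):
 *
 *        struct pollfd pfd;
 *
 *        pfd.fd = puffsfd;
 *        pfd.events = POLLIN;
 *        if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLIN))
 *                rv = ioctl(puffsfd, PUFFSGETOP, preq);
 */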

/*
 * device close = forced unmount.
 *
 * unmounting is a frightfully complex operation to avoid races
 *
 * XXX: if userspace is terminated by a signal, this will be
 * called only after the signal is delivered (i.e. after someone tries
 * to access the file system).  Also, the first one for a delivery
 * will get a free bounce-bounce ride before it can be notified
 * that the fs is dead.  I'm not terribly concerned about optimizing
 * this for speed ...
 */
static int
puffs_fop_close(struct file *fp, struct lwp *l)
{
        struct puffs_instance *pi;
        struct puffs_mount *pmp;
        struct mount *mp;

        DPRINTF(("puffs_fop_close: device closed, force filesystem unmount\n"));

        simple_lock(&pi_lock);
        pmp = FPTOPMP(fp);
        /*
         * First check if the fs was never mounted.  In that case
         * remove the instance from the list.  If mount is attempted later,
         * it will simply fail.
         */
        if (pmp == PMP_EMBRYO) {
                pi = FPTOPI(fp);
                TAILQ_REMOVE(&puffs_ilist, pi, pi_entries);
                simple_unlock(&pi_lock);
                FREE(pi, M_PUFFS);
                return 0;
        }

        /*
         * Next, check if unmount was already called and the instance
         * is dead.  In this case we can just free the structure and
         * go home, it was removed from the list by puffs_nukebypmp().
         */
        if (pmp == PMP_DEAD) {
                /* would be nice, but don't have a reference to it ... */
                /* KASSERT(pmp_status == PUFFSTAT_DYING); */
                simple_unlock(&pi_lock);
                pi = FPTOPI(fp);
                FREE(pi, M_PUFFS);
                return 0;
        }

        /*
         * So we have a reference.  Proceed to unwrap the file system.
         */
        mp = PMPTOMP(pmp);
        simple_unlock(&pi_lock);

        /*
         * Detach from VFS.  First do necessary XXX-dance (from
         * sys_unmount() & other callers of dounmount()).
         *
         * XXX Freeze syncer.  Must do this before locking the
         * mount point.  See dounmount() for details.
         */
        lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);

        /*
         * The only way vfs_busy() will fail for us is if the filesystem
         * is already a goner.
         * XXX: skating on the thin ice of modern calling conventions ...
         */
        if (vfs_busy(mp, 0, 0)) {
                lockmgr(&syncer_lock, LK_RELEASE, NULL);
                return 0;
        }

        /* Once we have the mount point, unmount() can't interfere */
        puffs_userdead(pmp);
        dounmount(mp, MNT_FORCE, l);

        return 0;
}

static int puffsgetop(struct puffs_mount *, struct puffs_req *, int);
static int puffsputop(struct puffs_mount *, struct puffs_req *);
static int puffssizeop(struct puffs_mount *, struct puffs_sizeop *);

static int
puffs_fop_ioctl(struct file *fp, u_long cmd, void *data, struct lwp *l)
{
        struct puffs_mount *pmp = FPTOPMP(fp);

        if (pmp == PMP_EMBRYO || pmp == PMP_DEAD) {
                printf("puffs_fop_ioctl: puffs %p, not mounted\n", pmp);
                return ENOENT;
        }

        switch (cmd) {
        case PUFFSGETOP:
                return puffsgetop(pmp, data, fp->f_flag & FNONBLOCK);

        case PUFFSPUTOP:
                return puffsputop(pmp, data);

        case PUFFSSIZEOP:
                return puffssizeop(pmp, data);

        case PUFFSMOUNTOP:
                return puffs_start2(pmp, data);

        /* already done in sys_ioctl() */
        case FIONBIO:
                return 0;

        default:
                return EINVAL;
        }
}

static void
filt_puffsdetach(struct knote *kn)
{
        struct puffs_instance *pi = kn->kn_hook;

        simple_lock(&pi_lock);
        SLIST_REMOVE(&pi->pi_sel.sel_klist, kn, knote, kn_selnext);
        simple_unlock(&pi_lock);
}

static int
filt_puffsioctl(struct knote *kn, long hint)
{
        struct puffs_instance *pi = kn->kn_hook;
        struct puffs_mount *pmp;
        int error;

        error = 0;
        simple_lock(&pi_lock);
        pmp = pi->pi_pmp;
        if (pmp == PMP_EMBRYO || pmp == PMP_DEAD)
                error = 1;
        simple_unlock(&pi_lock);
        if (error)
                return 0;

        simple_lock(&pmp->pmp_lock);
        kn->kn_data = pmp->pmp_req_touser_waiters;
        simple_unlock(&pmp->pmp_lock);

        return kn->kn_data != 0;
}

static const struct filterops puffsioctl_filtops =
        { 1, NULL, filt_puffsdetach, filt_puffsioctl };

static int
puffs_fop_kqfilter(struct file *fp, struct knote *kn)
{
        struct puffs_instance *pi = fp->f_data;
        struct klist *klist;

        if (kn->kn_filter != EVFILT_READ)
                return 1;

        klist = &pi->pi_sel.sel_klist;
        kn->kn_fop = &puffsioctl_filtops;
        kn->kn_hook = pi;

        simple_lock(&pi_lock);
        SLIST_INSERT_HEAD(klist, kn, kn_selnext);
        simple_unlock(&pi_lock);

        return 0;
}

/*
 * ioctl handlers
 */

static int
puffsgetop(struct puffs_mount *pmp, struct puffs_req *preq, int nonblock)
{
        struct puffs_park *park;
        int error;

        simple_lock(&pmp->pmp_lock);
 again:
        if (pmp->pmp_status != PUFFSTAT_RUNNING) {
                simple_unlock(&pmp->pmp_lock);
                return ENXIO;
        }
        if (TAILQ_EMPTY(&pmp->pmp_req_touser)) {
                if (nonblock) {
                        simple_unlock(&pmp->pmp_lock);
                        return EWOULDBLOCK;
                }
                ltsleep(&pmp->pmp_req_touser, PUSER, "puffs2", 0,
                    &pmp->pmp_lock);
                goto again;
        }

        park = TAILQ_FIRST(&pmp->pmp_req_touser);
        if (preq->preq_auxlen < park->park_copylen) {
                simple_unlock(&pmp->pmp_lock);
                return E2BIG;
        }
        TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
        pmp->pmp_req_touser_waiters--;
        simple_unlock(&pmp->pmp_lock);

        preq->preq_id = park->park_preq->preq_id;
        preq->preq_opclass = park->park_preq->preq_opclass;
        preq->preq_optype = park->park_preq->preq_optype;
        preq->preq_cookie = park->park_preq->preq_cookie;
        preq->preq_auxlen = park->park_copylen;

        if ((error = copyout(park->park_kernbuf, preq->preq_aux,
            park->park_copylen)) != 0) {
                /*
                 * ok, user server is probably trying to cheat.
                 * stuff op back & return error to user
                 */
                simple_lock(&pmp->pmp_lock);
                TAILQ_INSERT_HEAD(&pmp->pmp_req_touser, park, park_entries);
                simple_unlock(&pmp->pmp_lock);
                return error;
        }
        simple_lock(&pmp->pmp_lock);
        TAILQ_INSERT_TAIL(&pmp->pmp_req_replywait, park, park_entries);
        simple_unlock(&pmp->pmp_lock);

        return 0;
}
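
/*
 * Together with puffsputop() below this forms the server's request
 * loop.  A minimal userspace sketch (buffer setup and error handling
 * omitted; preq->preq_aux must point to storage large enough for the
 * op's aux data):
 *
 *        for (;;) {
 *                if (ioctl(puffsfd, PUFFSGETOP, preq) == -1)
 *                        break;
 *                ... dispatch on preq_opclass / preq_optype, fill in
 *                    preq_rv and any reply data ...
 *                if (ioctl(puffsfd, PUFFSPUTOP, preq) == -1)
 *                        break;
 *        }
 */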

static int
puffsputop(struct puffs_mount *pmp, struct puffs_req *preq)
{
        struct puffs_park *park;
        size_t copylen;
        int error;

        simple_lock(&pmp->pmp_lock);
        TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
                if (park->park_preq->preq_id == preq->preq_id) {
                        TAILQ_REMOVE(&pmp->pmp_req_replywait, park,
                            park_entries);
                        break;
                }
        }
        simple_unlock(&pmp->pmp_lock);

        if (park == NULL)
                return EINVAL;

        /*
         * check size of incoming transmission.  allow a larger kernel
         * buffer to be allocated only if the kernel-side caller
         * requested it with PUFFS_REQFLAG_ADJBUF.  Else, just copy
         * as much as the existing kernel buffer holds.
         *
         * However, don't allow ludicrously large buffers
         */
        copylen = preq->preq_auxlen;
        if (copylen > pmp->pmp_req_maxsize) {
#ifdef DIAGNOSTIC
                printf("puffsputop: outrageous user buf size: %zu\n", copylen);
#endif
                error = EFAULT;
                goto out;
        }

        if (park->park_buflen < copylen) {
                if (park->park_flags & PUFFS_REQFLAG_ADJBUF) {
                        free(park->park_kernbuf, M_PUFFS);
                        park->park_kernbuf = malloc(copylen, M_PUFFS,
                            M_WAITOK);
                        park->park_buflen = copylen;
                } else {
                        /* clamp: never copy past the kernel buffer */
                        copylen = park->park_buflen;
                }
        }

        error = copyin(preq->preq_aux, park->park_kernbuf, copylen);

        /*
         * if copyin botched, inform both userspace and the vnodeop
         * desperately waiting for information
         */
 out:
        if (error)
                park->park_preq->preq_rv = error;
        else
                park->park_preq->preq_rv = preq->preq_rv;
        wakeup(park);

        return error;
}

/* this is probably going to die away at some point? */
static int
puffssizeop(struct puffs_mount *pmp, struct puffs_sizeop *psop_user)
{
        struct puffs_sizepark *pspark;
        void *kernbuf;
        size_t copylen;
        int error;

        /* locate correct op */
        simple_lock(&pmp->pmp_lock);
        TAILQ_FOREACH(pspark, &pmp->pmp_req_sizepark, pkso_entries) {
                if (pspark->pkso_reqid == psop_user->pso_reqid) {
                        TAILQ_REMOVE(&pmp->pmp_req_sizepark, pspark,
                            pkso_entries);
                        break;
                }
        }
        simple_unlock(&pmp->pmp_lock);

        if (pspark == NULL)
                return EINVAL;

        error = 0;
        copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);

        /*
         * XXX: uvm stuff to avoid bouncy-bouncy copying?
         */
        if (PUFFS_SIZEOP_UIO(pspark->pkso_reqtype)) {
                kernbuf = malloc(copylen, M_PUFFS, M_WAITOK | M_ZERO);
                if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_IN) {
                        error = copyin(psop_user->pso_userbuf,
                            kernbuf, copylen);
                        if (error) {
                                printf("psop ERROR1 %d\n", error);
                                goto escape;
                        }
                }
                error = uiomove(kernbuf, copylen, pspark->pkso_uio);
                if (error) {
                        printf("uiomove from kernel %p, len %d failed: %d\n",
                            kernbuf, (int)copylen, error);
                        goto escape;
                }

                if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_OUT) {
                        error = copyout(kernbuf,
                            psop_user->pso_userbuf, copylen);
                        if (error) {
                                printf("psop ERROR2 %d\n", error);
                                goto escape;
                        }
                }
 escape:
                free(kernbuf, M_PUFFS);
        } else if (PUFFS_SIZEOP_BUF(pspark->pkso_reqtype)) {
                /* copy only as much as both buffers can hold */
                copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);
                if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_BUF_IN) {
                        error = copyin(psop_user->pso_userbuf,
                            pspark->pkso_copybuf, copylen);
                } else {
                        error = copyout(pspark->pkso_copybuf,
                            psop_user->pso_userbuf, copylen);
                }
        }
#ifdef DIAGNOSTIC
        else
                panic("puffssizeop: invalid reqtype %d",
                    pspark->pkso_reqtype);
#endif /* DIAGNOSTIC */

        return error;
}