/*	$NetBSD: ugen.c,v 1.106 2009/12/06 21:40:31 dyoung Exp $	*/

/*
 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Lennart Augustsson (lennart (at) augustsson.net) at
 * Carlstedt Research & Technology.
 *
 * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and the Department of the Interior National Business
 * Center under agreement number NBCHC050166.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.106 2009/12/06 21:40:31 dyoung Exp $");

#include "opt_ugen_bulk_ra_wb.h"
#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#if defined(__NetBSD__) || defined(__OpenBSD__)
#include <sys/device.h>
#include <sys/ioctl.h>
#elif defined(__FreeBSD__)
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/ioccom.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#endif
#include <sys/conf.h>
#include <sys/tty.h>
#include <sys/file.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#ifdef UGEN_DEBUG
#define DPRINTF(x)	if (ugendebug) logprintf x
#define DPRINTFN(n,x)	if (ugendebug>(n)) logprintf x
int	ugendebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define	UGEN_CHUNK	128	/* chunk size for read */
#define	UGEN_IBSIZE	1020	/* buffer size */
#define	UGEN_BBSIZE	1024

#define UGEN_NISOFRAMES	500	/* 0.5 seconds worth */
#define UGEN_NISOREQS	6	/* number of outstanding xfer requests */
#define UGEN_NISORFRMS	4	/* number of frames (milliseconds) per req */

#define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
#define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
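/*
 * These are only defaults; ugen_do_ioctl() below lets a process adjust the
 * buffer and request sizes per endpoint with USB_SET_BULK_RA_OPT and
 * USB_SET_BULK_WB_OPT before enabling read-ahead or write-behind.
 */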

struct ugen_endpoint {
	struct ugen_softc *sc;
	usb_endpoint_descriptor_t *edesc;
	usbd_interface_handle iface;
	int state;
#define	UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	usbd_pipe_handle pipeh;
	struct clist q;
	struct selinfo rsel;
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	u_int32_t timeout;
#ifdef UGEN_BULK_RA_WB
	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	u_int32_t ra_wb_used;	 /* how much is in buffer */
	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	usbd_xfer_handle ra_wb_xfer;
#endif
	struct isoreq {
		struct ugen_endpoint *sce;
		usbd_xfer_handle xfer;
		void *dmabuf;
		u_int16_t sizes[UGEN_NISORFRMS];
	} isoreqs[UGEN_NISOREQS];
};
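
/*
 * The ibuf/fill/cur/limit members above implement a simple ring buffer:
 * data is produced at "fill" and consumed at "cur", and both pointers wrap
 * from "limit" back to "ibuf".  For example, with a 16 byte buffer
 * (limit == ibuf + 16), storing 10 bytes starting at fill == ibuf + 12
 * places 4 bytes at the end of the buffer and the remaining 6 at ibuf,
 * leaving fill == ibuf + 6.  The isochronous and bulk RA/WB paths below
 * all rely on this wrap-around arithmetic.
 */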

struct ugen_softc {
	USBBASEDEVICE sc_dev;		/* base device */
	usbd_device_handle sc_udev;

	char sc_is_open[USB_MAX_ENDPOINTS];
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;
	char sc_buffer[UGEN_BBSIZE];
	u_char sc_dying;
};

#if defined(__NetBSD__)
dev_type_open(ugenopen);
dev_type_close(ugenclose);
dev_type_read(ugenread);
dev_type_write(ugenwrite);
dev_type_ioctl(ugenioctl);
dev_type_poll(ugenpoll);
dev_type_kqfilter(ugenkqfilter);

const struct cdevsw ugen_cdevsw = {
	ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
	nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
};
#elif defined(__OpenBSD__)
cdev_decl(ugen);
#elif defined(__FreeBSD__)
d_open_t  ugenopen;
d_close_t ugenclose;
d_read_t  ugenread;
d_write_t ugenwrite;
d_ioctl_t ugenioctl;
d_poll_t  ugenpoll;

#define UGEN_CDEV_MAJOR	114

Static struct cdevsw ugen_cdevsw = {
	/* open */	ugenopen,
	/* close */	ugenclose,
	/* read */	ugenread,
	/* write */	ugenwrite,
	/* ioctl */	ugenioctl,
	/* poll */	ugenpoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"ugen",
	/* maj */	UGEN_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};
#endif

Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		     usbd_status status);
Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
			    usbd_status status);
#ifdef UGEN_BULK_RA_WB
Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
#endif
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
			 void *, int, struct lwp *);
Static int ugen_set_config(struct ugen_softc *sc, int configno);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
					       int index, int *lenp);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);

#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
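
/*
 * The minor number packs the unit into bits 4-7 and the endpoint address
 * into bits 0-3; for example unit 1, endpoint 2 gives minor 0x12, which is
 * what the ugen1.02 device node (as created by MAKEDEV(8)) refers to.
 * Endpoint 0 is the control endpoint.
 */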

USB_DECLARE_DRIVER(ugen);

/* toggle to control attach priority. -1 means "let autoconf decide" */
int ugen_override = -1;

USB_MATCH(ugen)
{
	USB_MATCH_START(ugen, uaa);
	int override;

	if (ugen_override != -1)
		override = ugen_override;
	else
		override = match->cf_flags & 1;

	if (override)
		return (UMATCH_HIGHEST);
	else if (uaa->usegeneric)
		return (UMATCH_GENERIC);
	else
		return (UMATCH_NONE);
}
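
/*
 * Illustrative only: the cf_flags bit tested above normally comes from the
 * kernel configuration, so a config(5) line along the lines of
 *
 *	ugen*	at uhub? port ? flags 1
 *
 * (or setting ugen_override to 1) should make ugen claim a device even when
 * a more specific driver would otherwise match.
 */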

USB_ATTACH(ugen)
{
	USB_ATTACH_START(ugen, sc, uaa);
	usbd_device_handle udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->device;

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		USB_ATTACH_ERROR_RETURN;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		USB_ATTACH_ERROR_RETURN;
	}

#ifdef __FreeBSD__
	{
		static int global_init_done = 0;
		if (!global_init_done) {
			cdevsw_add(&ugen_cdevsw);
			global_init_done = 1;
		}
	}
#endif
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
		}
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
			   USBDEV(sc->sc_dev));

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	USB_ATTACH_SUCCESS_RETURN;
}

Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	usbd_device_handle dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    USBDEVNAME(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			     ("ugen_set_config: %s - endpoint %d is open\n",
			      USBDEVNAME(sc->sc_dev), endptno));
			return (USBD_IN_USE);
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return (err);
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return (err);
	memset(sc->sc_endpoints, 0, sizeof sc->sc_endpoints);
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return (err);
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return (err);
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return (USBD_NORMAL_COMPLETION);
}

int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	usbd_xfer_handle xfer;
	void *tbuf;
	int i, j;

	USB_GET_SC_OPEN(ugen, unit, sc);

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	if (sc == NULL || sc->sc_dying)
		return (ENXIO);

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return (0);
	}

	if (sc->sc_is_open[endpt])
		return (EBUSY);

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce == 0 || sce->edesc == 0)
				return (ENXIO);
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return (EIO);
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1)
				return (ENOMEM);
			err = usbd_open_pipe_intr(sce->iface,
				  edesc->bEndpointAddress,
				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
				  sce->ibuf, isize, ugenintr,
				  USBD_DEFAULT_INTERVAL);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				clfree(&sce->q);
				return (EIO);
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return (EIO);
#ifdef UGEN_BULK_RA_WB
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
#endif
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return (EINVAL);
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
				M_USBDEV, M_WAITOK);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				return (EIO);
			}
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				xfer = usbd_alloc_xfer(sc->sc_udev);
				if (xfer == 0)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				tbuf = usbd_alloc_buffer
					(xfer, isize * UGEN_NISORFRMS);
				if (tbuf == 0) {
					i++;
					goto bad;
				}
				sce->isoreqs[i].dmabuf = tbuf;
				for (j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer
					(xfer, sce->pipeh, &sce->isoreqs[i],
					 sce->isoreqs[i].sizes,
					 UGEN_NISORFRMS, USBD_NO_COPY,
					 ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			while (--i >= 0) /* implicit buffer free */
				usbd_free_xfer(sce->isoreqs[i].xfer);
			return (ENOMEM);
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return (EINVAL);
		}
	}
	sc->sc_is_open[endpt] = 1;
	return (0);
}
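
/*
 * A sketch of the expected userland usage (illustrative only, not part of
 * the driver): endpoint nodes are opened directly and read or written like
 * any character device, e.g.
 *
 *	int fd = open("/dev/ugen0.01", O_RDONLY);	// IN endpoint 1
 *	char buf[1024];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * while the control endpoint node (/dev/ugen0.00) is used with the ioctls
 * handled in ugen_do_ioctl() below.
 */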

int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return (EINVAL);
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return (0);
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce == NULL || sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		usbd_abort_pipe(sce->pipeh);
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_free_xfer(sce->isoreqs[i].xfer);
			break;
#ifdef UGEN_BULK_RA_WB
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
				/* ibuf freed below */
				usbd_free_xfer(sce->ra_wb_xfer);
			break;
#endif
		default:
			break;
		}

		if (sce->ibuf != NULL) {
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
			clfree(&sce->q);
		}
	}
	sc->sc_is_open[endpt] = 0;

	return (0);
}

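/*
 * Common read path for ugenread().  Interrupt endpoints drain the clist
 * filled by ugenintr(), isochronous endpoints copy out of the ring filled
 * by ugen_isoc_rintr(), and bulk endpoints either consume the read-ahead
 * ring (UGEN_BULK_RA) or issue synchronous bulk transfers of at most
 * UGEN_BBSIZE bytes.
 */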
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	usbd_xfer_handle xfer;
	usbd_status err;
	int s;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", USBDEVNAME(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurs. */
		s = splusb();
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		splx(s);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
#ifdef UGEN_BULK_RA_WB
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			s = splusb();
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					error = tsleep(sce, PZERO | PCATCH,
						       "ugenrb", 0);
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			splx(s);
			break;
		}
#endif
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(
				  xfer, sce->pipeh,
				  sce->state & UGEN_SHORT_OK ?
				      USBD_SHORT_XFER_OK : 0,
				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		s = splusb();
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			if (sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		splx(s);
		break;


	default:
		return (ENXIO);
	}
	return (error);
}

int
ugenread(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	sc->sc_refcnt++;
	error = ugen_do_read(sc, endpt, uio, flag);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));
	return (error);
}

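/*
 * Common write path for ugenwrite().  Bulk endpoints either feed the
 * write-behind ring (UGEN_BULK_WB) drained by ugen_bulkwb_intr() or issue
 * synchronous bulk transfers; interrupt OUT endpoints send at most one
 * packet per transfer.  Writes to isochronous endpoints are not supported.
 */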
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int error = 0;
#ifdef UGEN_BULK_RA_WB
	int s;
	u_int32_t tn;
	char *dbuf;
#endif
	usbd_xfer_handle xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", USBDEVNAME(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
#ifdef UGEN_BULK_RA_WB
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			s = splusb();
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					error = tsleep(sce, PZERO | PCATCH,
						       "ugenwb", 0);
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						- sce->ra_wb_used);
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL, n,
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next write.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			splx(s);
			break;
		}
#endif
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n,"ugenwb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n, "ugenwi");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}

int
ugenwrite(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	sc->sc_refcnt++;
	error = ugen_do_write(sc, endpt, uio, flag);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));
	return (error);
}

#if defined(__NetBSD__) || defined(__OpenBSD__)
int
ugen_activate(device_ptr_t self, enum devact act)
{
	struct ugen_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		sc->sc_dying = 1;
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif

USB_DETACH(ugen)
{
	USB_DETACH_START(ugen, sc);
	struct ugen_endpoint *sce;
	int i, dir;
	int s;
#if defined(__NetBSD__) || defined(__OpenBSD__)
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
#elif defined(__FreeBSD__)
	DPRINTF(("ugen_detach: sc=%p\n", sc));
#endif

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce && sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	s = splusb();
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			wakeup(&sc->sc_endpoints[i][IN]);
		/* Wait for processes to go away. */
		usb_detach_wait(USBDEV(sc->sc_dev));
	}
	splx(s);

#if defined(__NetBSD__) || defined(__OpenBSD__)
	/* locate the major number */
#if defined(__NetBSD__)
	maj = cdevsw_lookup_major(&ugen_cdevsw);
#elif defined(__OpenBSD__)
	for (maj = 0; maj < nchrdev; maj++)
		if (cdevsw[maj].d_open == ugenopen)
			break;
#endif

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
#elif defined(__FreeBSD__)
	/* XXX not implemented yet */
#endif

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
			   USBDEV(sc->sc_dev));

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
		}
	}

	return (0);
}

Static void
ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	/*struct ugen_softc *sc = sce->sc;*/
	u_int32_t count;
	u_char *ibuf;

	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, (" data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	(void)b_to_q(ibuf, count, &sce->q);

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

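/*
 * Completion handler for the isochronous read requests queued in
 * ugenopen().  New frames are appended at sce->fill; if the ring is full
 * the oldest data is discarded by advancing sce->cur, so a slow reader
 * loses old samples rather than stalling the device.  Each request is
 * resubmitted immediately.
 */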
Static void
ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	u_int32_t count, n;
	int i, isize;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
		    (long)(req - sce->isoreqs), count));

	/* throw away oldest input if the buffer is full */
	if (sce->fill < sce->cur && sce->cur <= sce->fill + count) {
		sce->cur += count;
		if (sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->cur - sce->limit);
		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
			     count));
	}

	isize = UGETW(sce->edesc->wMaxPacketSize);
	for (i = 0; i < UGEN_NISORFRMS; i++) {
		u_int32_t actlen = req->sizes[i];
		char const *tbuf = (char const *)req->dmabuf + isize * i;

		/* copy data to buffer */
		while (actlen > 0) {
			n = min(actlen, sce->limit - sce->fill);
			memcpy(sce->fill, tbuf, n);

			tbuf += n;
			actlen -= n;
			sce->fill += n;
			if (sce->fill == sce->limit)
				sce->fill = sce->ibuf;
		}

		/* setup size for next transfer */
		req->sizes[i] = isize;
	}

	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
			     USBD_NO_COPY, ugen_isoc_rintr);
	(void)usbd_transfer(xfer);

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

#ifdef UGEN_BULK_RA_WB
Static void
ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	u_int32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("ugen_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	u_int32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("ugen_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}
#endif

Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt, endptno, endpt;
	int dir;

	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return (err);
	if (ifaceidx < 0 || ifaceidx >= niface)
		return (USBD_INVAL);

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return (err);
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return (err);
	/* XXX should only do this after setting new altno has succeeded */
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = 0;
		sce->edesc = 0;
		sce->iface = 0;
	}

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return (err);

	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return (err);
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return (0);
}

/* Retrieve a complete descriptor for a certain device and index. */
Static usb_config_descriptor_t *
ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
{
	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
	int len;
	usbd_status err;

	if (index == USB_CURRENT_CONFIG_INDEX) {
		tdesc = usbd_get_config_descriptor(sc->sc_udev);
		len = UGETW(tdesc->wTotalLength);
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		memcpy(cdesc, tdesc, len);
		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
	} else {
		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
		if (err)
			return (0);
		len = UGETW(cdescr.wTotalLength);
		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
		if (err) {
			free(cdesc, M_TEMP);
			return (0);
		}
	}
	return (cdesc);
}

Static int
ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
{
	usbd_interface_handle iface;
	usbd_status err;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return (-1);
	return (usbd_get_interface_altindex(iface));
}

Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	usbd_interface_handle iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return (EIO);

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
		    )
			return (EINVAL);
		sce->timeout = *(int *)addr;
		return (0);
	case USB_SET_BULK_RA:
#ifdef UGEN_BULK_RA_WB
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a DMA buffer because we reuse the xfer
			 * with the same (maximum) request length, as the
			 * isochronous code does.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    USBD_NO_COPY, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				sce->state &= ~UGEN_BULK_RA;
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return (EIO);
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return (0);

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	case USB_SET_BULK_WB:
#ifdef UGEN_BULK_RA_WB
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a DMA buffer because we reuse the xfer
			 * with the same (maximum) request length, as the
			 * isochronous code does.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return (0);

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
#ifdef UGEN_BULK_RA_WB
	{
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return (EINVAL);
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return (0);
	}
#else
		return (EOPNOTSUPP);
#endif
	default:
		break;
	}

	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP);
		return (error);
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
					 (struct usb_device_info_old *)addr, 0);

		break;
#endif
	default:
		return (EINVAL);
	}
	return (0);
}
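
/*
 * A minimal userland sketch of driving the bulk read-ahead path above
 * (illustrative only; error handling omitted and the device node name and
 * sizes are assumptions, not part of the driver):
 *
 *	#include <sys/ioctl.h>
 *	#include <dev/usb/usb.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/ugen0.02", O_RDONLY);	// bulk IN endpoint 2
 *	struct usb_bulk_ra_wb_opt opt = {
 *		.ra_wb_buffer_size = 65536,		// ring buffer size
 *		.ra_wb_request_size = 4096,		// per-transfer length
 *	};
 *	int on = 1;
 *	ioctl(fd, USB_SET_BULK_RA_OPT, &opt);	// must precede enabling RA
 *	ioctl(fd, USB_SET_BULK_RA, &on);	// start read-ahead
 *	// subsequent read(2) calls drain the kernel-side ring
 */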

int
ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	sc->sc_refcnt++;
	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));
	return (error);
}

int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;
	int s;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	if (sc->sc_dying)
		return (POLLHUP);

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return (POLLERR);
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return (POLLERR);
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return (POLLERR);
	}
#endif
	s = splusb();
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			if (sce_in->state & UGEN_BULK_RA) {
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
#endif
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			if (sce_out->state & UGEN_BULK_WB) {
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
#endif
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}


	splx(s);
	return (revents);
}

static void
filt_ugenrdetach(struct knote *kn)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	int s;

	s = splusb();
	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

static int
filt_ugenread_intr(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	kn->kn_data = sce->q.c_cc;
	return (kn->kn_data > 0);
}

static int
filt_ugenread_isoc(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (sce->cur == sce->fill)
		return (0);

	if (sce->cur < sce->fill)
		kn->kn_data = sce->fill - sce->cur;
	else
		kn->kn_data = (sce->limit - sce->cur) +
		    (sce->fill - sce->ibuf);

	return (1);
}

#ifdef UGEN_BULK_RA_WB
static int
filt_ugenread_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (!(sce->state & UGEN_BULK_RA))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return (filt_seltrue(kn, hint));

	if (sce->ra_wb_used == 0)
		return (0);

	kn->kn_data = sce->ra_wb_used;

	return (1);
}

static int
filt_ugenwrite_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;

	if (!(sce->state & UGEN_BULK_WB))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return (filt_seltrue(kn, hint));

	if (sce->ra_wb_used == sce->limit - sce->ibuf)
		return (0);

	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;

	return (1);
}
#endif

static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

#ifdef UGEN_BULK_RA_WB
static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
#else
static const struct filterops ugen_seltrue_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_seltrue };
#endif

int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;
	int s;

	USB_GET_SC(ugen, UGENUNIT(dev), sc);

	if (sc->sc_dying)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
#else
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * So, emulate "seltrue".
			 */
			kn->kn_fop = &ugen_seltrue_filtops;
#endif
			break;
		default:
			return (EINVAL);
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return (EINVAL);

		case UE_BULK:
#ifdef UGEN_BULK_RA_WB
			kn->kn_fop = &ugenwrite_bulk_filtops;
#else
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * So, emulate "seltrue".
			 */
			kn->kn_fop = &ugen_seltrue_filtops;
#endif
			break;
		default:
			return (EINVAL);
		}
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sce;

	s = splusb();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);

	return (0);
}
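
/*
 * Illustrative only: the filters above make an endpoint node usable with
 * kqueue(2) in the usual way, e.g.
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	// register the endpoint fd
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	// wait for buffered data
 *
 * where fd is an open ugen endpoint node.
 */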

#if defined(__FreeBSD__)
DRIVER_MODULE(ugen, uhub, ugen_driver, ugen_devclass, usbd_driver_load, 0);
#endif
