/*	$NetBSD: ugen.c,v 1.99 2008/05/24 16:40:58 cube Exp $	*/
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.99 2008/05/24 16:40:58 cube Exp $");
41
42 #include "opt_ugen_bulk_ra_wb.h"
43 #include "opt_compat_netbsd.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/malloc.h>
49 #if defined(__NetBSD__) || defined(__OpenBSD__)
50 #include <sys/device.h>
51 #include <sys/ioctl.h>
52 #elif defined(__FreeBSD__)
53 #include <sys/module.h>
54 #include <sys/bus.h>
55 #include <sys/ioccom.h>
56 #include <sys/conf.h>
57 #include <sys/fcntl.h>
58 #include <sys/filio.h>
59 #endif
60 #include <sys/conf.h>
61 #include <sys/tty.h>
62 #include <sys/file.h>
63 #include <sys/select.h>
64 #include <sys/proc.h>
65 #include <sys/vnode.h>
66 #include <sys/poll.h>
67
68 #include <dev/usb/usb.h>
69 #include <dev/usb/usbdi.h>
70 #include <dev/usb/usbdi_util.h>
71
72 #ifdef UGEN_DEBUG
73 #define DPRINTF(x) if (ugendebug) logprintf x
74 #define DPRINTFN(n,x) if (ugendebug>(n)) logprintf x
75 int ugendebug = 0;
76 #else
77 #define DPRINTF(x)
78 #define DPRINTFN(n,x)
79 #endif
80
81 #define UGEN_CHUNK 128 /* chunk size for read */
82 #define UGEN_IBSIZE 1020 /* buffer size */
83 #define UGEN_BBSIZE 1024
84
85 #define UGEN_NISOFRAMES 500 /* 0.5 seconds worth */
86 #define UGEN_NISOREQS 6 /* number of outstanding xfer requests */
#define UGEN_NISORFRMS	4	/* number of frames (milliseconds) per req */
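/*
 * Up to UGEN_NISOREQS * UGEN_NISORFRMS (24) isochronous frames are
 * kept in flight at any one time.
 */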
88
89 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
90 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
91
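/*
 * Per-endpoint state.  Interrupt input is staged through the clist `q'.
 * Isochronous input and bulk read-ahead/write-behind use `ibuf' as a
 * circular buffer: `fill' is where new data is placed, `cur' is where
 * the next consumer (read(2), or the next outgoing USB transfer for
 * write-behind) starts, and `limit' marks the end of the buffer; both
 * pointers wrap back to `ibuf'.  `ra_wb_used' counts the bytes
 * currently held in the buffer.
 */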
92 struct ugen_endpoint {
93 struct ugen_softc *sc;
94 usb_endpoint_descriptor_t *edesc;
95 usbd_interface_handle iface;
96 int state;
97 #define UGEN_ASLP 0x02 /* waiting for data */
98 #define UGEN_SHORT_OK 0x04 /* short xfers are OK */
99 #define UGEN_BULK_RA 0x08 /* in bulk read-ahead mode */
100 #define UGEN_BULK_WB 0x10 /* in bulk write-behind mode */
101 #define UGEN_RA_WB_STOP 0x20 /* RA/WB xfer is stopped (buffer full/empty) */
102 usbd_pipe_handle pipeh;
103 struct clist q;
104 struct selinfo rsel;
105 u_char *ibuf; /* start of buffer (circular for isoc) */
106 u_char *fill; /* location for input (isoc) */
107 u_char *limit; /* end of circular buffer (isoc) */
108 u_char *cur; /* current read location (isoc) */
109 u_int32_t timeout;
110 #ifdef UGEN_BULK_RA_WB
111 u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
112 u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
113 u_int32_t ra_wb_used; /* how much is in buffer */
114 u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
115 usbd_xfer_handle ra_wb_xfer;
116 #endif
117 struct isoreq {
118 struct ugen_endpoint *sce;
119 usbd_xfer_handle xfer;
120 void *dmabuf;
121 u_int16_t sizes[UGEN_NISORFRMS];
122 } isoreqs[UGEN_NISOREQS];
123 };
124
125 struct ugen_softc {
126 USBBASEDEVICE sc_dev; /* base device */
127 usbd_device_handle sc_udev;
128
129 char sc_is_open[USB_MAX_ENDPOINTS];
130 struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
131 #define OUT 0
132 #define IN 1
133
134 int sc_refcnt;
135 char sc_buffer[UGEN_BBSIZE];
136 u_char sc_dying;
137 };
138
139 #if defined(__NetBSD__)
140 dev_type_open(ugenopen);
141 dev_type_close(ugenclose);
142 dev_type_read(ugenread);
143 dev_type_write(ugenwrite);
144 dev_type_ioctl(ugenioctl);
145 dev_type_poll(ugenpoll);
146 dev_type_kqfilter(ugenkqfilter);
147
148 const struct cdevsw ugen_cdevsw = {
149 ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
150 nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
151 };
152 #elif defined(__OpenBSD__)
153 cdev_decl(ugen);
154 #elif defined(__FreeBSD__)
155 d_open_t ugenopen;
156 d_close_t ugenclose;
157 d_read_t ugenread;
158 d_write_t ugenwrite;
159 d_ioctl_t ugenioctl;
160 d_poll_t ugenpoll;
161
162 #define UGEN_CDEV_MAJOR 114
163
164 Static struct cdevsw ugen_cdevsw = {
165 /* open */ ugenopen,
166 /* close */ ugenclose,
167 /* read */ ugenread,
168 /* write */ ugenwrite,
169 /* ioctl */ ugenioctl,
170 /* poll */ ugenpoll,
171 /* mmap */ nommap,
172 /* strategy */ nostrategy,
173 /* name */ "ugen",
174 /* maj */ UGEN_CDEV_MAJOR,
175 /* dump */ nodump,
176 /* psize */ nopsize,
177 /* flags */ 0,
178 /* bmaj */ -1
179 };
180 #endif
181
182 Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
183 usbd_status status);
184 Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
185 usbd_status status);
186 #ifdef UGEN_BULK_RA_WB
187 Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
188 usbd_status status);
189 Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
190 usbd_status status);
191 #endif
192 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
193 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
194 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
195 void *, int, struct lwp *);
196 Static int ugen_set_config(struct ugen_softc *sc, int configno);
197 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
198 int index, int *lenp);
199 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
200 Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);
201
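/*
 * The character device minor number encodes the unit number in bits
 * 4-7 and the endpoint address in bits 0-3, e.g. minor 0x23 refers to
 * endpoint 3 of ugen2 (/dev/ugen2.03).
 */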
202 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
203 #define UGENENDPOINT(n) (minor(n) & 0xf)
204 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
205
206 USB_DECLARE_DRIVER(ugen);
207
208 USB_MATCH(ugen)
209 {
210 USB_MATCH_START(ugen, uaa);
211
212 if (match->cf_flags & 1)
213 return (UMATCH_HIGHEST);
214 else if (uaa->usegeneric)
215 return (UMATCH_GENERIC);
216 else
217 return (UMATCH_NONE);
218 }
219
220 USB_ATTACH(ugen)
221 {
222 USB_ATTACH_START(ugen, sc, uaa);
223 usbd_device_handle udev;
224 char *devinfop;
225 usbd_status err;
226 int i, dir, conf;
227
228 devinfop = usbd_devinfo_alloc(uaa->device, 0);
229 USB_ATTACH_SETUP;
230 aprint_normal_dev(self, "%s\n", devinfop);
231 usbd_devinfo_free(devinfop);
232
233 sc->sc_dev = self;
234 sc->sc_udev = udev = uaa->device;
235
236 /* First set configuration index 0, the default one for ugen. */
237 err = usbd_set_config_index(udev, 0, 0);
238 if (err) {
239 aprint_error_dev(self,
240 "setting configuration index 0 failed\n");
241 sc->sc_dying = 1;
242 USB_ATTACH_ERROR_RETURN;
243 }
244 conf = usbd_get_config_descriptor(udev)->bConfigurationValue;
245
246 /* Set up all the local state for this configuration. */
247 err = ugen_set_config(sc, conf);
248 if (err) {
249 aprint_error_dev(self, "setting configuration %d failed\n",
250 conf);
251 sc->sc_dying = 1;
252 USB_ATTACH_ERROR_RETURN;
253 }
254
255 #ifdef __FreeBSD__
256 {
257 static int global_init_done = 0;
258 if (!global_init_done) {
259 cdevsw_add(&ugen_cdevsw);
260 global_init_done = 1;
261 }
262 }
263 #endif
264 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
265 for (dir = OUT; dir <= IN; dir++) {
266 struct ugen_endpoint *sce;
267
268 sce = &sc->sc_endpoints[i][dir];
269 selinit(&sce->rsel);
270 }
271 }
272
273 usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
274 USBDEV(sc->sc_dev));
275
276 if (!pmf_device_register(self, NULL, NULL))
277 aprint_error_dev(self, "couldn't establish power handler\n");
278
279 USB_ATTACH_SUCCESS_RETURN;
280 }
281
282 Static int
283 ugen_set_config(struct ugen_softc *sc, int configno)
284 {
285 usbd_device_handle dev = sc->sc_udev;
286 usbd_interface_handle iface;
287 usb_endpoint_descriptor_t *ed;
288 struct ugen_endpoint *sce;
289 u_int8_t niface, nendpt;
290 int ifaceno, endptno, endpt;
291 usbd_status err;
292 int dir;
293
294 DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
295 USBDEVNAME(sc->sc_dev), configno, sc));
296
297 /*
298 * We start at 1, not 0, because we don't care whether the
299 * control endpoint is open or not. It is always present.
300 */
301 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
302 if (sc->sc_is_open[endptno]) {
303 DPRINTFN(1,
304 ("ugen_set_config: %s - endpoint %d is open\n",
305 USBDEVNAME(sc->sc_dev), endptno));
306 return (USBD_IN_USE);
307 }
308
309 /* Avoid setting the current value. */
310 if (usbd_get_config_descriptor(dev)->bConfigurationValue != configno) {
311 err = usbd_set_config_no(dev, configno, 1);
312 if (err)
313 return (err);
314 }
315
316 err = usbd_interface_count(dev, &niface);
317 if (err)
318 return (err);
319 memset(sc->sc_endpoints, 0, sizeof sc->sc_endpoints);
320 for (ifaceno = 0; ifaceno < niface; ifaceno++) {
321 DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
322 err = usbd_device2interface_handle(dev, ifaceno, &iface);
323 if (err)
324 return (err);
325 err = usbd_endpoint_count(iface, &nendpt);
326 if (err)
327 return (err);
328 for (endptno = 0; endptno < nendpt; endptno++) {
329 ed = usbd_interface2endpoint_descriptor(iface,endptno);
330 KASSERT(ed != NULL);
331 endpt = ed->bEndpointAddress;
332 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
333 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
334 DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
335 "(%d,%d), sce=%p\n",
336 endptno, endpt, UE_GET_ADDR(endpt),
337 UE_GET_DIR(endpt), sce));
338 sce->sc = sc;
339 sce->edesc = ed;
340 sce->iface = iface;
341 }
342 }
343 return (USBD_NORMAL_COMPLETION);
344 }
345
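/*
 * Open the endpoint selected by the minor number.  The control endpoint
 * may be opened any number of times; other endpoints are exclusive, and
 * a pipe is opened for each direction requested in `flag'.  Interrupt-IN
 * and isochronous-IN endpoints also set up their input buffering here.
 */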
346 int
347 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
348 {
349 struct ugen_softc *sc;
350 int unit = UGENUNIT(dev);
351 int endpt = UGENENDPOINT(dev);
352 usb_endpoint_descriptor_t *edesc;
353 struct ugen_endpoint *sce;
354 int dir, isize;
355 usbd_status err;
356 usbd_xfer_handle xfer;
357 void *tbuf;
358 int i, j;
359
360 USB_GET_SC_OPEN(ugen, unit, sc);
361
362 DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
363 flag, mode, unit, endpt));
364
365 if (sc == NULL || sc->sc_dying)
366 return (ENXIO);
367
368 /* The control endpoint allows multiple opens. */
369 if (endpt == USB_CONTROL_ENDPOINT) {
370 sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
371 return (0);
372 }
373
374 if (sc->sc_is_open[endpt])
375 return (EBUSY);
376
377 /* Make sure there are pipes for all directions. */
378 for (dir = OUT; dir <= IN; dir++) {
379 if (flag & (dir == OUT ? FWRITE : FREAD)) {
380 sce = &sc->sc_endpoints[endpt][dir];
381 if (sce == 0 || sce->edesc == 0)
382 return (ENXIO);
383 }
384 }
385
386 /* Actually open the pipes. */
387 /* XXX Should back out properly if it fails. */
388 for (dir = OUT; dir <= IN; dir++) {
389 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
390 continue;
391 sce = &sc->sc_endpoints[endpt][dir];
392 sce->state = 0;
393 sce->timeout = USBD_NO_TIMEOUT;
394 DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
395 sc, endpt, dir, sce));
396 edesc = sce->edesc;
397 switch (edesc->bmAttributes & UE_XFERTYPE) {
398 case UE_INTERRUPT:
399 if (dir == OUT) {
400 err = usbd_open_pipe(sce->iface,
401 edesc->bEndpointAddress, 0, &sce->pipeh);
402 if (err)
403 return (EIO);
404 break;
405 }
406 isize = UGETW(edesc->wMaxPacketSize);
407 if (isize == 0) /* shouldn't happen */
408 return (EINVAL);
409 sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
410 DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
411 endpt, isize));
412 if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1)
413 return (ENOMEM);
414 err = usbd_open_pipe_intr(sce->iface,
415 edesc->bEndpointAddress,
416 USBD_SHORT_XFER_OK, &sce->pipeh, sce,
417 sce->ibuf, isize, ugenintr,
418 USBD_DEFAULT_INTERVAL);
419 if (err) {
420 free(sce->ibuf, M_USBDEV);
421 clfree(&sce->q);
422 return (EIO);
423 }
424 DPRINTFN(5, ("ugenopen: interrupt open done\n"));
425 break;
426 case UE_BULK:
427 err = usbd_open_pipe(sce->iface,
428 edesc->bEndpointAddress, 0, &sce->pipeh);
429 if (err)
430 return (EIO);
431 #ifdef UGEN_BULK_RA_WB
432 sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
433 /*
434 * Use request size for non-RA/WB transfers
435 * as the default.
436 */
437 sce->ra_wb_reqsize = UGEN_BBSIZE;
438 #endif
439 break;
440 case UE_ISOCHRONOUS:
441 if (dir == OUT)
442 return (EINVAL);
443 isize = UGETW(edesc->wMaxPacketSize);
444 if (isize == 0) /* shouldn't happen */
445 return (EINVAL);
446 sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
447 M_USBDEV, M_WAITOK);
448 sce->cur = sce->fill = sce->ibuf;
449 sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
450 DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
451 endpt, isize));
452 err = usbd_open_pipe(sce->iface,
453 edesc->bEndpointAddress, 0, &sce->pipeh);
454 if (err) {
455 free(sce->ibuf, M_USBDEV);
456 return (EIO);
457 }
458 for(i = 0; i < UGEN_NISOREQS; ++i) {
459 sce->isoreqs[i].sce = sce;
460 xfer = usbd_alloc_xfer(sc->sc_udev);
461 if (xfer == 0)
462 goto bad;
463 sce->isoreqs[i].xfer = xfer;
464 tbuf = usbd_alloc_buffer
465 (xfer, isize * UGEN_NISORFRMS);
466 if (tbuf == 0) {
467 i++;
468 goto bad;
469 }
470 sce->isoreqs[i].dmabuf = tbuf;
471 for(j = 0; j < UGEN_NISORFRMS; ++j)
472 sce->isoreqs[i].sizes[j] = isize;
473 usbd_setup_isoc_xfer
474 (xfer, sce->pipeh, &sce->isoreqs[i],
475 sce->isoreqs[i].sizes,
476 UGEN_NISORFRMS, USBD_NO_COPY,
477 ugen_isoc_rintr);
478 (void)usbd_transfer(xfer);
479 }
480 DPRINTFN(5, ("ugenopen: isoc open done\n"));
481 break;
482 bad:
483 while (--i >= 0) /* implicit buffer free */
484 usbd_free_xfer(sce->isoreqs[i].xfer);
485 return (ENOMEM);
486 case UE_CONTROL:
487 sce->timeout = USBD_DEFAULT_TIMEOUT;
488 return (EINVAL);
489 }
490 }
491 sc->sc_is_open[endpt] = 1;
492 return (0);
493 }
494
495 int
496 ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
497 {
498 int endpt = UGENENDPOINT(dev);
499 struct ugen_softc *sc;
500 struct ugen_endpoint *sce;
501 int dir;
502 int i;
503
504 USB_GET_SC(ugen, UGENUNIT(dev), sc);
505
506 DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
507 flag, mode, UGENUNIT(dev), endpt));
508
509 #ifdef DIAGNOSTIC
510 if (!sc->sc_is_open[endpt]) {
511 printf("ugenclose: not open\n");
512 return (EINVAL);
513 }
514 #endif
515
516 if (endpt == USB_CONTROL_ENDPOINT) {
517 DPRINTFN(5, ("ugenclose: close control\n"));
518 sc->sc_is_open[endpt] = 0;
519 return (0);
520 }
521
522 for (dir = OUT; dir <= IN; dir++) {
523 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
524 continue;
525 sce = &sc->sc_endpoints[endpt][dir];
526 if (sce == NULL || sce->pipeh == NULL)
527 continue;
528 DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
529 endpt, dir, sce));
530
531 usbd_abort_pipe(sce->pipeh);
532 usbd_close_pipe(sce->pipeh);
533 sce->pipeh = NULL;
534
535 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
536 case UE_INTERRUPT:
537 ndflush(&sce->q, sce->q.c_cc);
538 clfree(&sce->q);
539 break;
540 case UE_ISOCHRONOUS:
541 for (i = 0; i < UGEN_NISOREQS; ++i)
542 usbd_free_xfer(sce->isoreqs[i].xfer);
543 break;
544 #ifdef UGEN_BULK_RA_WB
545 case UE_BULK:
546 if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
547 /* ibuf freed below */
548 usbd_free_xfer(sce->ra_wb_xfer);
549 break;
550 #endif
551 default:
552 break;
553 }
554
555 if (sce->ibuf != NULL) {
556 free(sce->ibuf, M_USBDEV);
557 sce->ibuf = NULL;
558 clfree(&sce->q);
559 }
560 }
561 sc->sc_is_open[endpt] = 0;
562
563 return (0);
564 }
565
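/*
 * Read handler shared by ugenread().  Interrupt endpoints drain the
 * clist filled by ugenintr(), isochronous endpoints drain the ring
 * buffer filled by ugen_isoc_rintr(), and bulk endpoints either drain
 * the read-ahead buffer or perform synchronous transfers of at most
 * UGEN_BBSIZE bytes.  For the buffered cases a non-blocking read
 * returns EWOULDBLOCK when no data is available.
 */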
566 Static int
567 ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
568 {
569 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
570 u_int32_t n, tn;
571 usbd_xfer_handle xfer;
572 usbd_status err;
573 int s;
574 int error = 0;
575
576 DPRINTFN(5, ("%s: ugenread: %d\n", USBDEVNAME(sc->sc_dev), endpt));
577
578 if (sc->sc_dying)
579 return (EIO);
580
581 if (endpt == USB_CONTROL_ENDPOINT)
582 return (ENODEV);
583
584 #ifdef DIAGNOSTIC
585 if (sce->edesc == NULL) {
586 printf("ugenread: no edesc\n");
587 return (EIO);
588 }
589 if (sce->pipeh == NULL) {
590 printf("ugenread: no pipe\n");
591 return (EIO);
592 }
593 #endif
594
595 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
596 case UE_INTERRUPT:
		/* Block until activity occurs. */
598 s = splusb();
599 while (sce->q.c_cc == 0) {
600 if (flag & IO_NDELAY) {
601 splx(s);
602 return (EWOULDBLOCK);
603 }
604 sce->state |= UGEN_ASLP;
605 DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
606 error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
607 DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
608 if (sc->sc_dying)
609 error = EIO;
610 if (error) {
611 sce->state &= ~UGEN_ASLP;
612 break;
613 }
614 }
615 splx(s);
616
617 /* Transfer as many chunks as possible. */
618 while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
619 n = min(sce->q.c_cc, uio->uio_resid);
620 if (n > sizeof(sc->sc_buffer))
621 n = sizeof(sc->sc_buffer);
622
623 /* Remove a small chunk from the input queue. */
624 q_to_b(&sce->q, sc->sc_buffer, n);
625 DPRINTFN(5, ("ugenread: got %d chars\n", n));
626
627 /* Copy the data to the user process. */
628 error = uiomove(sc->sc_buffer, n, uio);
629 if (error)
630 break;
631 }
632 break;
633 case UE_BULK:
634 #ifdef UGEN_BULK_RA_WB
635 if (sce->state & UGEN_BULK_RA) {
636 DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
637 uio->uio_resid, sce->ra_wb_used));
638 xfer = sce->ra_wb_xfer;
639
640 s = splusb();
641 if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
642 splx(s);
643 return (EWOULDBLOCK);
644 }
645 while (uio->uio_resid > 0 && !error) {
646 while (sce->ra_wb_used == 0) {
647 sce->state |= UGEN_ASLP;
648 DPRINTFN(5,
649 ("ugenread: sleep on %p\n",
650 sce));
651 error = tsleep(sce, PZERO | PCATCH,
652 "ugenrb", 0);
653 DPRINTFN(5,
654 ("ugenread: woke, error=%d\n",
655 error));
656 if (sc->sc_dying)
657 error = EIO;
658 if (error) {
659 sce->state &= ~UGEN_ASLP;
660 break;
661 }
662 }
663
664 /* Copy data to the process. */
665 while (uio->uio_resid > 0
666 && sce->ra_wb_used > 0) {
667 n = min(uio->uio_resid,
668 sce->ra_wb_used);
669 n = min(n, sce->limit - sce->cur);
670 error = uiomove(sce->cur, n, uio);
671 if (error)
672 break;
673 sce->cur += n;
674 sce->ra_wb_used -= n;
675 if (sce->cur == sce->limit)
676 sce->cur = sce->ibuf;
677 }
678
679 /*
680 * If the transfers stopped because the
681 * buffer was full, restart them.
682 */
683 if (sce->state & UGEN_RA_WB_STOP &&
684 sce->ra_wb_used < sce->limit - sce->ibuf) {
685 n = (sce->limit - sce->ibuf)
686 - sce->ra_wb_used;
687 usbd_setup_xfer(xfer,
688 sce->pipeh, sce, NULL,
689 min(n, sce->ra_wb_xferlen),
690 USBD_NO_COPY, USBD_NO_TIMEOUT,
691 ugen_bulkra_intr);
692 sce->state &= ~UGEN_RA_WB_STOP;
693 err = usbd_transfer(xfer);
694 if (err != USBD_IN_PROGRESS)
695 /*
696 * The transfer has not been
697 * queued. Setting STOP
698 * will make us try
699 * again at the next read.
700 */
701 sce->state |= UGEN_RA_WB_STOP;
702 }
703 }
704 splx(s);
705 break;
706 }
707 #endif
708 xfer = usbd_alloc_xfer(sc->sc_udev);
709 if (xfer == 0)
710 return (ENOMEM);
711 while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
712 DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
713 tn = n;
714 err = usbd_bulk_transfer(
715 xfer, sce->pipeh,
716 sce->state & UGEN_SHORT_OK ?
717 USBD_SHORT_XFER_OK : 0,
718 sce->timeout, sc->sc_buffer, &tn, "ugenrb");
719 if (err) {
720 if (err == USBD_INTERRUPTED)
721 error = EINTR;
722 else if (err == USBD_TIMEOUT)
723 error = ETIMEDOUT;
724 else
725 error = EIO;
726 break;
727 }
728 DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
729 error = uiomove(sc->sc_buffer, tn, uio);
730 if (error || tn < n)
731 break;
732 }
733 usbd_free_xfer(xfer);
734 break;
735 case UE_ISOCHRONOUS:
736 s = splusb();
737 while (sce->cur == sce->fill) {
738 if (flag & IO_NDELAY) {
739 splx(s);
740 return (EWOULDBLOCK);
741 }
742 sce->state |= UGEN_ASLP;
743 DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
744 error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
745 DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
746 if (sc->sc_dying)
747 error = EIO;
748 if (error) {
749 sce->state &= ~UGEN_ASLP;
750 break;
751 }
752 }
753
754 while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
755 if(sce->fill > sce->cur)
756 n = min(sce->fill - sce->cur, uio->uio_resid);
757 else
758 n = min(sce->limit - sce->cur, uio->uio_resid);
759
760 DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));
761
762 /* Copy the data to the user process. */
763 error = uiomove(sce->cur, n, uio);
764 if (error)
765 break;
766 sce->cur += n;
767 if(sce->cur >= sce->limit)
768 sce->cur = sce->ibuf;
769 }
770 splx(s);
771 break;
772
773
774 default:
775 return (ENXIO);
776 }
777 return (error);
778 }
779
780 int
781 ugenread(dev_t dev, struct uio *uio, int flag)
782 {
783 int endpt = UGENENDPOINT(dev);
784 struct ugen_softc *sc;
785 int error;
786
787 USB_GET_SC(ugen, UGENUNIT(dev), sc);
788
789 sc->sc_refcnt++;
790 error = ugen_do_read(sc, endpt, uio, flag);
791 if (--sc->sc_refcnt < 0)
792 usb_detach_wakeup(USBDEV(sc->sc_dev));
793 return (error);
794 }
795
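/*
 * Write handler shared by ugenwrite().  Bulk endpoints either copy into
 * the write-behind ring buffer or perform synchronous transfers of at
 * most UGEN_BBSIZE bytes; interrupt endpoints transfer at most
 * wMaxPacketSize bytes at a time.
 */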
796 Static int
797 ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
798 int flag)
799 {
800 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
801 u_int32_t n;
802 int error = 0;
803 #ifdef UGEN_BULK_RA_WB
804 int s;
805 u_int32_t tn;
806 char *dbuf;
807 #endif
808 usbd_xfer_handle xfer;
809 usbd_status err;
810
811 DPRINTFN(5, ("%s: ugenwrite: %d\n", USBDEVNAME(sc->sc_dev), endpt));
812
813 if (sc->sc_dying)
814 return (EIO);
815
816 if (endpt == USB_CONTROL_ENDPOINT)
817 return (ENODEV);
818
819 #ifdef DIAGNOSTIC
820 if (sce->edesc == NULL) {
821 printf("ugenwrite: no edesc\n");
822 return (EIO);
823 }
824 if (sce->pipeh == NULL) {
825 printf("ugenwrite: no pipe\n");
826 return (EIO);
827 }
828 #endif
829
830 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
831 case UE_BULK:
832 #ifdef UGEN_BULK_RA_WB
833 if (sce->state & UGEN_BULK_WB) {
834 DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
835 uio->uio_resid, sce->ra_wb_used));
836 xfer = sce->ra_wb_xfer;
837
838 s = splusb();
839 if (sce->ra_wb_used == sce->limit - sce->ibuf &&
840 flag & IO_NDELAY) {
841 splx(s);
842 return (EWOULDBLOCK);
843 }
844 while (uio->uio_resid > 0 && !error) {
845 while (sce->ra_wb_used ==
846 sce->limit - sce->ibuf) {
847 sce->state |= UGEN_ASLP;
848 DPRINTFN(5,
849 ("ugenwrite: sleep on %p\n",
850 sce));
851 error = tsleep(sce, PZERO | PCATCH,
852 "ugenwb", 0);
853 DPRINTFN(5,
854 ("ugenwrite: woke, error=%d\n",
855 error));
856 if (sc->sc_dying)
857 error = EIO;
858 if (error) {
859 sce->state &= ~UGEN_ASLP;
860 break;
861 }
862 }
863
864 /* Copy data from the process. */
865 while (uio->uio_resid > 0 &&
866 sce->ra_wb_used < sce->limit - sce->ibuf) {
867 n = min(uio->uio_resid,
868 (sce->limit - sce->ibuf)
869 - sce->ra_wb_used);
870 n = min(n, sce->limit - sce->fill);
871 error = uiomove(sce->fill, n, uio);
872 if (error)
873 break;
874 sce->fill += n;
875 sce->ra_wb_used += n;
876 if (sce->fill == sce->limit)
877 sce->fill = sce->ibuf;
878 }
879
880 /*
881 * If the transfers stopped because the
882 * buffer was empty, restart them.
883 */
884 if (sce->state & UGEN_RA_WB_STOP &&
885 sce->ra_wb_used > 0) {
886 dbuf = (char *)usbd_get_buffer(xfer);
887 n = min(sce->ra_wb_used,
888 sce->ra_wb_xferlen);
889 tn = min(n, sce->limit - sce->cur);
890 memcpy(dbuf, sce->cur, tn);
891 dbuf += tn;
892 if (n - tn > 0)
893 memcpy(dbuf, sce->ibuf,
894 n - tn);
895 usbd_setup_xfer(xfer,
896 sce->pipeh, sce, NULL, n,
897 USBD_NO_COPY, USBD_NO_TIMEOUT,
898 ugen_bulkwb_intr);
899 sce->state &= ~UGEN_RA_WB_STOP;
900 err = usbd_transfer(xfer);
901 if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next write.
						 */
908 sce->state |= UGEN_RA_WB_STOP;
909 }
910 }
911 splx(s);
912 break;
913 }
914 #endif
915 xfer = usbd_alloc_xfer(sc->sc_udev);
916 if (xfer == 0)
917 return (EIO);
918 while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
919 error = uiomove(sc->sc_buffer, n, uio);
920 if (error)
921 break;
922 DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
923 err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
924 sce->timeout, sc->sc_buffer, &n,"ugenwb");
925 if (err) {
926 if (err == USBD_INTERRUPTED)
927 error = EINTR;
928 else if (err == USBD_TIMEOUT)
929 error = ETIMEDOUT;
930 else
931 error = EIO;
932 break;
933 }
934 }
935 usbd_free_xfer(xfer);
936 break;
937 case UE_INTERRUPT:
938 xfer = usbd_alloc_xfer(sc->sc_udev);
939 if (xfer == 0)
940 return (EIO);
941 while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
942 uio->uio_resid)) != 0) {
943 error = uiomove(sc->sc_buffer, n, uio);
944 if (error)
945 break;
946 DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
947 err = usbd_intr_transfer(xfer, sce->pipeh, 0,
948 sce->timeout, sc->sc_buffer, &n, "ugenwi");
949 if (err) {
950 if (err == USBD_INTERRUPTED)
951 error = EINTR;
952 else if (err == USBD_TIMEOUT)
953 error = ETIMEDOUT;
954 else
955 error = EIO;
956 break;
957 }
958 }
959 usbd_free_xfer(xfer);
960 break;
961 default:
962 return (ENXIO);
963 }
964 return (error);
965 }
966
967 int
968 ugenwrite(dev_t dev, struct uio *uio, int flag)
969 {
970 int endpt = UGENENDPOINT(dev);
971 struct ugen_softc *sc;
972 int error;
973
974 USB_GET_SC(ugen, UGENUNIT(dev), sc);
975
976 sc->sc_refcnt++;
977 error = ugen_do_write(sc, endpt, uio, flag);
978 if (--sc->sc_refcnt < 0)
979 usb_detach_wakeup(USBDEV(sc->sc_dev));
980 return (error);
981 }
982
983 #if defined(__NetBSD__) || defined(__OpenBSD__)
984 int
985 ugen_activate(device_ptr_t self, enum devact act)
986 {
987 struct ugen_softc *sc = device_private(self);
988
989 switch (act) {
990 case DVACT_ACTIVATE:
991 return (EOPNOTSUPP);
992
993 case DVACT_DEACTIVATE:
994 sc->sc_dying = 1;
995 break;
996 }
997 return (0);
998 }
999 #endif
1000
1001 USB_DETACH(ugen)
1002 {
1003 USB_DETACH_START(ugen, sc);
1004 struct ugen_endpoint *sce;
1005 int i, dir;
1006 int s;
1007 #if defined(__NetBSD__) || defined(__OpenBSD__)
1008 int maj, mn;
1009
1010 DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
1011 #elif defined(__FreeBSD__)
1012 DPRINTF(("ugen_detach: sc=%p\n", sc));
1013 #endif
1014
1015 sc->sc_dying = 1;
1016 pmf_device_deregister(self);
1017 /* Abort all pipes. Causes processes waiting for transfer to wake. */
1018 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1019 for (dir = OUT; dir <= IN; dir++) {
1020 sce = &sc->sc_endpoints[i][dir];
1021 if (sce && sce->pipeh)
1022 usbd_abort_pipe(sce->pipeh);
1023 }
1024 }
1025
1026 s = splusb();
1027 if (--sc->sc_refcnt >= 0) {
1028 /* Wake everyone */
1029 for (i = 0; i < USB_MAX_ENDPOINTS; i++)
1030 wakeup(&sc->sc_endpoints[i][IN]);
1031 /* Wait for processes to go away. */
1032 usb_detach_wait(USBDEV(sc->sc_dev));
1033 }
1034 splx(s);
1035
1036 #if defined(__NetBSD__) || defined(__OpenBSD__)
1037 /* locate the major number */
1038 #if defined(__NetBSD__)
1039 maj = cdevsw_lookup_major(&ugen_cdevsw);
1040 #elif defined(__OpenBSD__)
1041 for (maj = 0; maj < nchrdev; maj++)
1042 if (cdevsw[maj].d_open == ugenopen)
1043 break;
1044 #endif
1045
1046 /* Nuke the vnodes for any open instances (calls close). */
1047 mn = device_unit(self) * USB_MAX_ENDPOINTS;
1048 vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
1049 #elif defined(__FreeBSD__)
1050 /* XXX not implemented yet */
1051 #endif
1052
1053 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
1054 USBDEV(sc->sc_dev));
1055
1056 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1057 for (dir = OUT; dir <= IN; dir++) {
1058 sce = &sc->sc_endpoints[i][dir];
1059 seldestroy(&sce->rsel);
1060 }
1061 }
1062
1063 return (0);
1064 }
1065
1066 Static void
1067 ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
1068 {
1069 struct ugen_endpoint *sce = addr;
1070 /*struct ugen_softc *sc = sce->sc;*/
1071 u_int32_t count;
1072 u_char *ibuf;
1073
1074 if (status == USBD_CANCELLED)
1075 return;
1076
1077 if (status != USBD_NORMAL_COMPLETION) {
1078 DPRINTF(("ugenintr: status=%d\n", status));
1079 if (status == USBD_STALLED)
1080 usbd_clear_endpoint_stall_async(sce->pipeh);
1081 return;
1082 }
1083
1084 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1085 ibuf = sce->ibuf;
1086
1087 DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
1088 xfer, status, count));
1089 DPRINTFN(5, (" data = %02x %02x %02x\n",
1090 ibuf[0], ibuf[1], ibuf[2]));
1091
1092 (void)b_to_q(ibuf, count, &sce->q);
1093
1094 if (sce->state & UGEN_ASLP) {
1095 sce->state &= ~UGEN_ASLP;
1096 DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
1097 wakeup(sce);
1098 }
1099 selnotify(&sce->rsel, 0, 0);
1100 }
1101
1102 Static void
1103 ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
1104 usbd_status status)
1105 {
1106 struct isoreq *req = addr;
1107 struct ugen_endpoint *sce = req->sce;
1108 u_int32_t count, n;
1109 int i, isize;
1110
1111 /* Return if we are aborting. */
1112 if (status == USBD_CANCELLED)
1113 return;
1114
1115 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1116 DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1117 (long)(req - sce->isoreqs), count));
1118
1119 /* throw away oldest input if the buffer is full */
1120 if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1121 sce->cur += count;
1122 if(sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->cur - sce->limit);
1124 DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1125 count));
1126 }
1127
1128 isize = UGETW(sce->edesc->wMaxPacketSize);
1129 for (i = 0; i < UGEN_NISORFRMS; i++) {
1130 u_int32_t actlen = req->sizes[i];
1131 char const *tbuf = (char const *)req->dmabuf + isize * i;
1132
1133 /* copy data to buffer */
1134 while (actlen > 0) {
1135 n = min(actlen, sce->limit - sce->fill);
1136 memcpy(sce->fill, tbuf, n);
1137
1138 tbuf += n;
1139 actlen -= n;
1140 sce->fill += n;
1141 if(sce->fill == sce->limit)
1142 sce->fill = sce->ibuf;
1143 }
1144
1145 /* setup size for next transfer */
1146 req->sizes[i] = isize;
1147 }
1148
1149 usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
1150 USBD_NO_COPY, ugen_isoc_rintr);
1151 (void)usbd_transfer(xfer);
1152
1153 if (sce->state & UGEN_ASLP) {
1154 sce->state &= ~UGEN_ASLP;
1155 DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1156 wakeup(sce);
1157 }
1158 selnotify(&sce->rsel, 0, 0);
1159 }
1160
1161 #ifdef UGEN_BULK_RA_WB
1162 Static void
1163 ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1164 usbd_status status)
1165 {
1166 struct ugen_endpoint *sce = addr;
1167 u_int32_t count, n;
1168 char const *tbuf;
1169 usbd_status err;
1170
1171 /* Return if we are aborting. */
1172 if (status == USBD_CANCELLED)
1173 return;
1174
1175 if (status != USBD_NORMAL_COMPLETION) {
1176 DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
1177 sce->state |= UGEN_RA_WB_STOP;
1178 if (status == USBD_STALLED)
1179 usbd_clear_endpoint_stall_async(sce->pipeh);
1180 return;
1181 }
1182
1183 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1184
1185 /* Keep track of how much is in the buffer. */
1186 sce->ra_wb_used += count;
1187
1188 /* Copy data to buffer. */
1189 tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
1190 n = min(count, sce->limit - sce->fill);
1191 memcpy(sce->fill, tbuf, n);
1192 tbuf += n;
1193 count -= n;
1194 sce->fill += n;
1195 if (sce->fill == sce->limit)
1196 sce->fill = sce->ibuf;
1197 if (count > 0) {
1198 memcpy(sce->fill, tbuf, count);
1199 sce->fill += count;
1200 }
1201
1202 /* Set up the next request if necessary. */
1203 n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
1204 if (n > 0) {
1205 usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1206 min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
1207 USBD_NO_TIMEOUT, ugen_bulkra_intr);
1208 err = usbd_transfer(xfer);
1209 if (err != USBD_IN_PROGRESS) {
1210 printf("usbd_bulkra_intr: error=%d\n", err);
1211 /*
1212 * The transfer has not been queued. Setting STOP
1213 * will make us try again at the next read.
1214 */
1215 sce->state |= UGEN_RA_WB_STOP;
1216 }
1217 }
1218 else
1219 sce->state |= UGEN_RA_WB_STOP;
1220
1221 if (sce->state & UGEN_ASLP) {
1222 sce->state &= ~UGEN_ASLP;
1223 DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
1224 wakeup(sce);
1225 }
1226 selnotify(&sce->rsel, 0, 0);
1227 }
1228
1229 Static void
1230 ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1231 usbd_status status)
1232 {
1233 struct ugen_endpoint *sce = addr;
1234 u_int32_t count, n;
1235 char *tbuf;
1236 usbd_status err;
1237
1238 /* Return if we are aborting. */
1239 if (status == USBD_CANCELLED)
1240 return;
1241
1242 if (status != USBD_NORMAL_COMPLETION) {
1243 DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
1244 sce->state |= UGEN_RA_WB_STOP;
1245 if (status == USBD_STALLED)
1246 usbd_clear_endpoint_stall_async(sce->pipeh);
1247 return;
1248 }
1249
1250 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1251
1252 /* Keep track of how much is in the buffer. */
1253 sce->ra_wb_used -= count;
1254
1255 /* Update buffer pointers. */
1256 sce->cur += count;
1257 if (sce->cur >= sce->limit)
1258 sce->cur = sce->ibuf + (sce->cur - sce->limit);
1259
1260 /* Set up next request if necessary. */
1261 if (sce->ra_wb_used > 0) {
1262 /* copy data from buffer */
1263 tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
1264 count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
1265 n = min(count, sce->limit - sce->cur);
1266 memcpy(tbuf, sce->cur, n);
1267 tbuf += n;
1268 if (count - n > 0)
1269 memcpy(tbuf, sce->ibuf, count - n);
1270
1271 usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1272 count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
1273 err = usbd_transfer(xfer);
1274 if (err != USBD_IN_PROGRESS) {
1275 printf("usbd_bulkwb_intr: error=%d\n", err);
1276 /*
1277 * The transfer has not been queued. Setting STOP
1278 * will make us try again at the next write.
1279 */
1280 sce->state |= UGEN_RA_WB_STOP;
1281 }
1282 }
1283 else
1284 sce->state |= UGEN_RA_WB_STOP;
1285
1286 if (sce->state & UGEN_ASLP) {
1287 sce->state &= ~UGEN_ASLP;
1288 DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
1289 wakeup(sce);
1290 }
1291 selnotify(&sce->rsel, 0, 0);
1292 }
1293 #endif
1294
1295 Static usbd_status
1296 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1297 {
1298 usbd_interface_handle iface;
1299 usb_endpoint_descriptor_t *ed;
1300 usbd_status err;
1301 struct ugen_endpoint *sce;
1302 u_int8_t niface, nendpt, endptno, endpt;
1303 int dir;
1304
1305 DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1306
1307 err = usbd_interface_count(sc->sc_udev, &niface);
1308 if (err)
1309 return (err);
1310 if (ifaceidx < 0 || ifaceidx >= niface)
1311 return (USBD_INVAL);
1312
1313 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1314 if (err)
1315 return (err);
1316 err = usbd_endpoint_count(iface, &nendpt);
1317 if (err)
1318 return (err);
1319 /* XXX should only do this after setting new altno has succeeded */
1320 for (endptno = 0; endptno < nendpt; endptno++) {
1321 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1322 endpt = ed->bEndpointAddress;
1323 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1324 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1325 sce->sc = 0;
1326 sce->edesc = 0;
1327 sce->iface = 0;
1328 }
1329
1330 /* change setting */
1331 err = usbd_set_interface(iface, altno);
1332 if (err)
1333 return (err);
1334
1335 err = usbd_endpoint_count(iface, &nendpt);
1336 if (err)
1337 return (err);
1338 for (endptno = 0; endptno < nendpt; endptno++) {
1339 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1340 KASSERT(ed != NULL);
1341 endpt = ed->bEndpointAddress;
1342 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1343 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1344 sce->sc = sc;
1345 sce->edesc = ed;
1346 sce->iface = iface;
1347 }
1348 return (0);
1349 }
1350
1351 /* Retrieve a complete descriptor for a certain device and index. */
1352 Static usb_config_descriptor_t *
1353 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1354 {
1355 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1356 int len;
1357 usbd_status err;
1358
1359 if (index == USB_CURRENT_CONFIG_INDEX) {
1360 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1361 len = UGETW(tdesc->wTotalLength);
1362 if (lenp)
1363 *lenp = len;
1364 cdesc = malloc(len, M_TEMP, M_WAITOK);
1365 memcpy(cdesc, tdesc, len);
1366 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1367 } else {
1368 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1369 if (err)
1370 return (0);
1371 len = UGETW(cdescr.wTotalLength);
1372 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1373 if (lenp)
1374 *lenp = len;
1375 cdesc = malloc(len, M_TEMP, M_WAITOK);
1376 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1377 if (err) {
1378 free(cdesc, M_TEMP);
1379 return (0);
1380 }
1381 }
1382 return (cdesc);
1383 }
1384
1385 Static int
1386 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1387 {
1388 usbd_interface_handle iface;
1389 usbd_status err;
1390
1391 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1392 if (err)
1393 return (-1);
1394 return (usbd_get_interface_altindex(iface));
1395 }
1396
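/*
 * ioctl handler shared by ugenioctl().  As an illustration only (a
 * hypothetical userland sketch, not part of the driver; the device node
 * name and sizes are made up, and a kernel built with the
 * UGEN_BULK_RA_WB option is assumed), bulk read-ahead would typically
 * be enabled on an already-open IN endpoint roughly like this:
 *
 *	int fd = open("/dev/ugen0.01", O_RDONLY);
 *	struct usb_bulk_ra_wb_opt opt = {
 *		.ra_wb_buffer_size = 32768,
 *		.ra_wb_request_size = 4096,
 *	};
 *	int on = 1;
 *	ioctl(fd, USB_SET_BULK_RA_OPT, &opt);	(optional tuning)
 *	ioctl(fd, USB_SET_BULK_RA, &on);
 *	read(fd, buf, sizeof(buf));		(drains the RA buffer)
 *
 * USB_SET_BULK_WB works the same way for bulk OUT endpoints.
 */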
1397 Static int
1398 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1399 void *addr, int flag, struct lwp *l)
1400 {
1401 struct ugen_endpoint *sce;
1402 usbd_status err;
1403 usbd_interface_handle iface;
1404 struct usb_config_desc *cd;
1405 usb_config_descriptor_t *cdesc;
1406 struct usb_interface_desc *id;
1407 usb_interface_descriptor_t *idesc;
1408 struct usb_endpoint_desc *ed;
1409 usb_endpoint_descriptor_t *edesc;
1410 struct usb_alt_interface *ai;
1411 struct usb_string_desc *si;
1412 u_int8_t conf, alt;
1413
1414 DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
1415 if (sc->sc_dying)
1416 return (EIO);
1417
1418 switch (cmd) {
1419 case FIONBIO:
1420 /* All handled in the upper FS layer. */
1421 return (0);
1422 case USB_SET_SHORT_XFER:
1423 if (endpt == USB_CONTROL_ENDPOINT)
1424 return (EINVAL);
1425 /* This flag only affects read */
1426 sce = &sc->sc_endpoints[endpt][IN];
1427 if (sce == NULL || sce->pipeh == NULL)
1428 return (EINVAL);
1429 if (*(int *)addr)
1430 sce->state |= UGEN_SHORT_OK;
1431 else
1432 sce->state &= ~UGEN_SHORT_OK;
1433 return (0);
1434 case USB_SET_TIMEOUT:
1435 sce = &sc->sc_endpoints[endpt][IN];
1436 if (sce == NULL
1437 /* XXX this shouldn't happen, but the distinction between
1438 input and output pipes isn't clear enough.
1439 || sce->pipeh == NULL */
1440 )
1441 return (EINVAL);
1442 sce->timeout = *(int *)addr;
1443 return (0);
1444 case USB_SET_BULK_RA:
1445 #ifdef UGEN_BULK_RA_WB
1446 if (endpt == USB_CONTROL_ENDPOINT)
1447 return (EINVAL);
1448 sce = &sc->sc_endpoints[endpt][IN];
1449 if (sce == NULL || sce->pipeh == NULL)
1450 return (EINVAL);
1451 edesc = sce->edesc;
1452 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1453 return (EINVAL);
1454
1455 if (*(int *)addr) {
1456 /* Only turn RA on if it's currently off. */
1457 if (sce->state & UGEN_BULK_RA)
1458 return (0);
1459
1460 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1461 /* shouldn't happen */
1462 return (EINVAL);
1463 sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
1464 if (sce->ra_wb_xfer == NULL)
1465 return (ENOMEM);
1466 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1467 /*
1468 * Set up a dmabuf because we reuse the xfer with
1469 * the same (max) request length like isoc.
1470 */
1471 if (usbd_alloc_buffer(sce->ra_wb_xfer,
1472 sce->ra_wb_xferlen) == 0) {
1473 usbd_free_xfer(sce->ra_wb_xfer);
1474 return (ENOMEM);
1475 }
1476 sce->ibuf = malloc(sce->ra_wb_bufsize,
1477 M_USBDEV, M_WAITOK);
1478 sce->fill = sce->cur = sce->ibuf;
1479 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1480 sce->ra_wb_used = 0;
1481 sce->state |= UGEN_BULK_RA;
1482 sce->state &= ~UGEN_RA_WB_STOP;
1483 /* Now start reading. */
1484 usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
1485 NULL,
1486 min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1487 USBD_NO_COPY, USBD_NO_TIMEOUT,
1488 ugen_bulkra_intr);
1489 err = usbd_transfer(sce->ra_wb_xfer);
1490 if (err != USBD_IN_PROGRESS) {
1491 sce->state &= ~UGEN_BULK_RA;
1492 free(sce->ibuf, M_USBDEV);
1493 sce->ibuf = NULL;
1494 usbd_free_xfer(sce->ra_wb_xfer);
1495 return (EIO);
1496 }
1497 } else {
1498 /* Only turn RA off if it's currently on. */
1499 if (!(sce->state & UGEN_BULK_RA))
1500 return (0);
1501
1502 sce->state &= ~UGEN_BULK_RA;
1503 usbd_abort_pipe(sce->pipeh);
1504 usbd_free_xfer(sce->ra_wb_xfer);
1505 /*
1506 * XXX Discard whatever's in the buffer, but we
1507 * should keep it around and drain the buffer
1508 * instead.
1509 */
1510 free(sce->ibuf, M_USBDEV);
1511 sce->ibuf = NULL;
1512 }
1513 return (0);
1514 #else
1515 return (EOPNOTSUPP);
1516 #endif
1517 case USB_SET_BULK_WB:
1518 #ifdef UGEN_BULK_RA_WB
1519 if (endpt == USB_CONTROL_ENDPOINT)
1520 return (EINVAL);
1521 sce = &sc->sc_endpoints[endpt][OUT];
1522 if (sce == NULL || sce->pipeh == NULL)
1523 return (EINVAL);
1524 edesc = sce->edesc;
1525 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1526 return (EINVAL);
1527
1528 if (*(int *)addr) {
1529 /* Only turn WB on if it's currently off. */
1530 if (sce->state & UGEN_BULK_WB)
1531 return (0);
1532
1533 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1534 /* shouldn't happen */
1535 return (EINVAL);
1536 sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
1537 if (sce->ra_wb_xfer == NULL)
1538 return (ENOMEM);
1539 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1540 /*
1541 * Set up a dmabuf because we reuse the xfer with
1542 * the same (max) request length like isoc.
1543 */
1544 if (usbd_alloc_buffer(sce->ra_wb_xfer,
1545 sce->ra_wb_xferlen) == 0) {
1546 usbd_free_xfer(sce->ra_wb_xfer);
1547 return (ENOMEM);
1548 }
1549 sce->ibuf = malloc(sce->ra_wb_bufsize,
1550 M_USBDEV, M_WAITOK);
1551 sce->fill = sce->cur = sce->ibuf;
1552 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1553 sce->ra_wb_used = 0;
1554 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1555 } else {
1556 /* Only turn WB off if it's currently on. */
1557 if (!(sce->state & UGEN_BULK_WB))
1558 return (0);
1559
1560 sce->state &= ~UGEN_BULK_WB;
1561 /*
1562 * XXX Discard whatever's in the buffer, but we
1563 * should keep it around and keep writing to
1564 * drain the buffer instead.
1565 */
1566 usbd_abort_pipe(sce->pipeh);
1567 usbd_free_xfer(sce->ra_wb_xfer);
1568 free(sce->ibuf, M_USBDEV);
1569 sce->ibuf = NULL;
1570 }
1571 return (0);
1572 #else
1573 return (EOPNOTSUPP);
1574 #endif
1575 case USB_SET_BULK_RA_OPT:
1576 case USB_SET_BULK_WB_OPT:
1577 #ifdef UGEN_BULK_RA_WB
1578 {
1579 struct usb_bulk_ra_wb_opt *opt;
1580
1581 if (endpt == USB_CONTROL_ENDPOINT)
1582 return (EINVAL);
1583 opt = (struct usb_bulk_ra_wb_opt *)addr;
1584 if (cmd == USB_SET_BULK_RA_OPT)
1585 sce = &sc->sc_endpoints[endpt][IN];
1586 else
1587 sce = &sc->sc_endpoints[endpt][OUT];
1588 if (sce == NULL || sce->pipeh == NULL)
1589 return (EINVAL);
1590 if (opt->ra_wb_buffer_size < 1 ||
1591 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1592 opt->ra_wb_request_size < 1 ||
1593 opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1594 return (EINVAL);
1595 /*
1596 * XXX These changes do not take effect until the
1597 * next time RA/WB mode is enabled but they ought to
1598 * take effect immediately.
1599 */
1600 sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1601 sce->ra_wb_reqsize = opt->ra_wb_request_size;
1602 return (0);
1603 }
1604 #else
1605 return (EOPNOTSUPP);
1606 #endif
1607 default:
1608 break;
1609 }
1610
1611 if (endpt != USB_CONTROL_ENDPOINT)
1612 return (EINVAL);
1613
1614 switch (cmd) {
1615 #ifdef UGEN_DEBUG
1616 case USB_SETDEBUG:
1617 ugendebug = *(int *)addr;
1618 break;
1619 #endif
1620 case USB_GET_CONFIG:
1621 err = usbd_get_config(sc->sc_udev, &conf);
1622 if (err)
1623 return (EIO);
1624 *(int *)addr = conf;
1625 break;
1626 case USB_SET_CONFIG:
1627 if (!(flag & FWRITE))
1628 return (EPERM);
1629 err = ugen_set_config(sc, *(int *)addr);
1630 switch (err) {
1631 case USBD_NORMAL_COMPLETION:
1632 break;
1633 case USBD_IN_USE:
1634 return (EBUSY);
1635 default:
1636 return (EIO);
1637 }
1638 break;
1639 case USB_GET_ALTINTERFACE:
1640 ai = (struct usb_alt_interface *)addr;
1641 err = usbd_device2interface_handle(sc->sc_udev,
1642 ai->uai_interface_index, &iface);
1643 if (err)
1644 return (EINVAL);
1645 idesc = usbd_get_interface_descriptor(iface);
1646 if (idesc == NULL)
1647 return (EIO);
1648 ai->uai_alt_no = idesc->bAlternateSetting;
1649 break;
1650 case USB_SET_ALTINTERFACE:
1651 if (!(flag & FWRITE))
1652 return (EPERM);
1653 ai = (struct usb_alt_interface *)addr;
1654 err = usbd_device2interface_handle(sc->sc_udev,
1655 ai->uai_interface_index, &iface);
1656 if (err)
1657 return (EINVAL);
1658 err = ugen_set_interface(sc, ai->uai_interface_index,
1659 ai->uai_alt_no);
1660 if (err)
1661 return (EINVAL);
1662 break;
1663 case USB_GET_NO_ALT:
1664 ai = (struct usb_alt_interface *)addr;
1665 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
1666 if (cdesc == NULL)
1667 return (EINVAL);
1668 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1669 if (idesc == NULL) {
1670 free(cdesc, M_TEMP);
1671 return (EINVAL);
1672 }
1673 ai->uai_alt_no = usbd_get_no_alts(cdesc,
1674 idesc->bInterfaceNumber);
1675 free(cdesc, M_TEMP);
1676 break;
1677 case USB_GET_DEVICE_DESC:
1678 *(usb_device_descriptor_t *)addr =
1679 *usbd_get_device_descriptor(sc->sc_udev);
1680 break;
1681 case USB_GET_CONFIG_DESC:
1682 cd = (struct usb_config_desc *)addr;
1683 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
1684 if (cdesc == NULL)
1685 return (EINVAL);
1686 cd->ucd_desc = *cdesc;
1687 free(cdesc, M_TEMP);
1688 break;
1689 case USB_GET_INTERFACE_DESC:
1690 id = (struct usb_interface_desc *)addr;
1691 cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
1692 if (cdesc == NULL)
1693 return (EINVAL);
1694 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1695 id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1696 alt = ugen_get_alt_index(sc, id->uid_interface_index);
1697 else
1698 alt = id->uid_alt_index;
1699 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1700 if (idesc == NULL) {
1701 free(cdesc, M_TEMP);
1702 return (EINVAL);
1703 }
1704 id->uid_desc = *idesc;
1705 free(cdesc, M_TEMP);
1706 break;
1707 case USB_GET_ENDPOINT_DESC:
1708 ed = (struct usb_endpoint_desc *)addr;
1709 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
1710 if (cdesc == NULL)
1711 return (EINVAL);
1712 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1713 ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1714 alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1715 else
1716 alt = ed->ued_alt_index;
1717 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1718 alt, ed->ued_endpoint_index);
1719 if (edesc == NULL) {
1720 free(cdesc, M_TEMP);
1721 return (EINVAL);
1722 }
1723 ed->ued_desc = *edesc;
1724 free(cdesc, M_TEMP);
1725 break;
1726 case USB_GET_FULL_DESC:
1727 {
1728 int len;
1729 struct iovec iov;
1730 struct uio uio;
1731 struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1732 int error;
1733
		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		if (len > fd->ufd_size)
			len = fd->ufd_size;
1737 iov.iov_base = (void *)fd->ufd_data;
1738 iov.iov_len = len;
1739 uio.uio_iov = &iov;
1740 uio.uio_iovcnt = 1;
1741 uio.uio_resid = len;
1742 uio.uio_offset = 0;
1743 uio.uio_rw = UIO_READ;
1744 uio.uio_vmspace = l->l_proc->p_vmspace;
1745 error = uiomove((void *)cdesc, len, &uio);
1746 free(cdesc, M_TEMP);
1747 return (error);
1748 }
1749 case USB_GET_STRING_DESC: {
1750 int len;
1751 si = (struct usb_string_desc *)addr;
1752 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1753 si->usd_language_id, &si->usd_desc, &len);
1754 if (err)
1755 return (EINVAL);
1756 break;
1757 }
1758 case USB_DO_REQUEST:
1759 {
1760 struct usb_ctl_request *ur = (void *)addr;
1761 int len = UGETW(ur->ucr_request.wLength);
1762 struct iovec iov;
1763 struct uio uio;
1764 void *ptr = 0;
1765 usbd_status xerr;
1766 int error = 0;
1767
1768 if (!(flag & FWRITE))
1769 return (EPERM);
1770 /* Avoid requests that would damage the bus integrity. */
1771 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1772 ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
1773 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1774 ur->ucr_request.bRequest == UR_SET_CONFIG) ||
1775 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
1776 ur->ucr_request.bRequest == UR_SET_INTERFACE))
1777 return (EINVAL);
1778
1779 if (len < 0 || len > 32767)
1780 return (EINVAL);
1781 if (len != 0) {
1782 iov.iov_base = (void *)ur->ucr_data;
1783 iov.iov_len = len;
1784 uio.uio_iov = &iov;
1785 uio.uio_iovcnt = 1;
1786 uio.uio_resid = len;
1787 uio.uio_offset = 0;
1788 uio.uio_rw =
1789 ur->ucr_request.bmRequestType & UT_READ ?
1790 UIO_READ : UIO_WRITE;
1791 uio.uio_vmspace = l->l_proc->p_vmspace;
1792 ptr = malloc(len, M_TEMP, M_WAITOK);
1793 if (uio.uio_rw == UIO_WRITE) {
1794 error = uiomove(ptr, len, &uio);
1795 if (error)
1796 goto ret;
1797 }
1798 }
1799 sce = &sc->sc_endpoints[endpt][IN];
1800 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
1801 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
1802 if (xerr) {
1803 error = EIO;
1804 goto ret;
1805 }
1806 if (len != 0) {
1807 if (uio.uio_rw == UIO_READ) {
1808 error = uiomove(ptr, len, &uio);
1809 if (error)
1810 goto ret;
1811 }
1812 }
1813 ret:
1814 if (ptr)
1815 free(ptr, M_TEMP);
1816 return (error);
1817 }
1818 case USB_GET_DEVICEINFO:
1819 usbd_fill_deviceinfo(sc->sc_udev,
1820 (struct usb_device_info *)addr, 0);
1821 break;
1822 #ifdef COMPAT_30
1823 case USB_GET_DEVICEINFO_OLD:
1824 usbd_fill_deviceinfo_old(sc->sc_udev,
1825 (struct usb_device_info_old *)addr, 0);
1826
1827 break;
1828 #endif
1829 default:
1830 return (EINVAL);
1831 }
1832 return (0);
1833 }
1834
1835 int
1836 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1837 {
1838 int endpt = UGENENDPOINT(dev);
1839 struct ugen_softc *sc;
1840 int error;
1841
1842 USB_GET_SC(ugen, UGENUNIT(dev), sc);
1843
1844 sc->sc_refcnt++;
1845 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1846 if (--sc->sc_refcnt < 0)
1847 usb_detach_wakeup(USBDEV(sc->sc_dev));
1848 return (error);
1849 }
1850
1851 int
1852 ugenpoll(dev_t dev, int events, struct lwp *l)
1853 {
1854 struct ugen_softc *sc;
1855 struct ugen_endpoint *sce_in, *sce_out;
1856 int revents = 0;
1857 int s;
1858
1859 USB_GET_SC(ugen, UGENUNIT(dev), sc);
1860
1861 if (sc->sc_dying)
1862 return (POLLHUP);
1863
1864 sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
1865 sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
1866 if (sce_in == NULL && sce_out == NULL)
1867 return (POLLERR);
1868 #ifdef DIAGNOSTIC
1869 if (!sce_in->edesc && !sce_out->edesc) {
1870 printf("ugenpoll: no edesc\n");
1871 return (POLLERR);
1872 }
1873 /* It's possible to have only one pipe open. */
1874 if (!sce_in->pipeh && !sce_out->pipeh) {
1875 printf("ugenpoll: no pipe\n");
1876 return (POLLERR);
1877 }
1878 #endif
1879 s = splusb();
1880 if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
1881 switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
1882 case UE_INTERRUPT:
1883 if (sce_in->q.c_cc > 0)
1884 revents |= events & (POLLIN | POLLRDNORM);
1885 else
1886 selrecord(l, &sce_in->rsel);
1887 break;
1888 case UE_ISOCHRONOUS:
1889 if (sce_in->cur != sce_in->fill)
1890 revents |= events & (POLLIN | POLLRDNORM);
1891 else
1892 selrecord(l, &sce_in->rsel);
1893 break;
1894 case UE_BULK:
1895 #ifdef UGEN_BULK_RA_WB
1896 if (sce_in->state & UGEN_BULK_RA) {
1897 if (sce_in->ra_wb_used > 0)
1898 revents |= events &
1899 (POLLIN | POLLRDNORM);
1900 else
1901 selrecord(l, &sce_in->rsel);
1902 break;
1903 }
1904 #endif
1905 /*
1906 * We have no easy way of determining if a read will
1907 * yield any data or a write will happen.
1908 * Pretend they will.
1909 */
1910 revents |= events & (POLLIN | POLLRDNORM);
1911 break;
1912 default:
1913 break;
1914 }
1915 if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
1916 switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
1917 case UE_INTERRUPT:
1918 case UE_ISOCHRONOUS:
1919 /* XXX unimplemented */
1920 break;
1921 case UE_BULK:
1922 #ifdef UGEN_BULK_RA_WB
1923 if (sce_out->state & UGEN_BULK_WB) {
1924 if (sce_out->ra_wb_used <
1925 sce_out->limit - sce_out->ibuf)
1926 revents |= events &
1927 (POLLOUT | POLLWRNORM);
1928 else
1929 selrecord(l, &sce_out->rsel);
1930 break;
1931 }
1932 #endif
1933 /*
1934 * We have no easy way of determining if a read will
1935 * yield any data or a write will happen.
1936 * Pretend they will.
1937 */
1938 revents |= events & (POLLOUT | POLLWRNORM);
1939 break;
1940 default:
1941 break;
1942 }
1943
1944
1945 splx(s);
1946 return (revents);
1947 }
1948
1949 static void
1950 filt_ugenrdetach(struct knote *kn)
1951 {
1952 struct ugen_endpoint *sce = kn->kn_hook;
1953 int s;
1954
1955 s = splusb();
1956 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1957 splx(s);
1958 }
1959
1960 static int
1961 filt_ugenread_intr(struct knote *kn, long hint)
1962 {
1963 struct ugen_endpoint *sce = kn->kn_hook;
1964
1965 kn->kn_data = sce->q.c_cc;
1966 return (kn->kn_data > 0);
1967 }
1968
1969 static int
1970 filt_ugenread_isoc(struct knote *kn, long hint)
1971 {
1972 struct ugen_endpoint *sce = kn->kn_hook;
1973
1974 if (sce->cur == sce->fill)
1975 return (0);
1976
1977 if (sce->cur < sce->fill)
1978 kn->kn_data = sce->fill - sce->cur;
1979 else
1980 kn->kn_data = (sce->limit - sce->cur) +
1981 (sce->fill - sce->ibuf);
1982
1983 return (1);
1984 }
1985
1986 #ifdef UGEN_BULK_RA_WB
1987 static int
1988 filt_ugenread_bulk(struct knote *kn, long hint)
1989 {
1990 struct ugen_endpoint *sce = kn->kn_hook;
1991
1992 if (!(sce->state & UGEN_BULK_RA))
1993 /*
1994 * We have no easy way of determining if a read will
1995 * yield any data or a write will happen.
1996 * So, emulate "seltrue".
1997 */
1998 return (filt_seltrue(kn, hint));
1999
2000 if (sce->ra_wb_used == 0)
2001 return (0);
2002
2003 kn->kn_data = sce->ra_wb_used;
2004
2005 return (1);
2006 }
2007
2008 static int
2009 filt_ugenwrite_bulk(struct knote *kn, long hint)
2010 {
2011 struct ugen_endpoint *sce = kn->kn_hook;
2012
2013 if (!(sce->state & UGEN_BULK_WB))
2014 /*
2015 * We have no easy way of determining if a read will
2016 * yield any data or a write will happen.
2017 * So, emulate "seltrue".
2018 */
2019 return (filt_seltrue(kn, hint));
2020
2021 if (sce->ra_wb_used == sce->limit - sce->ibuf)
2022 return (0);
2023
2024 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2025
2026 return (1);
2027 }
2028 #endif
2029
2030 static const struct filterops ugenread_intr_filtops =
2031 { 1, NULL, filt_ugenrdetach, filt_ugenread_intr };
2032
2033 static const struct filterops ugenread_isoc_filtops =
2034 { 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };
2035
2036 #ifdef UGEN_BULK_RA_WB
2037 static const struct filterops ugenread_bulk_filtops =
2038 { 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };
2039
2040 static const struct filterops ugenwrite_bulk_filtops =
2041 { 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2042 #else
2043 static const struct filterops ugen_seltrue_filtops =
2044 { 1, NULL, filt_ugenrdetach, filt_seltrue };
2045 #endif
2046
2047 int
2048 ugenkqfilter(dev_t dev, struct knote *kn)
2049 {
2050 struct ugen_softc *sc;
2051 struct ugen_endpoint *sce;
2052 struct klist *klist;
2053 int s;
2054
2055 USB_GET_SC(ugen, UGENUNIT(dev), sc);
2056
2057 if (sc->sc_dying)
2058 return (ENXIO);
2059
2060 switch (kn->kn_filter) {
2061 case EVFILT_READ:
2062 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
2063 if (sce == NULL)
2064 return (EINVAL);
2065
2066 klist = &sce->rsel.sel_klist;
2067 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2068 case UE_INTERRUPT:
2069 kn->kn_fop = &ugenread_intr_filtops;
2070 break;
2071 case UE_ISOCHRONOUS:
2072 kn->kn_fop = &ugenread_isoc_filtops;
2073 break;
2074 case UE_BULK:
2075 #ifdef UGEN_BULK_RA_WB
2076 kn->kn_fop = &ugenread_bulk_filtops;
2077 break;
2078 #else
2079 /*
2080 * We have no easy way of determining if a read will
2081 * yield any data or a write will happen.
2082 * So, emulate "seltrue".
2083 */
2084 kn->kn_fop = &ugen_seltrue_filtops;
2085 #endif
2086 break;
2087 default:
2088 return (EINVAL);
2089 }
2090 break;
2091
2092 case EVFILT_WRITE:
2093 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
2094 if (sce == NULL)
2095 return (EINVAL);
2096
2097 klist = &sce->rsel.sel_klist;
2098 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2099 case UE_INTERRUPT:
2100 case UE_ISOCHRONOUS:
2101 /* XXX poll doesn't support this */
2102 return (EINVAL);
2103
2104 case UE_BULK:
2105 #ifdef UGEN_BULK_RA_WB
2106 kn->kn_fop = &ugenwrite_bulk_filtops;
2107 #else
2108 /*
2109 * We have no easy way of determining if a read will
2110 * yield any data or a write will happen.
2111 * So, emulate "seltrue".
2112 */
2113 kn->kn_fop = &ugen_seltrue_filtops;
2114 #endif
2115 break;
2116 default:
2117 return (EINVAL);
2118 }
2119 break;
2120
2121 default:
2122 return (EINVAL);
2123 }
2124
2125 kn->kn_hook = sce;
2126
2127 s = splusb();
2128 SLIST_INSERT_HEAD(klist, kn, kn_selnext);
2129 splx(s);
2130
2131 return (0);
2132 }
2133
2134 #if defined(__FreeBSD__)
2135 DRIVER_MODULE(ugen, uhub, ugen_driver, ugen_devclass, usbd_driver_load, 0);
2136 #endif
2137