/*	$NetBSD: ugen.c,v 1.95 2007/12/09 20:28:24 jmcneill Exp $	*/
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. All advertising materials mentioning features or use of this software
25 * must display the following acknowledgement:
26 * This product includes software developed by the NetBSD
27 * Foundation, Inc. and its contributors.
28 * 4. Neither the name of The NetBSD Foundation nor the names of its
29 * contributors may be used to endorse or promote products derived
30 * from this software without specific prior written permission.
31 *
32 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
33 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
34 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
35 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
36 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
37 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
38 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
39 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
40 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGE.
43 */
44
45
46 #include <sys/cdefs.h>
47 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.95 2007/12/09 20:28:24 jmcneill Exp $");
48
49 #include "opt_ugen_bulk_ra_wb.h"
50 #include "opt_compat_netbsd.h"
51
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/kernel.h>
55 #include <sys/malloc.h>
56 #if defined(__NetBSD__) || defined(__OpenBSD__)
57 #include <sys/device.h>
58 #include <sys/ioctl.h>
59 #elif defined(__FreeBSD__)
60 #include <sys/module.h>
61 #include <sys/bus.h>
62 #include <sys/ioccom.h>
63 #include <sys/conf.h>
64 #include <sys/fcntl.h>
65 #include <sys/filio.h>
66 #endif
67 #include <sys/conf.h>
68 #include <sys/tty.h>
69 #include <sys/file.h>
70 #include <sys/select.h>
71 #include <sys/proc.h>
72 #include <sys/vnode.h>
73 #include <sys/poll.h>
74
75 #include <dev/usb/usb.h>
76 #include <dev/usb/usbdi.h>
77 #include <dev/usb/usbdi_util.h>
78
79 #ifdef UGEN_DEBUG
80 #define DPRINTF(x) if (ugendebug) logprintf x
81 #define DPRINTFN(n,x) if (ugendebug>(n)) logprintf x
82 int ugendebug = 0;
83 #else
84 #define DPRINTF(x)
85 #define DPRINTFN(n,x)
86 #endif
87
88 #define UGEN_CHUNK 128 /* chunk size for read */
89 #define UGEN_IBSIZE 1020 /* buffer size */
90 #define UGEN_BBSIZE 1024
91
92 #define UGEN_NISOFRAMES 500 /* 0.5 seconds worth */
93 #define UGEN_NISOREQS 6 /* number of outstanding xfer requests */
#define UGEN_NISORFRMS	4	/* number of frames (milliseconds) per req */
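/*
 * Isochronous input is staged in a ring of wMaxPacketSize * UGEN_NISOFRAMES
 * bytes (about half a second of data at one frame per millisecond), with
 * UGEN_NISOREQS transfers of UGEN_NISORFRMS frames each kept outstanding,
 * i.e. 6 * 4 = 24 ms of transfers in flight.  For example, a full-speed
 * endpoint with a 1023-byte wMaxPacketSize gets a 1023 * 500 = 511500 byte
 * (roughly 500 KB) ring.
 */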
95
96 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
97 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
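/*
 * Bulk read-ahead/write-behind uses a 16 KB ring per endpoint by default;
 * USB_SET_BULK_RA_OPT/USB_SET_BULK_WB_OPT can raise that up to
 * UGEN_BULK_RA_WB_BUFMAX (1 MB).
 */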
98
99 struct ugen_endpoint {
100 struct ugen_softc *sc;
101 usb_endpoint_descriptor_t *edesc;
102 usbd_interface_handle iface;
103 int state;
104 #define UGEN_ASLP 0x02 /* waiting for data */
105 #define UGEN_SHORT_OK 0x04 /* short xfers are OK */
106 #define UGEN_BULK_RA 0x08 /* in bulk read-ahead mode */
107 #define UGEN_BULK_WB 0x10 /* in bulk write-behind mode */
108 #define UGEN_RA_WB_STOP 0x20 /* RA/WB xfer is stopped (buffer full/empty) */
109 usbd_pipe_handle pipeh;
110 struct clist q;
111 struct selinfo rsel;
112 u_char *ibuf; /* start of buffer (circular for isoc) */
113 u_char *fill; /* location for input (isoc) */
114 u_char *limit; /* end of circular buffer (isoc) */
115 u_char *cur; /* current read location (isoc) */
116 u_int32_t timeout;
117 #ifdef UGEN_BULK_RA_WB
118 u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
119 u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
120 u_int32_t ra_wb_used; /* how much is in buffer */
121 u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
122 usbd_xfer_handle ra_wb_xfer;
123 #endif
124 struct isoreq {
125 struct ugen_endpoint *sce;
126 usbd_xfer_handle xfer;
127 void *dmabuf;
128 u_int16_t sizes[UGEN_NISORFRMS];
129 } isoreqs[UGEN_NISOREQS];
130 };
131
132 struct ugen_softc {
133 USBBASEDEVICE sc_dev; /* base device */
134 usbd_device_handle sc_udev;
135
136 char sc_is_open[USB_MAX_ENDPOINTS];
137 struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
138 #define OUT 0
139 #define IN 1
140
141 int sc_refcnt;
142 char sc_buffer[UGEN_BBSIZE];
143 u_char sc_dying;
144 };
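/*
 * Per-endpoint state is indexed by endpoint address (0-15) and transfer
 * direction (OUT/IN), so one address may have both an output and an input
 * pipe open at the same time.  sc_is_open[] tracks which addresses are
 * open, sc_refcnt counts callers inside the driver so detach can wait for
 * them to drain, and sc_dying is set when the device is deactivated or
 * detached.
 */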
145
146 #if defined(__NetBSD__)
147 dev_type_open(ugenopen);
148 dev_type_close(ugenclose);
149 dev_type_read(ugenread);
150 dev_type_write(ugenwrite);
151 dev_type_ioctl(ugenioctl);
152 dev_type_poll(ugenpoll);
153 dev_type_kqfilter(ugenkqfilter);
154
155 const struct cdevsw ugen_cdevsw = {
156 ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
157 nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
158 };
159 #elif defined(__OpenBSD__)
160 cdev_decl(ugen);
161 #elif defined(__FreeBSD__)
162 d_open_t ugenopen;
163 d_close_t ugenclose;
164 d_read_t ugenread;
165 d_write_t ugenwrite;
166 d_ioctl_t ugenioctl;
167 d_poll_t ugenpoll;
168
169 #define UGEN_CDEV_MAJOR 114
170
171 Static struct cdevsw ugen_cdevsw = {
172 /* open */ ugenopen,
173 /* close */ ugenclose,
174 /* read */ ugenread,
175 /* write */ ugenwrite,
176 /* ioctl */ ugenioctl,
177 /* poll */ ugenpoll,
178 /* mmap */ nommap,
179 /* strategy */ nostrategy,
180 /* name */ "ugen",
181 /* maj */ UGEN_CDEV_MAJOR,
182 /* dump */ nodump,
183 /* psize */ nopsize,
184 /* flags */ 0,
185 /* bmaj */ -1
186 };
187 #endif
188
189 Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
190 usbd_status status);
191 Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
192 usbd_status status);
193 #ifdef UGEN_BULK_RA_WB
194 Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
195 usbd_status status);
196 Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
197 usbd_status status);
198 #endif
199 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
200 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
201 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
202 void *, int, struct lwp *);
203 Static int ugen_set_config(struct ugen_softc *sc, int configno);
204 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
205 int index, int *lenp);
206 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
207 Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);
208
209 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
210 #define UGENENDPOINT(n) (minor(n) & 0xf)
211 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
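/*
 * The minor number packs the unit in its high nibble and the endpoint
 * address in its low nibble, so at most 16 units with 16 endpoint
 * addresses each can be reached.  For example, minor 0x25 selects unit 2,
 * endpoint 5, conventionally the node /dev/ugen2.05; endpoint 0
 * (/dev/ugenN.00) is the control endpoint.
 */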
212
213 USB_DECLARE_DRIVER(ugen);
214
215 USB_MATCH(ugen)
216 {
217 USB_MATCH_START(ugen, uaa);
218
219 if (match->cf_flags & 1)
220 return (UMATCH_HIGHEST);
221 else if (uaa->usegeneric)
222 return (UMATCH_GENERIC);
223 else
224 return (UMATCH_NONE);
225 }
226
227 USB_ATTACH(ugen)
228 {
229 USB_ATTACH_START(ugen, sc, uaa);
230 usbd_device_handle udev;
231 char *devinfop;
232 usbd_status err;
233 int conf;
234
235 devinfop = usbd_devinfo_alloc(uaa->device, 0);
236 USB_ATTACH_SETUP;
237 aprint_normal("%s: %s\n", USBDEVNAME(sc->sc_dev), devinfop);
238 usbd_devinfo_free(devinfop);
239
240 sc->sc_udev = udev = uaa->device;
241
242 /* First set configuration index 0, the default one for ugen. */
243 err = usbd_set_config_index(udev, 0, 0);
244 if (err) {
245 aprint_error("%s: setting configuration index 0 failed\n",
246 USBDEVNAME(sc->sc_dev));
247 sc->sc_dying = 1;
248 USB_ATTACH_ERROR_RETURN;
249 }
250 conf = usbd_get_config_descriptor(udev)->bConfigurationValue;
251
252 /* Set up all the local state for this configuration. */
253 err = ugen_set_config(sc, conf);
254 if (err) {
255 aprint_error("%s: setting configuration %d failed\n",
256 USBDEVNAME(sc->sc_dev), conf);
257 sc->sc_dying = 1;
258 USB_ATTACH_ERROR_RETURN;
259 }
260
261 #ifdef __FreeBSD__
262 {
263 static int global_init_done = 0;
264 if (!global_init_done) {
265 cdevsw_add(&ugen_cdevsw);
266 global_init_done = 1;
267 }
268 }
269 #endif
270
271 usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
272 USBDEV(sc->sc_dev));
273
274 if (!pmf_device_register(self, NULL, NULL))
275 aprint_error_dev(self, "couldn't establish power handler\n");
276
277 USB_ATTACH_SUCCESS_RETURN;
278 }
279
280 Static int
281 ugen_set_config(struct ugen_softc *sc, int configno)
282 {
283 usbd_device_handle dev = sc->sc_udev;
284 usbd_interface_handle iface;
285 usb_endpoint_descriptor_t *ed;
286 struct ugen_endpoint *sce;
287 u_int8_t niface, nendpt;
288 int ifaceno, endptno, endpt;
289 usbd_status err;
290 int dir;
291
292 DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
293 USBDEVNAME(sc->sc_dev), configno, sc));
294
295 /*
296 * We start at 1, not 0, because we don't care whether the
297 * control endpoint is open or not. It is always present.
298 */
299 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
300 if (sc->sc_is_open[endptno]) {
301 DPRINTFN(1,
302 ("ugen_set_config: %s - endpoint %d is open\n",
303 USBDEVNAME(sc->sc_dev), endptno));
304 return (USBD_IN_USE);
305 }
306
307 /* Avoid setting the current value. */
308 if (usbd_get_config_descriptor(dev)->bConfigurationValue != configno) {
309 err = usbd_set_config_no(dev, configno, 1);
310 if (err)
311 return (err);
312 }
313
314 err = usbd_interface_count(dev, &niface);
315 if (err)
316 return (err);
317 memset(sc->sc_endpoints, 0, sizeof sc->sc_endpoints);
318 for (ifaceno = 0; ifaceno < niface; ifaceno++) {
319 DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
320 err = usbd_device2interface_handle(dev, ifaceno, &iface);
321 if (err)
322 return (err);
323 err = usbd_endpoint_count(iface, &nendpt);
324 if (err)
325 return (err);
326 for (endptno = 0; endptno < nendpt; endptno++) {
327 ed = usbd_interface2endpoint_descriptor(iface,endptno);
328 KASSERT(ed != NULL);
329 endpt = ed->bEndpointAddress;
330 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
331 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
332 DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
333 "(%d,%d), sce=%p\n",
334 endptno, endpt, UE_GET_ADDR(endpt),
335 UE_GET_DIR(endpt), sce));
336 sce->sc = sc;
337 sce->edesc = ed;
338 sce->iface = iface;
339 }
340 }
341 return (USBD_NORMAL_COMPLETION);
342 }
343
344 int
345 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
346 {
347 struct ugen_softc *sc;
348 int unit = UGENUNIT(dev);
349 int endpt = UGENENDPOINT(dev);
350 usb_endpoint_descriptor_t *edesc;
351 struct ugen_endpoint *sce;
352 int dir, isize;
353 usbd_status err;
354 usbd_xfer_handle xfer;
355 void *tbuf;
356 int i, j;
357
358 USB_GET_SC_OPEN(ugen, unit, sc);
359
360 DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
361 flag, mode, unit, endpt));
362
363 if (sc == NULL || sc->sc_dying)
364 return (ENXIO);
365
366 /* The control endpoint allows multiple opens. */
367 if (endpt == USB_CONTROL_ENDPOINT) {
368 sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
369 return (0);
370 }
371
372 if (sc->sc_is_open[endpt])
373 return (EBUSY);
374
375 /* Make sure there are pipes for all directions. */
376 for (dir = OUT; dir <= IN; dir++) {
377 if (flag & (dir == OUT ? FWRITE : FREAD)) {
378 sce = &sc->sc_endpoints[endpt][dir];
379 if (sce == 0 || sce->edesc == 0)
380 return (ENXIO);
381 }
382 }
383
384 /* Actually open the pipes. */
385 /* XXX Should back out properly if it fails. */
386 for (dir = OUT; dir <= IN; dir++) {
387 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
388 continue;
389 sce = &sc->sc_endpoints[endpt][dir];
390 sce->state = 0;
391 sce->timeout = USBD_NO_TIMEOUT;
392 DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
393 sc, endpt, dir, sce));
394 edesc = sce->edesc;
395 switch (edesc->bmAttributes & UE_XFERTYPE) {
396 case UE_INTERRUPT:
397 if (dir == OUT) {
398 err = usbd_open_pipe(sce->iface,
399 edesc->bEndpointAddress, 0, &sce->pipeh);
400 if (err)
401 return (EIO);
402 break;
403 }
404 isize = UGETW(edesc->wMaxPacketSize);
405 if (isize == 0) /* shouldn't happen */
406 return (EINVAL);
407 sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
408 DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
409 endpt, isize));
410 if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1)
411 return (ENOMEM);
412 err = usbd_open_pipe_intr(sce->iface,
413 edesc->bEndpointAddress,
414 USBD_SHORT_XFER_OK, &sce->pipeh, sce,
415 sce->ibuf, isize, ugenintr,
416 USBD_DEFAULT_INTERVAL);
417 if (err) {
418 free(sce->ibuf, M_USBDEV);
419 clfree(&sce->q);
420 return (EIO);
421 }
422 DPRINTFN(5, ("ugenopen: interrupt open done\n"));
423 break;
424 case UE_BULK:
425 err = usbd_open_pipe(sce->iface,
426 edesc->bEndpointAddress, 0, &sce->pipeh);
427 if (err)
428 return (EIO);
429 #ifdef UGEN_BULK_RA_WB
430 sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
431 /*
432 * Use request size for non-RA/WB transfers
433 * as the default.
434 */
435 sce->ra_wb_reqsize = UGEN_BBSIZE;
436 #endif
437 break;
438 case UE_ISOCHRONOUS:
439 if (dir == OUT)
440 return (EINVAL);
441 isize = UGETW(edesc->wMaxPacketSize);
442 if (isize == 0) /* shouldn't happen */
443 return (EINVAL);
444 sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
445 M_USBDEV, M_WAITOK);
446 sce->cur = sce->fill = sce->ibuf;
447 sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
448 DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
449 endpt, isize));
450 err = usbd_open_pipe(sce->iface,
451 edesc->bEndpointAddress, 0, &sce->pipeh);
452 if (err) {
453 free(sce->ibuf, M_USBDEV);
454 return (EIO);
455 }
456 for(i = 0; i < UGEN_NISOREQS; ++i) {
457 sce->isoreqs[i].sce = sce;
458 xfer = usbd_alloc_xfer(sc->sc_udev);
459 if (xfer == 0)
460 goto bad;
461 sce->isoreqs[i].xfer = xfer;
462 tbuf = usbd_alloc_buffer
463 (xfer, isize * UGEN_NISORFRMS);
464 if (tbuf == 0) {
465 i++;
466 goto bad;
467 }
468 sce->isoreqs[i].dmabuf = tbuf;
469 for(j = 0; j < UGEN_NISORFRMS; ++j)
470 sce->isoreqs[i].sizes[j] = isize;
471 usbd_setup_isoc_xfer
472 (xfer, sce->pipeh, &sce->isoreqs[i],
473 sce->isoreqs[i].sizes,
474 UGEN_NISORFRMS, USBD_NO_COPY,
475 ugen_isoc_rintr);
476 (void)usbd_transfer(xfer);
477 }
478 DPRINTFN(5, ("ugenopen: isoc open done\n"));
479 break;
480 bad:
481 while (--i >= 0) /* implicit buffer free */
482 usbd_free_xfer(sce->isoreqs[i].xfer);
483 return (ENOMEM);
484 case UE_CONTROL:
485 sce->timeout = USBD_DEFAULT_TIMEOUT;
486 return (EINVAL);
487 }
488 }
489 sc->sc_is_open[endpt] = 1;
490 return (0);
491 }
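/*
 * Illustrative userland sketch (not part of this driver): once a device is
 * claimed by ugen, an IN endpoint can be read like a regular file; each
 * read(2) returns data as the endpoint delivers it.  The path
 * /dev/ugen0.01 below is an assumption (unit 0, endpoint 1).
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		char buf[64];
 *		ssize_t n;
 *		int fd = open("/dev/ugen0.01", O_RDONLY);
 *
 *		if (fd == -1)
 *			return 1;
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			printf("read %zd bytes\n", n);
 *		close(fd);
 *		return 0;
 *	}
 */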
492
493 int
494 ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
495 {
496 int endpt = UGENENDPOINT(dev);
497 struct ugen_softc *sc;
498 struct ugen_endpoint *sce;
499 int dir;
500 int i;
501
502 USB_GET_SC(ugen, UGENUNIT(dev), sc);
503
504 DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
505 flag, mode, UGENUNIT(dev), endpt));
506
507 #ifdef DIAGNOSTIC
508 if (!sc->sc_is_open[endpt]) {
509 printf("ugenclose: not open\n");
510 return (EINVAL);
511 }
512 #endif
513
514 if (endpt == USB_CONTROL_ENDPOINT) {
515 DPRINTFN(5, ("ugenclose: close control\n"));
516 sc->sc_is_open[endpt] = 0;
517 return (0);
518 }
519
520 for (dir = OUT; dir <= IN; dir++) {
521 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
522 continue;
523 sce = &sc->sc_endpoints[endpt][dir];
524 if (sce == NULL || sce->pipeh == NULL)
525 continue;
526 DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
527 endpt, dir, sce));
528
529 usbd_abort_pipe(sce->pipeh);
530 usbd_close_pipe(sce->pipeh);
531 sce->pipeh = NULL;
532
533 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
534 case UE_INTERRUPT:
535 ndflush(&sce->q, sce->q.c_cc);
536 clfree(&sce->q);
537 break;
538 case UE_ISOCHRONOUS:
539 for (i = 0; i < UGEN_NISOREQS; ++i)
540 usbd_free_xfer(sce->isoreqs[i].xfer);
541 break;
542 #ifdef UGEN_BULK_RA_WB
543 case UE_BULK:
544 if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
545 /* ibuf freed below */
546 usbd_free_xfer(sce->ra_wb_xfer);
547 break;
548 #endif
549 default:
550 break;
551 }
552
553 if (sce->ibuf != NULL) {
554 free(sce->ibuf, M_USBDEV);
555 sce->ibuf = NULL;
556 clfree(&sce->q);
557 }
558 }
559 sc->sc_is_open[endpt] = 0;
560
561 return (0);
562 }
563
564 Static int
565 ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
566 {
567 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
568 u_int32_t n, tn;
569 usbd_xfer_handle xfer;
570 usbd_status err;
571 int s;
572 int error = 0;
573
574 DPRINTFN(5, ("%s: ugenread: %d\n", USBDEVNAME(sc->sc_dev), endpt));
575
576 if (sc->sc_dying)
577 return (EIO);
578
579 if (endpt == USB_CONTROL_ENDPOINT)
580 return (ENODEV);
581
582 #ifdef DIAGNOSTIC
583 if (sce->edesc == NULL) {
584 printf("ugenread: no edesc\n");
585 return (EIO);
586 }
587 if (sce->pipeh == NULL) {
588 printf("ugenread: no pipe\n");
589 return (EIO);
590 }
591 #endif
592
593 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
594 case UE_INTERRUPT:
		/* Block until there is data to read. */
596 s = splusb();
597 while (sce->q.c_cc == 0) {
598 if (flag & IO_NDELAY) {
599 splx(s);
600 return (EWOULDBLOCK);
601 }
602 sce->state |= UGEN_ASLP;
603 DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
604 error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
605 DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
606 if (sc->sc_dying)
607 error = EIO;
608 if (error) {
609 sce->state &= ~UGEN_ASLP;
610 break;
611 }
612 }
613 splx(s);
614
615 /* Transfer as many chunks as possible. */
616 while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
617 n = min(sce->q.c_cc, uio->uio_resid);
618 if (n > sizeof(sc->sc_buffer))
619 n = sizeof(sc->sc_buffer);
620
621 /* Remove a small chunk from the input queue. */
622 q_to_b(&sce->q, sc->sc_buffer, n);
623 DPRINTFN(5, ("ugenread: got %d chars\n", n));
624
625 /* Copy the data to the user process. */
626 error = uiomove(sc->sc_buffer, n, uio);
627 if (error)
628 break;
629 }
630 break;
631 case UE_BULK:
632 #ifdef UGEN_BULK_RA_WB
633 if (sce->state & UGEN_BULK_RA) {
634 DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
635 uio->uio_resid, sce->ra_wb_used));
636 xfer = sce->ra_wb_xfer;
637
638 s = splusb();
639 if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
640 splx(s);
641 return (EWOULDBLOCK);
642 }
643 while (uio->uio_resid > 0 && !error) {
644 while (sce->ra_wb_used == 0) {
645 sce->state |= UGEN_ASLP;
646 DPRINTFN(5,
647 ("ugenread: sleep on %p\n",
648 sce));
649 error = tsleep(sce, PZERO | PCATCH,
650 "ugenrb", 0);
651 DPRINTFN(5,
652 ("ugenread: woke, error=%d\n",
653 error));
654 if (sc->sc_dying)
655 error = EIO;
656 if (error) {
657 sce->state &= ~UGEN_ASLP;
658 break;
659 }
660 }
661
662 /* Copy data to the process. */
663 while (uio->uio_resid > 0
664 && sce->ra_wb_used > 0) {
665 n = min(uio->uio_resid,
666 sce->ra_wb_used);
667 n = min(n, sce->limit - sce->cur);
668 error = uiomove(sce->cur, n, uio);
669 if (error)
670 break;
671 sce->cur += n;
672 sce->ra_wb_used -= n;
673 if (sce->cur == sce->limit)
674 sce->cur = sce->ibuf;
675 }
676
677 /*
678 * If the transfers stopped because the
679 * buffer was full, restart them.
680 */
681 if (sce->state & UGEN_RA_WB_STOP &&
682 sce->ra_wb_used < sce->limit - sce->ibuf) {
683 n = (sce->limit - sce->ibuf)
684 - sce->ra_wb_used;
685 usbd_setup_xfer(xfer,
686 sce->pipeh, sce, NULL,
687 min(n, sce->ra_wb_xferlen),
688 USBD_NO_COPY, USBD_NO_TIMEOUT,
689 ugen_bulkra_intr);
690 sce->state &= ~UGEN_RA_WB_STOP;
691 err = usbd_transfer(xfer);
692 if (err != USBD_IN_PROGRESS)
693 /*
694 * The transfer has not been
695 * queued. Setting STOP
696 * will make us try
697 * again at the next read.
698 */
699 sce->state |= UGEN_RA_WB_STOP;
700 }
701 }
702 splx(s);
703 break;
704 }
705 #endif
706 xfer = usbd_alloc_xfer(sc->sc_udev);
707 if (xfer == 0)
708 return (ENOMEM);
709 while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
710 DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
711 tn = n;
712 err = usbd_bulk_transfer(
713 xfer, sce->pipeh,
714 sce->state & UGEN_SHORT_OK ?
715 USBD_SHORT_XFER_OK : 0,
716 sce->timeout, sc->sc_buffer, &tn, "ugenrb");
717 if (err) {
718 if (err == USBD_INTERRUPTED)
719 error = EINTR;
720 else if (err == USBD_TIMEOUT)
721 error = ETIMEDOUT;
722 else
723 error = EIO;
724 break;
725 }
726 DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
727 error = uiomove(sc->sc_buffer, tn, uio);
728 if (error || tn < n)
729 break;
730 }
731 usbd_free_xfer(xfer);
732 break;
733 case UE_ISOCHRONOUS:
734 s = splusb();
735 while (sce->cur == sce->fill) {
736 if (flag & IO_NDELAY) {
737 splx(s);
738 return (EWOULDBLOCK);
739 }
740 sce->state |= UGEN_ASLP;
741 DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
742 error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
743 DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
744 if (sc->sc_dying)
745 error = EIO;
746 if (error) {
747 sce->state &= ~UGEN_ASLP;
748 break;
749 }
750 }
751
752 while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
753 if(sce->fill > sce->cur)
754 n = min(sce->fill - sce->cur, uio->uio_resid);
755 else
756 n = min(sce->limit - sce->cur, uio->uio_resid);
757
758 DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));
759
760 /* Copy the data to the user process. */
761 error = uiomove(sce->cur, n, uio);
762 if (error)
763 break;
764 sce->cur += n;
765 if(sce->cur >= sce->limit)
766 sce->cur = sce->ibuf;
767 }
768 splx(s);
769 break;
770
771
772 default:
773 return (ENXIO);
774 }
775 return (error);
776 }
777
778 int
779 ugenread(dev_t dev, struct uio *uio, int flag)
780 {
781 int endpt = UGENENDPOINT(dev);
782 struct ugen_softc *sc;
783 int error;
784
785 USB_GET_SC(ugen, UGENUNIT(dev), sc);
786
787 sc->sc_refcnt++;
788 error = ugen_do_read(sc, endpt, uio, flag);
789 if (--sc->sc_refcnt < 0)
790 usb_detach_wakeup(USBDEV(sc->sc_dev));
791 return (error);
792 }
793
794 Static int
795 ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
796 int flag)
797 {
798 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
799 u_int32_t n;
800 int error = 0;
801 #ifdef UGEN_BULK_RA_WB
802 int s;
803 u_int32_t tn;
804 char *dbuf;
805 #endif
806 usbd_xfer_handle xfer;
807 usbd_status err;
808
809 DPRINTFN(5, ("%s: ugenwrite: %d\n", USBDEVNAME(sc->sc_dev), endpt));
810
811 if (sc->sc_dying)
812 return (EIO);
813
814 if (endpt == USB_CONTROL_ENDPOINT)
815 return (ENODEV);
816
817 #ifdef DIAGNOSTIC
818 if (sce->edesc == NULL) {
819 printf("ugenwrite: no edesc\n");
820 return (EIO);
821 }
822 if (sce->pipeh == NULL) {
823 printf("ugenwrite: no pipe\n");
824 return (EIO);
825 }
826 #endif
827
828 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
829 case UE_BULK:
830 #ifdef UGEN_BULK_RA_WB
831 if (sce->state & UGEN_BULK_WB) {
832 DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
833 uio->uio_resid, sce->ra_wb_used));
834 xfer = sce->ra_wb_xfer;
835
836 s = splusb();
837 if (sce->ra_wb_used == sce->limit - sce->ibuf &&
838 flag & IO_NDELAY) {
839 splx(s);
840 return (EWOULDBLOCK);
841 }
842 while (uio->uio_resid > 0 && !error) {
843 while (sce->ra_wb_used ==
844 sce->limit - sce->ibuf) {
845 sce->state |= UGEN_ASLP;
846 DPRINTFN(5,
847 ("ugenwrite: sleep on %p\n",
848 sce));
849 error = tsleep(sce, PZERO | PCATCH,
850 "ugenwb", 0);
851 DPRINTFN(5,
852 ("ugenwrite: woke, error=%d\n",
853 error));
854 if (sc->sc_dying)
855 error = EIO;
856 if (error) {
857 sce->state &= ~UGEN_ASLP;
858 break;
859 }
860 }
861
862 /* Copy data from the process. */
863 while (uio->uio_resid > 0 &&
864 sce->ra_wb_used < sce->limit - sce->ibuf) {
865 n = min(uio->uio_resid,
866 (sce->limit - sce->ibuf)
867 - sce->ra_wb_used);
868 n = min(n, sce->limit - sce->fill);
869 error = uiomove(sce->fill, n, uio);
870 if (error)
871 break;
872 sce->fill += n;
873 sce->ra_wb_used += n;
874 if (sce->fill == sce->limit)
875 sce->fill = sce->ibuf;
876 }
877
878 /*
879 * If the transfers stopped because the
880 * buffer was empty, restart them.
881 */
882 if (sce->state & UGEN_RA_WB_STOP &&
883 sce->ra_wb_used > 0) {
884 dbuf = (char *)usbd_get_buffer(xfer);
885 n = min(sce->ra_wb_used,
886 sce->ra_wb_xferlen);
887 tn = min(n, sce->limit - sce->cur);
888 memcpy(dbuf, sce->cur, tn);
889 dbuf += tn;
890 if (n - tn > 0)
891 memcpy(dbuf, sce->ibuf,
892 n - tn);
893 usbd_setup_xfer(xfer,
894 sce->pipeh, sce, NULL, n,
895 USBD_NO_COPY, USBD_NO_TIMEOUT,
896 ugen_bulkwb_intr);
897 sce->state &= ~UGEN_RA_WB_STOP;
898 err = usbd_transfer(xfer);
899 if (err != USBD_IN_PROGRESS)
900 /*
901 * The transfer has not been
902 * queued. Setting STOP
903 * will make us try again
						 * at the next write.
905 */
906 sce->state |= UGEN_RA_WB_STOP;
907 }
908 }
909 splx(s);
910 break;
911 }
912 #endif
913 xfer = usbd_alloc_xfer(sc->sc_udev);
914 if (xfer == 0)
915 return (EIO);
916 while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
917 error = uiomove(sc->sc_buffer, n, uio);
918 if (error)
919 break;
920 DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
921 err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
922 sce->timeout, sc->sc_buffer, &n,"ugenwb");
923 if (err) {
924 if (err == USBD_INTERRUPTED)
925 error = EINTR;
926 else if (err == USBD_TIMEOUT)
927 error = ETIMEDOUT;
928 else
929 error = EIO;
930 break;
931 }
932 }
933 usbd_free_xfer(xfer);
934 break;
935 case UE_INTERRUPT:
936 xfer = usbd_alloc_xfer(sc->sc_udev);
937 if (xfer == 0)
938 return (EIO);
939 while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
940 uio->uio_resid)) != 0) {
941 error = uiomove(sc->sc_buffer, n, uio);
942 if (error)
943 break;
944 DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
945 err = usbd_intr_transfer(xfer, sce->pipeh, 0,
946 sce->timeout, sc->sc_buffer, &n, "ugenwi");
947 if (err) {
948 if (err == USBD_INTERRUPTED)
949 error = EINTR;
950 else if (err == USBD_TIMEOUT)
951 error = ETIMEDOUT;
952 else
953 error = EIO;
954 break;
955 }
956 }
957 usbd_free_xfer(xfer);
958 break;
959 default:
960 return (ENXIO);
961 }
962 return (error);
963 }
964
965 int
966 ugenwrite(dev_t dev, struct uio *uio, int flag)
967 {
968 int endpt = UGENENDPOINT(dev);
969 struct ugen_softc *sc;
970 int error;
971
972 USB_GET_SC(ugen, UGENUNIT(dev), sc);
973
974 sc->sc_refcnt++;
975 error = ugen_do_write(sc, endpt, uio, flag);
976 if (--sc->sc_refcnt < 0)
977 usb_detach_wakeup(USBDEV(sc->sc_dev));
978 return (error);
979 }
980
981 #if defined(__NetBSD__) || defined(__OpenBSD__)
982 int
983 ugen_activate(device_ptr_t self, enum devact act)
984 {
985 struct ugen_softc *sc = (struct ugen_softc *)self;
986
987 switch (act) {
988 case DVACT_ACTIVATE:
989 return (EOPNOTSUPP);
990
991 case DVACT_DEACTIVATE:
992 sc->sc_dying = 1;
993 break;
994 }
995 return (0);
996 }
997 #endif
998
999 USB_DETACH(ugen)
1000 {
1001 USB_DETACH_START(ugen, sc);
1002 struct ugen_endpoint *sce;
1003 int i, dir;
1004 int s;
1005 #if defined(__NetBSD__) || defined(__OpenBSD__)
1006 int maj, mn;
1007
1008 DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
1009 #elif defined(__FreeBSD__)
1010 DPRINTF(("ugen_detach: sc=%p\n", sc));
1011 #endif
1012
1013 sc->sc_dying = 1;
1014 /* Abort all pipes. Causes processes waiting for transfer to wake. */
1015 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1016 for (dir = OUT; dir <= IN; dir++) {
1017 sce = &sc->sc_endpoints[i][dir];
1018 if (sce && sce->pipeh)
1019 usbd_abort_pipe(sce->pipeh);
1020 }
1021 }
1022
1023 s = splusb();
1024 if (--sc->sc_refcnt >= 0) {
1025 /* Wake everyone */
1026 for (i = 0; i < USB_MAX_ENDPOINTS; i++)
1027 wakeup(&sc->sc_endpoints[i][IN]);
1028 /* Wait for processes to go away. */
1029 usb_detach_wait(USBDEV(sc->sc_dev));
1030 }
1031 splx(s);
1032
1033 #if defined(__NetBSD__) || defined(__OpenBSD__)
1034 /* locate the major number */
1035 #if defined(__NetBSD__)
1036 maj = cdevsw_lookup_major(&ugen_cdevsw);
1037 #elif defined(__OpenBSD__)
1038 for (maj = 0; maj < nchrdev; maj++)
1039 if (cdevsw[maj].d_open == ugenopen)
1040 break;
1041 #endif
1042
1043 /* Nuke the vnodes for any open instances (calls close). */
1044 mn = device_unit(self) * USB_MAX_ENDPOINTS;
1045 vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
1046 #elif defined(__FreeBSD__)
1047 /* XXX not implemented yet */
1048 #endif
1049
1050 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
1051 USBDEV(sc->sc_dev));
1052
1053 return (0);
1054 }
1055
1056 Static void
1057 ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
1058 {
1059 struct ugen_endpoint *sce = addr;
1060 /*struct ugen_softc *sc = sce->sc;*/
1061 u_int32_t count;
1062 u_char *ibuf;
1063
1064 if (status == USBD_CANCELLED)
1065 return;
1066
1067 if (status != USBD_NORMAL_COMPLETION) {
1068 DPRINTF(("ugenintr: status=%d\n", status));
1069 if (status == USBD_STALLED)
1070 usbd_clear_endpoint_stall_async(sce->pipeh);
1071 return;
1072 }
1073
1074 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1075 ibuf = sce->ibuf;
1076
1077 DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
1078 xfer, status, count));
1079 DPRINTFN(5, (" data = %02x %02x %02x\n",
1080 ibuf[0], ibuf[1], ibuf[2]));
1081
1082 (void)b_to_q(ibuf, count, &sce->q);
1083
1084 if (sce->state & UGEN_ASLP) {
1085 sce->state &= ~UGEN_ASLP;
1086 DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
1087 wakeup(sce);
1088 }
1089 selnotify(&sce->rsel, 0);
1090 }
1091
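/*
 * Isochronous input completion: the interrupt side is the producer and
 * copies each frame into the ring at sce->fill, wrapping at sce->limit;
 * ugen_do_read is the consumer and advances sce->cur.  cur == fill means
 * the ring is empty; if new data would overrun the reader, the oldest
 * input is discarded first.
 */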
1092 Static void
1093 ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
1094 usbd_status status)
1095 {
1096 struct isoreq *req = addr;
1097 struct ugen_endpoint *sce = req->sce;
1098 u_int32_t count, n;
1099 int i, isize;
1100
1101 /* Return if we are aborting. */
1102 if (status == USBD_CANCELLED)
1103 return;
1104
1105 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1106 DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1107 (long)(req - sce->isoreqs), count));
1108
1109 /* throw away oldest input if the buffer is full */
1110 if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1111 sce->cur += count;
1112 if(sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->cur - sce->limit);
1114 DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1115 count));
1116 }
1117
1118 isize = UGETW(sce->edesc->wMaxPacketSize);
1119 for (i = 0; i < UGEN_NISORFRMS; i++) {
1120 u_int32_t actlen = req->sizes[i];
1121 char const *tbuf = (char const *)req->dmabuf + isize * i;
1122
1123 /* copy data to buffer */
1124 while (actlen > 0) {
1125 n = min(actlen, sce->limit - sce->fill);
1126 memcpy(sce->fill, tbuf, n);
1127
1128 tbuf += n;
1129 actlen -= n;
1130 sce->fill += n;
1131 if(sce->fill == sce->limit)
1132 sce->fill = sce->ibuf;
1133 }
1134
1135 /* setup size for next transfer */
1136 req->sizes[i] = isize;
1137 }
1138
1139 usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
1140 USBD_NO_COPY, ugen_isoc_rintr);
1141 (void)usbd_transfer(xfer);
1142
1143 if (sce->state & UGEN_ASLP) {
1144 sce->state &= ~UGEN_ASLP;
1145 DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1146 wakeup(sce);
1147 }
1148 selnotify(&sce->rsel, 0);
1149 }
1150
1151 #ifdef UGEN_BULK_RA_WB
1152 Static void
1153 ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1154 usbd_status status)
1155 {
1156 struct ugen_endpoint *sce = addr;
1157 u_int32_t count, n;
1158 char const *tbuf;
1159 usbd_status err;
1160
1161 /* Return if we are aborting. */
1162 if (status == USBD_CANCELLED)
1163 return;
1164
1165 if (status != USBD_NORMAL_COMPLETION) {
1166 DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
1167 sce->state |= UGEN_RA_WB_STOP;
1168 if (status == USBD_STALLED)
1169 usbd_clear_endpoint_stall_async(sce->pipeh);
1170 return;
1171 }
1172
1173 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1174
1175 /* Keep track of how much is in the buffer. */
1176 sce->ra_wb_used += count;
1177
1178 /* Copy data to buffer. */
1179 tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
1180 n = min(count, sce->limit - sce->fill);
1181 memcpy(sce->fill, tbuf, n);
1182 tbuf += n;
1183 count -= n;
1184 sce->fill += n;
1185 if (sce->fill == sce->limit)
1186 sce->fill = sce->ibuf;
1187 if (count > 0) {
1188 memcpy(sce->fill, tbuf, count);
1189 sce->fill += count;
1190 }
1191
1192 /* Set up the next request if necessary. */
1193 n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
1194 if (n > 0) {
1195 usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1196 min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
1197 USBD_NO_TIMEOUT, ugen_bulkra_intr);
1198 err = usbd_transfer(xfer);
1199 if (err != USBD_IN_PROGRESS) {
			printf("ugen_bulkra_intr: error=%d\n", err);
1201 /*
1202 * The transfer has not been queued. Setting STOP
1203 * will make us try again at the next read.
1204 */
1205 sce->state |= UGEN_RA_WB_STOP;
1206 }
1207 }
1208 else
1209 sce->state |= UGEN_RA_WB_STOP;
1210
1211 if (sce->state & UGEN_ASLP) {
1212 sce->state &= ~UGEN_ASLP;
1213 DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
1214 wakeup(sce);
1215 }
1216 selnotify(&sce->rsel, 0);
1217 }
1218
1219 Static void
1220 ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1221 usbd_status status)
1222 {
1223 struct ugen_endpoint *sce = addr;
1224 u_int32_t count, n;
1225 char *tbuf;
1226 usbd_status err;
1227
1228 /* Return if we are aborting. */
1229 if (status == USBD_CANCELLED)
1230 return;
1231
1232 if (status != USBD_NORMAL_COMPLETION) {
1233 DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
1234 sce->state |= UGEN_RA_WB_STOP;
1235 if (status == USBD_STALLED)
1236 usbd_clear_endpoint_stall_async(sce->pipeh);
1237 return;
1238 }
1239
1240 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1241
1242 /* Keep track of how much is in the buffer. */
1243 sce->ra_wb_used -= count;
1244
1245 /* Update buffer pointers. */
1246 sce->cur += count;
1247 if (sce->cur >= sce->limit)
1248 sce->cur = sce->ibuf + (sce->cur - sce->limit);
1249
1250 /* Set up next request if necessary. */
1251 if (sce->ra_wb_used > 0) {
1252 /* copy data from buffer */
1253 tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
1254 count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
1255 n = min(count, sce->limit - sce->cur);
1256 memcpy(tbuf, sce->cur, n);
1257 tbuf += n;
1258 if (count - n > 0)
1259 memcpy(tbuf, sce->ibuf, count - n);
1260
1261 usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1262 count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
1263 err = usbd_transfer(xfer);
1264 if (err != USBD_IN_PROGRESS) {
			printf("ugen_bulkwb_intr: error=%d\n", err);
1266 /*
1267 * The transfer has not been queued. Setting STOP
1268 * will make us try again at the next write.
1269 */
1270 sce->state |= UGEN_RA_WB_STOP;
1271 }
1272 }
1273 else
1274 sce->state |= UGEN_RA_WB_STOP;
1275
1276 if (sce->state & UGEN_ASLP) {
1277 sce->state &= ~UGEN_ASLP;
1278 DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
1279 wakeup(sce);
1280 }
1281 selnotify(&sce->rsel, 0);
1282 }
1283 #endif
1284
1285 Static usbd_status
1286 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1287 {
1288 usbd_interface_handle iface;
1289 usb_endpoint_descriptor_t *ed;
1290 usbd_status err;
1291 struct ugen_endpoint *sce;
1292 u_int8_t niface, nendpt, endptno, endpt;
1293 int dir;
1294
1295 DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1296
1297 err = usbd_interface_count(sc->sc_udev, &niface);
1298 if (err)
1299 return (err);
1300 if (ifaceidx < 0 || ifaceidx >= niface)
1301 return (USBD_INVAL);
1302
1303 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1304 if (err)
1305 return (err);
1306 err = usbd_endpoint_count(iface, &nendpt);
1307 if (err)
1308 return (err);
1309 /* XXX should only do this after setting new altno has succeeded */
1310 for (endptno = 0; endptno < nendpt; endptno++) {
1311 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1312 endpt = ed->bEndpointAddress;
1313 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1314 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1315 sce->sc = 0;
1316 sce->edesc = 0;
1317 sce->iface = 0;
1318 }
1319
1320 /* change setting */
1321 err = usbd_set_interface(iface, altno);
1322 if (err)
1323 return (err);
1324
1325 err = usbd_endpoint_count(iface, &nendpt);
1326 if (err)
1327 return (err);
1328 for (endptno = 0; endptno < nendpt; endptno++) {
1329 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1330 KASSERT(ed != NULL);
1331 endpt = ed->bEndpointAddress;
1332 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1333 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1334 sce->sc = sc;
1335 sce->edesc = ed;
1336 sce->iface = iface;
1337 }
1338 return (0);
1339 }
1340
1341 /* Retrieve a complete descriptor for a certain device and index. */
1342 Static usb_config_descriptor_t *
1343 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1344 {
1345 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1346 int len;
1347 usbd_status err;
1348
1349 if (index == USB_CURRENT_CONFIG_INDEX) {
1350 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1351 len = UGETW(tdesc->wTotalLength);
1352 if (lenp)
1353 *lenp = len;
1354 cdesc = malloc(len, M_TEMP, M_WAITOK);
1355 memcpy(cdesc, tdesc, len);
1356 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1357 } else {
1358 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1359 if (err)
1360 return (0);
1361 len = UGETW(cdescr.wTotalLength);
1362 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1363 if (lenp)
1364 *lenp = len;
1365 cdesc = malloc(len, M_TEMP, M_WAITOK);
1366 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1367 if (err) {
1368 free(cdesc, M_TEMP);
1369 return (0);
1370 }
1371 }
1372 return (cdesc);
1373 }
1374
1375 Static int
1376 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1377 {
1378 usbd_interface_handle iface;
1379 usbd_status err;
1380
1381 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1382 if (err)
1383 return (-1);
1384 return (usbd_get_interface_altindex(iface));
1385 }
1386
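/*
 * Illustrative userland sketch (not part of this driver): issuing a
 * control transfer through the USB_DO_REQUEST ioctl handled below.  The
 * path /dev/ugen0.00 (the control endpoint of unit 0) is an assumption;
 * the request shown fetches the device descriptor.
 *
 *	#include <sys/ioctl.h>
 *	#include <dev/usb/usb.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct usb_ctl_request req;
 *		usb_device_descriptor_t dd;
 *		int fd = open("/dev/ugen0.00", O_RDWR);
 *
 *		if (fd == -1)
 *			return 1;
 *		memset(&req, 0, sizeof(req));
 *		req.ucr_request.bmRequestType = UT_READ_DEVICE;
 *		req.ucr_request.bRequest = UR_GET_DESCRIPTOR;
 *		USETW2(req.ucr_request.wValue, UDESC_DEVICE, 0);
 *		USETW(req.ucr_request.wIndex, 0);
 *		USETW(req.ucr_request.wLength, sizeof(dd));
 *		req.ucr_data = &dd;
 *		req.ucr_flags = 0;
 *		if (ioctl(fd, USB_DO_REQUEST, &req) == -1)
 *			return 1;
 *		printf("idVendor=%04x idProduct=%04x\n",
 *		    UGETW(dd.idVendor), UGETW(dd.idProduct));
 *		close(fd);
 *		return 0;
 *	}
 */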
1387 Static int
1388 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1389 void *addr, int flag, struct lwp *l)
1390 {
1391 struct ugen_endpoint *sce;
1392 usbd_status err;
1393 usbd_interface_handle iface;
1394 struct usb_config_desc *cd;
1395 usb_config_descriptor_t *cdesc;
1396 struct usb_interface_desc *id;
1397 usb_interface_descriptor_t *idesc;
1398 struct usb_endpoint_desc *ed;
1399 usb_endpoint_descriptor_t *edesc;
1400 struct usb_alt_interface *ai;
1401 struct usb_string_desc *si;
1402 u_int8_t conf, alt;
1403
1404 DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
1405 if (sc->sc_dying)
1406 return (EIO);
1407
1408 switch (cmd) {
1409 case FIONBIO:
1410 /* All handled in the upper FS layer. */
1411 return (0);
1412 case USB_SET_SHORT_XFER:
1413 if (endpt == USB_CONTROL_ENDPOINT)
1414 return (EINVAL);
1415 /* This flag only affects read */
1416 sce = &sc->sc_endpoints[endpt][IN];
1417 if (sce == NULL || sce->pipeh == NULL)
1418 return (EINVAL);
1419 if (*(int *)addr)
1420 sce->state |= UGEN_SHORT_OK;
1421 else
1422 sce->state &= ~UGEN_SHORT_OK;
1423 return (0);
1424 case USB_SET_TIMEOUT:
1425 sce = &sc->sc_endpoints[endpt][IN];
1426 if (sce == NULL
1427 /* XXX this shouldn't happen, but the distinction between
1428 input and output pipes isn't clear enough.
1429 || sce->pipeh == NULL */
1430 )
1431 return (EINVAL);
1432 sce->timeout = *(int *)addr;
1433 return (0);
1434 case USB_SET_BULK_RA:
1435 #ifdef UGEN_BULK_RA_WB
1436 if (endpt == USB_CONTROL_ENDPOINT)
1437 return (EINVAL);
1438 sce = &sc->sc_endpoints[endpt][IN];
1439 if (sce == NULL || sce->pipeh == NULL)
1440 return (EINVAL);
1441 edesc = sce->edesc;
1442 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1443 return (EINVAL);
1444
1445 if (*(int *)addr) {
1446 /* Only turn RA on if it's currently off. */
1447 if (sce->state & UGEN_BULK_RA)
1448 return (0);
1449
1450 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1451 /* shouldn't happen */
1452 return (EINVAL);
1453 sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
1454 if (sce->ra_wb_xfer == NULL)
1455 return (ENOMEM);
1456 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1457 /*
			 * Set up a DMA buffer because we reuse the xfer with
			 * the same (maximum) request length, just as the
			 * isochronous code does.
1460 */
1461 if (usbd_alloc_buffer(sce->ra_wb_xfer,
1462 sce->ra_wb_xferlen) == 0) {
1463 usbd_free_xfer(sce->ra_wb_xfer);
1464 return (ENOMEM);
1465 }
1466 sce->ibuf = malloc(sce->ra_wb_bufsize,
1467 M_USBDEV, M_WAITOK);
1468 sce->fill = sce->cur = sce->ibuf;
1469 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1470 sce->ra_wb_used = 0;
1471 sce->state |= UGEN_BULK_RA;
1472 sce->state &= ~UGEN_RA_WB_STOP;
1473 /* Now start reading. */
1474 usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
1475 NULL,
1476 min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1477 USBD_NO_COPY, USBD_NO_TIMEOUT,
1478 ugen_bulkra_intr);
1479 err = usbd_transfer(sce->ra_wb_xfer);
1480 if (err != USBD_IN_PROGRESS) {
1481 sce->state &= ~UGEN_BULK_RA;
1482 free(sce->ibuf, M_USBDEV);
1483 sce->ibuf = NULL;
1484 usbd_free_xfer(sce->ra_wb_xfer);
1485 return (EIO);
1486 }
1487 } else {
1488 /* Only turn RA off if it's currently on. */
1489 if (!(sce->state & UGEN_BULK_RA))
1490 return (0);
1491
1492 sce->state &= ~UGEN_BULK_RA;
1493 usbd_abort_pipe(sce->pipeh);
1494 usbd_free_xfer(sce->ra_wb_xfer);
1495 /*
1496 * XXX Discard whatever's in the buffer, but we
1497 * should keep it around and drain the buffer
1498 * instead.
1499 */
1500 free(sce->ibuf, M_USBDEV);
1501 sce->ibuf = NULL;
1502 }
1503 return (0);
1504 #else
1505 return (EOPNOTSUPP);
1506 #endif
1507 case USB_SET_BULK_WB:
1508 #ifdef UGEN_BULK_RA_WB
1509 if (endpt == USB_CONTROL_ENDPOINT)
1510 return (EINVAL);
1511 sce = &sc->sc_endpoints[endpt][OUT];
1512 if (sce == NULL || sce->pipeh == NULL)
1513 return (EINVAL);
1514 edesc = sce->edesc;
1515 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1516 return (EINVAL);
1517
1518 if (*(int *)addr) {
1519 /* Only turn WB on if it's currently off. */
1520 if (sce->state & UGEN_BULK_WB)
1521 return (0);
1522
1523 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1524 /* shouldn't happen */
1525 return (EINVAL);
1526 sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
1527 if (sce->ra_wb_xfer == NULL)
1528 return (ENOMEM);
1529 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1530 /*
			 * Set up a DMA buffer because we reuse the xfer with
			 * the same (maximum) request length, just as the
			 * isochronous code does.
1533 */
1534 if (usbd_alloc_buffer(sce->ra_wb_xfer,
1535 sce->ra_wb_xferlen) == 0) {
1536 usbd_free_xfer(sce->ra_wb_xfer);
1537 return (ENOMEM);
1538 }
1539 sce->ibuf = malloc(sce->ra_wb_bufsize,
1540 M_USBDEV, M_WAITOK);
1541 sce->fill = sce->cur = sce->ibuf;
1542 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1543 sce->ra_wb_used = 0;
1544 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1545 } else {
1546 /* Only turn WB off if it's currently on. */
1547 if (!(sce->state & UGEN_BULK_WB))
1548 return (0);
1549
1550 sce->state &= ~UGEN_BULK_WB;
1551 /*
1552 * XXX Discard whatever's in the buffer, but we
1553 * should keep it around and keep writing to
1554 * drain the buffer instead.
1555 */
1556 usbd_abort_pipe(sce->pipeh);
1557 usbd_free_xfer(sce->ra_wb_xfer);
1558 free(sce->ibuf, M_USBDEV);
1559 sce->ibuf = NULL;
1560 }
1561 return (0);
1562 #else
1563 return (EOPNOTSUPP);
1564 #endif
1565 case USB_SET_BULK_RA_OPT:
1566 case USB_SET_BULK_WB_OPT:
1567 #ifdef UGEN_BULK_RA_WB
1568 {
1569 struct usb_bulk_ra_wb_opt *opt;
1570
1571 if (endpt == USB_CONTROL_ENDPOINT)
1572 return (EINVAL);
1573 opt = (struct usb_bulk_ra_wb_opt *)addr;
1574 if (cmd == USB_SET_BULK_RA_OPT)
1575 sce = &sc->sc_endpoints[endpt][IN];
1576 else
1577 sce = &sc->sc_endpoints[endpt][OUT];
1578 if (sce == NULL || sce->pipeh == NULL)
1579 return (EINVAL);
1580 if (opt->ra_wb_buffer_size < 1 ||
1581 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1582 opt->ra_wb_request_size < 1 ||
1583 opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1584 return (EINVAL);
1585 /*
1586 * XXX These changes do not take effect until the
1587 * next time RA/WB mode is enabled but they ought to
1588 * take effect immediately.
1589 */
1590 sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1591 sce->ra_wb_reqsize = opt->ra_wb_request_size;
1592 return (0);
1593 }
1594 #else
1595 return (EOPNOTSUPP);
1596 #endif
1597 default:
1598 break;
1599 }
1600
1601 if (endpt != USB_CONTROL_ENDPOINT)
1602 return (EINVAL);
1603
1604 switch (cmd) {
1605 #ifdef UGEN_DEBUG
1606 case USB_SETDEBUG:
1607 ugendebug = *(int *)addr;
1608 break;
1609 #endif
1610 case USB_GET_CONFIG:
1611 err = usbd_get_config(sc->sc_udev, &conf);
1612 if (err)
1613 return (EIO);
1614 *(int *)addr = conf;
1615 break;
1616 case USB_SET_CONFIG:
1617 if (!(flag & FWRITE))
1618 return (EPERM);
1619 err = ugen_set_config(sc, *(int *)addr);
1620 switch (err) {
1621 case USBD_NORMAL_COMPLETION:
1622 break;
1623 case USBD_IN_USE:
1624 return (EBUSY);
1625 default:
1626 return (EIO);
1627 }
1628 break;
1629 case USB_GET_ALTINTERFACE:
1630 ai = (struct usb_alt_interface *)addr;
1631 err = usbd_device2interface_handle(sc->sc_udev,
1632 ai->uai_interface_index, &iface);
1633 if (err)
1634 return (EINVAL);
1635 idesc = usbd_get_interface_descriptor(iface);
1636 if (idesc == NULL)
1637 return (EIO);
1638 ai->uai_alt_no = idesc->bAlternateSetting;
1639 break;
1640 case USB_SET_ALTINTERFACE:
1641 if (!(flag & FWRITE))
1642 return (EPERM);
1643 ai = (struct usb_alt_interface *)addr;
1644 err = usbd_device2interface_handle(sc->sc_udev,
1645 ai->uai_interface_index, &iface);
1646 if (err)
1647 return (EINVAL);
1648 err = ugen_set_interface(sc, ai->uai_interface_index,
1649 ai->uai_alt_no);
1650 if (err)
1651 return (EINVAL);
1652 break;
1653 case USB_GET_NO_ALT:
1654 ai = (struct usb_alt_interface *)addr;
1655 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
1656 if (cdesc == NULL)
1657 return (EINVAL);
1658 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1659 if (idesc == NULL) {
1660 free(cdesc, M_TEMP);
1661 return (EINVAL);
1662 }
1663 ai->uai_alt_no = usbd_get_no_alts(cdesc,
1664 idesc->bInterfaceNumber);
1665 free(cdesc, M_TEMP);
1666 break;
1667 case USB_GET_DEVICE_DESC:
1668 *(usb_device_descriptor_t *)addr =
1669 *usbd_get_device_descriptor(sc->sc_udev);
1670 break;
1671 case USB_GET_CONFIG_DESC:
1672 cd = (struct usb_config_desc *)addr;
1673 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
1674 if (cdesc == NULL)
1675 return (EINVAL);
1676 cd->ucd_desc = *cdesc;
1677 free(cdesc, M_TEMP);
1678 break;
1679 case USB_GET_INTERFACE_DESC:
1680 id = (struct usb_interface_desc *)addr;
1681 cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
1682 if (cdesc == NULL)
1683 return (EINVAL);
1684 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1685 id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1686 alt = ugen_get_alt_index(sc, id->uid_interface_index);
1687 else
1688 alt = id->uid_alt_index;
1689 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1690 if (idesc == NULL) {
1691 free(cdesc, M_TEMP);
1692 return (EINVAL);
1693 }
1694 id->uid_desc = *idesc;
1695 free(cdesc, M_TEMP);
1696 break;
1697 case USB_GET_ENDPOINT_DESC:
1698 ed = (struct usb_endpoint_desc *)addr;
1699 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
1700 if (cdesc == NULL)
1701 return (EINVAL);
1702 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1703 ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1704 alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1705 else
1706 alt = ed->ued_alt_index;
1707 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1708 alt, ed->ued_endpoint_index);
1709 if (edesc == NULL) {
1710 free(cdesc, M_TEMP);
1711 return (EINVAL);
1712 }
1713 ed->ued_desc = *edesc;
1714 free(cdesc, M_TEMP);
1715 break;
1716 case USB_GET_FULL_DESC:
1717 {
1718 int len;
1719 struct iovec iov;
1720 struct uio uio;
1721 struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1722 int error;
1723
		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		if (len > fd->ufd_size)
			len = fd->ufd_size;
1727 iov.iov_base = (void *)fd->ufd_data;
1728 iov.iov_len = len;
1729 uio.uio_iov = &iov;
1730 uio.uio_iovcnt = 1;
1731 uio.uio_resid = len;
1732 uio.uio_offset = 0;
1733 uio.uio_rw = UIO_READ;
1734 uio.uio_vmspace = l->l_proc->p_vmspace;
1735 error = uiomove((void *)cdesc, len, &uio);
1736 free(cdesc, M_TEMP);
1737 return (error);
1738 }
1739 case USB_GET_STRING_DESC: {
1740 int len;
1741 si = (struct usb_string_desc *)addr;
1742 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1743 si->usd_language_id, &si->usd_desc, &len);
1744 if (err)
1745 return (EINVAL);
1746 break;
1747 }
1748 case USB_DO_REQUEST:
1749 {
1750 struct usb_ctl_request *ur = (void *)addr;
1751 int len = UGETW(ur->ucr_request.wLength);
1752 struct iovec iov;
1753 struct uio uio;
1754 void *ptr = 0;
1755 usbd_status xerr;
1756 int error = 0;
1757
1758 if (!(flag & FWRITE))
1759 return (EPERM);
1760 /* Avoid requests that would damage the bus integrity. */
1761 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1762 ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
1763 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1764 ur->ucr_request.bRequest == UR_SET_CONFIG) ||
1765 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
1766 ur->ucr_request.bRequest == UR_SET_INTERFACE))
1767 return (EINVAL);
1768
1769 if (len < 0 || len > 32767)
1770 return (EINVAL);
1771 if (len != 0) {
1772 iov.iov_base = (void *)ur->ucr_data;
1773 iov.iov_len = len;
1774 uio.uio_iov = &iov;
1775 uio.uio_iovcnt = 1;
1776 uio.uio_resid = len;
1777 uio.uio_offset = 0;
1778 uio.uio_rw =
1779 ur->ucr_request.bmRequestType & UT_READ ?
1780 UIO_READ : UIO_WRITE;
1781 uio.uio_vmspace = l->l_proc->p_vmspace;
1782 ptr = malloc(len, M_TEMP, M_WAITOK);
1783 if (uio.uio_rw == UIO_WRITE) {
1784 error = uiomove(ptr, len, &uio);
1785 if (error)
1786 goto ret;
1787 }
1788 }
1789 sce = &sc->sc_endpoints[endpt][IN];
1790 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
1791 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
1792 if (xerr) {
1793 error = EIO;
1794 goto ret;
1795 }
1796 if (len != 0) {
1797 if (uio.uio_rw == UIO_READ) {
1798 error = uiomove(ptr, len, &uio);
1799 if (error)
1800 goto ret;
1801 }
1802 }
1803 ret:
1804 if (ptr)
1805 free(ptr, M_TEMP);
1806 return (error);
1807 }
1808 case USB_GET_DEVICEINFO:
1809 usbd_fill_deviceinfo(sc->sc_udev,
1810 (struct usb_device_info *)addr, 0);
1811 break;
1812 #ifdef COMPAT_30
1813 case USB_GET_DEVICEINFO_OLD:
1814 usbd_fill_deviceinfo_old(sc->sc_udev,
1815 (struct usb_device_info_old *)addr, 0);
1816
1817 break;
1818 #endif
1819 default:
1820 return (EINVAL);
1821 }
1822 return (0);
1823 }
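/*
 * Illustrative userland sketch (not part of this driver): enabling bulk
 * read-ahead on an open bulk IN endpoint so the kernel keeps transfers
 * queued into its ring buffer between read(2) calls.  This requires a
 * kernel built with UGEN_BULK_RA_WB; the path /dev/ugen0.02 and the
 * buffer sizes below are assumptions.
 *
 *	#include <sys/ioctl.h>
 *	#include <dev/usb/usb.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	open_with_readahead(void)
 *	{
 *		struct usb_bulk_ra_wb_opt opt;
 *		int on = 1;
 *		int fd = open("/dev/ugen0.02", O_RDONLY);
 *
 *		if (fd == -1)
 *			return -1;
 *		opt.ra_wb_buffer_size = 64 * 1024;
 *		opt.ra_wb_request_size = 8 * 1024;
 *		if (ioctl(fd, USB_SET_BULK_RA_OPT, &opt) == -1 ||
 *		    ioctl(fd, USB_SET_BULK_RA, &on) == -1) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 */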
1824
1825 int
1826 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1827 {
1828 int endpt = UGENENDPOINT(dev);
1829 struct ugen_softc *sc;
1830 int error;
1831
1832 USB_GET_SC(ugen, UGENUNIT(dev), sc);
1833
1834 sc->sc_refcnt++;
1835 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1836 if (--sc->sc_refcnt < 0)
1837 usb_detach_wakeup(USBDEV(sc->sc_dev));
1838 return (error);
1839 }
1840
1841 int
1842 ugenpoll(dev_t dev, int events, struct lwp *l)
1843 {
1844 struct ugen_softc *sc;
1845 struct ugen_endpoint *sce_in, *sce_out;
1846 int revents = 0;
1847 int s;
1848
1849 USB_GET_SC(ugen, UGENUNIT(dev), sc);
1850
1851 if (sc->sc_dying)
1852 return (POLLHUP);
1853
1854 sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
1855 sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
1856 if (sce_in == NULL && sce_out == NULL)
1857 return (POLLERR);
1858 #ifdef DIAGNOSTIC
1859 if (!sce_in->edesc && !sce_out->edesc) {
1860 printf("ugenpoll: no edesc\n");
1861 return (POLLERR);
1862 }
1863 /* It's possible to have only one pipe open. */
1864 if (!sce_in->pipeh && !sce_out->pipeh) {
1865 printf("ugenpoll: no pipe\n");
1866 return (POLLERR);
1867 }
1868 #endif
1869 s = splusb();
1870 if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
1871 switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
1872 case UE_INTERRUPT:
1873 if (sce_in->q.c_cc > 0)
1874 revents |= events & (POLLIN | POLLRDNORM);
1875 else
1876 selrecord(l, &sce_in->rsel);
1877 break;
1878 case UE_ISOCHRONOUS:
1879 if (sce_in->cur != sce_in->fill)
1880 revents |= events & (POLLIN | POLLRDNORM);
1881 else
1882 selrecord(l, &sce_in->rsel);
1883 break;
1884 case UE_BULK:
1885 #ifdef UGEN_BULK_RA_WB
1886 if (sce_in->state & UGEN_BULK_RA) {
1887 if (sce_in->ra_wb_used > 0)
1888 revents |= events &
1889 (POLLIN | POLLRDNORM);
1890 else
1891 selrecord(l, &sce_in->rsel);
1892 break;
1893 }
1894 #endif
1895 /*
1896 * We have no easy way of determining if a read will
1897 * yield any data or a write will happen.
1898 * Pretend they will.
1899 */
1900 revents |= events & (POLLIN | POLLRDNORM);
1901 break;
1902 default:
1903 break;
1904 }
1905 if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
1906 switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
1907 case UE_INTERRUPT:
1908 case UE_ISOCHRONOUS:
1909 /* XXX unimplemented */
1910 break;
1911 case UE_BULK:
1912 #ifdef UGEN_BULK_RA_WB
1913 if (sce_out->state & UGEN_BULK_WB) {
1914 if (sce_out->ra_wb_used <
1915 sce_out->limit - sce_out->ibuf)
1916 revents |= events &
1917 (POLLOUT | POLLWRNORM);
1918 else
1919 selrecord(l, &sce_out->rsel);
1920 break;
1921 }
1922 #endif
1923 /*
1924 * We have no easy way of determining if a read will
1925 * yield any data or a write will happen.
1926 * Pretend they will.
1927 */
1928 revents |= events & (POLLOUT | POLLWRNORM);
1929 break;
1930 default:
1931 break;
1932 }
1933
1934
1935 splx(s);
1936 return (revents);
1937 }
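/*
 * Illustrative userland sketch (not part of this driver): waiting for
 * input on an IN endpoint with poll(2); fd is assumed to be an open
 * /dev/ugenN.EE endpoint node.
 *
 *	#include <poll.h>
 *
 *	int
 *	wait_for_input(int fd, int timo_ms)
 *	{
 *		struct pollfd pfd;
 *
 *		pfd.fd = fd;
 *		pfd.events = POLLIN | POLLRDNORM;
 *		return poll(&pfd, 1, timo_ms);
 *	}
 */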
1938
1939 static void
1940 filt_ugenrdetach(struct knote *kn)
1941 {
1942 struct ugen_endpoint *sce = kn->kn_hook;
1943 int s;
1944
1945 s = splusb();
1946 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1947 splx(s);
1948 }
1949
1950 static int
1951 filt_ugenread_intr(struct knote *kn, long hint)
1952 {
1953 struct ugen_endpoint *sce = kn->kn_hook;
1954
1955 kn->kn_data = sce->q.c_cc;
1956 return (kn->kn_data > 0);
1957 }
1958
1959 static int
1960 filt_ugenread_isoc(struct knote *kn, long hint)
1961 {
1962 struct ugen_endpoint *sce = kn->kn_hook;
1963
1964 if (sce->cur == sce->fill)
1965 return (0);
1966
1967 if (sce->cur < sce->fill)
1968 kn->kn_data = sce->fill - sce->cur;
1969 else
1970 kn->kn_data = (sce->limit - sce->cur) +
1971 (sce->fill - sce->ibuf);
1972
1973 return (1);
1974 }
1975
1976 #ifdef UGEN_BULK_RA_WB
1977 static int
1978 filt_ugenread_bulk(struct knote *kn, long hint)
1979 {
1980 struct ugen_endpoint *sce = kn->kn_hook;
1981
1982 if (!(sce->state & UGEN_BULK_RA))
1983 /*
1984 * We have no easy way of determining if a read will
1985 * yield any data or a write will happen.
1986 * So, emulate "seltrue".
1987 */
1988 return (filt_seltrue(kn, hint));
1989
1990 if (sce->ra_wb_used == 0)
1991 return (0);
1992
1993 kn->kn_data = sce->ra_wb_used;
1994
1995 return (1);
1996 }
1997
1998 static int
1999 filt_ugenwrite_bulk(struct knote *kn, long hint)
2000 {
2001 struct ugen_endpoint *sce = kn->kn_hook;
2002
2003 if (!(sce->state & UGEN_BULK_WB))
2004 /*
2005 * We have no easy way of determining if a read will
2006 * yield any data or a write will happen.
2007 * So, emulate "seltrue".
2008 */
2009 return (filt_seltrue(kn, hint));
2010
2011 if (sce->ra_wb_used == sce->limit - sce->ibuf)
2012 return (0);
2013
2014 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2015
2016 return (1);
2017 }
2018 #endif
2019
2020 static const struct filterops ugenread_intr_filtops =
2021 { 1, NULL, filt_ugenrdetach, filt_ugenread_intr };
2022
2023 static const struct filterops ugenread_isoc_filtops =
2024 { 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };
2025
2026 #ifdef UGEN_BULK_RA_WB
2027 static const struct filterops ugenread_bulk_filtops =
2028 { 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };
2029
2030 static const struct filterops ugenwrite_bulk_filtops =
2031 { 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2032 #else
2033 static const struct filterops ugen_seltrue_filtops =
2034 { 1, NULL, filt_ugenrdetach, filt_seltrue };
2035 #endif
2036
2037 int
2038 ugenkqfilter(dev_t dev, struct knote *kn)
2039 {
2040 struct ugen_softc *sc;
2041 struct ugen_endpoint *sce;
2042 struct klist *klist;
2043 int s;
2044
2045 USB_GET_SC(ugen, UGENUNIT(dev), sc);
2046
2047 if (sc->sc_dying)
2048 return (ENXIO);
2049
2050 switch (kn->kn_filter) {
2051 case EVFILT_READ:
2052 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
2053 if (sce == NULL)
2054 return (EINVAL);
2055
2056 klist = &sce->rsel.sel_klist;
2057 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2058 case UE_INTERRUPT:
2059 kn->kn_fop = &ugenread_intr_filtops;
2060 break;
2061 case UE_ISOCHRONOUS:
2062 kn->kn_fop = &ugenread_isoc_filtops;
2063 break;
2064 case UE_BULK:
2065 #ifdef UGEN_BULK_RA_WB
2066 kn->kn_fop = &ugenread_bulk_filtops;
2067 break;
2068 #else
2069 /*
2070 * We have no easy way of determining if a read will
2071 * yield any data or a write will happen.
2072 * So, emulate "seltrue".
2073 */
2074 kn->kn_fop = &ugen_seltrue_filtops;
2075 #endif
2076 break;
2077 default:
2078 return (EINVAL);
2079 }
2080 break;
2081
2082 case EVFILT_WRITE:
2083 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
2084 if (sce == NULL)
2085 return (EINVAL);
2086
2087 klist = &sce->rsel.sel_klist;
2088 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2089 case UE_INTERRUPT:
2090 case UE_ISOCHRONOUS:
2091 /* XXX poll doesn't support this */
2092 return (EINVAL);
2093
2094 case UE_BULK:
2095 #ifdef UGEN_BULK_RA_WB
2096 kn->kn_fop = &ugenwrite_bulk_filtops;
2097 #else
2098 /*
2099 * We have no easy way of determining if a read will
2100 * yield any data or a write will happen.
2101 * So, emulate "seltrue".
2102 */
2103 kn->kn_fop = &ugen_seltrue_filtops;
2104 #endif
2105 break;
2106 default:
2107 return (EINVAL);
2108 }
2109 break;
2110
2111 default:
2112 return (EINVAL);
2113 }
2114
2115 kn->kn_hook = sce;
2116
2117 s = splusb();
2118 SLIST_INSERT_HEAD(klist, kn, kn_selnext);
2119 splx(s);
2120
2121 return (0);
2122 }
2123
2124 #if defined(__FreeBSD__)
2125 DRIVER_MODULE(ugen, uhub, ugen_driver, ugen_devclass, usbd_driver_load, 0);
2126 #endif
2127