 1 /* $NetBSD: ugen.c,v 1.96.6.5 2009/01/17 13:29:09 mjf Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.96.6.5 2009/01/17 13:29:09 mjf Exp $");
41
42 #include "opt_ugen_bulk_ra_wb.h"
43 #include "opt_compat_netbsd.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/malloc.h>
49 #if defined(__NetBSD__) || defined(__OpenBSD__)
50 #include <sys/device.h>
51 #include <sys/ioctl.h>
52 #elif defined(__FreeBSD__)
53 #include <sys/module.h>
54 #include <sys/bus.h>
55 #include <sys/ioccom.h>
56 #include <sys/conf.h>
57 #include <sys/fcntl.h>
58 #include <sys/filio.h>
59 #endif
60 #include <sys/conf.h>
61 #include <sys/tty.h>
62 #include <sys/file.h>
63 #include <sys/select.h>
64 #include <sys/proc.h>
65 #include <sys/vnode.h>
66 #include <sys/poll.h>
67
68 #include <dev/usb/usb.h>
69 #include <dev/usb/usbdi.h>
70 #include <dev/usb/usbdi_util.h>
71
72 #ifdef UGEN_DEBUG
73 #define DPRINTF(x) if (ugendebug) logprintf x
74 #define DPRINTFN(n,x) if (ugendebug>(n)) logprintf x
75 int ugendebug = 0;
76 #else
77 #define DPRINTF(x)
78 #define DPRINTFN(n,x)
79 #endif
80
81 #define UGEN_CHUNK 128 /* chunk size for read */
82 #define UGEN_IBSIZE 1020 /* buffer size */
83 #define UGEN_BBSIZE 1024
84
85 #define UGEN_NISOFRAMES 500 /* 0.5 seconds worth */
86 #define UGEN_NISOREQS 6 /* number of outstanding xfer requests */
 87 #define UGEN_NISORFRMS 4 /* number of frames (milliseconds) per req */
88
89 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
90 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
91
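/*
 * Bulk read-ahead (RA) and write-behind (WB) support is optional
 * (UGEN_BULK_RA_WB).  When enabled via the USB_SET_BULK_RA/USB_SET_BULK_WB
 * ioctls below, bulk data is staged in a per-endpoint ring buffer so that
 * transfers keep running while the process is between read()/write() calls.
 * The ring defaults to 16 KB and may be tuned up to 1 MB with the
 * USB_SET_BULK_RA_OPT/USB_SET_BULK_WB_OPT ioctls.
 */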
92 struct ugen_endpoint {
93 struct ugen_softc *sc;
94 usb_endpoint_descriptor_t *edesc;
95 usbd_interface_handle iface;
96 int state;
97 #define UGEN_ASLP 0x02 /* waiting for data */
98 #define UGEN_SHORT_OK 0x04 /* short xfers are OK */
99 #define UGEN_BULK_RA 0x08 /* in bulk read-ahead mode */
100 #define UGEN_BULK_WB 0x10 /* in bulk write-behind mode */
101 #define UGEN_RA_WB_STOP 0x20 /* RA/WB xfer is stopped (buffer full/empty) */
102 usbd_pipe_handle pipeh;
103 struct clist q;
104 struct selinfo rsel;
105 u_char *ibuf; /* start of buffer (circular for isoc) */
106 u_char *fill; /* location for input (isoc) */
107 u_char *limit; /* end of circular buffer (isoc) */
108 u_char *cur; /* current read location (isoc) */
109 u_int32_t timeout;
110 #ifdef UGEN_BULK_RA_WB
111 u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
112 u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
113 u_int32_t ra_wb_used; /* how much is in buffer */
114 u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
115 usbd_xfer_handle ra_wb_xfer;
116 #endif
117 struct isoreq {
118 struct ugen_endpoint *sce;
119 usbd_xfer_handle xfer;
120 void *dmabuf;
121 u_int16_t sizes[UGEN_NISORFRMS];
122 } isoreqs[UGEN_NISOREQS];
123 };
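/*
 * ibuf..limit form a ring buffer shared by the isochronous and bulk RA/WB
 * paths: for input, the completion handlers produce at "fill" and
 * ugen_do_read consumes at "cur"; for bulk write-behind the roles are
 * swapped, with ugen_do_write producing at "fill" and ugen_bulkwb_intr
 * consuming at "cur".
 */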
124
125 struct ugen_softc {
126 USBBASEDEVICE sc_dev; /* base device */
127 usbd_device_handle sc_udev;
128
129 char sc_is_open[USB_MAX_ENDPOINTS];
130 struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
131 #define OUT 0
132 #define IN 1
133
134 int sc_refcnt;
135 char sc_buffer[UGEN_BBSIZE];
136 u_char sc_dying;
137 };
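/*
 * sc_endpoints is indexed by endpoint address and direction (OUT/IN);
 * sc_is_open enforces exclusive opens per endpoint address, except for the
 * control endpoint, which may be opened multiple times.
 */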
138
139 #if defined(__NetBSD__)
140 dev_type_open(ugenopen);
141 dev_type_close(ugenclose);
142 dev_type_read(ugenread);
143 dev_type_write(ugenwrite);
144 dev_type_ioctl(ugenioctl);
145 dev_type_poll(ugenpoll);
146 dev_type_kqfilter(ugenkqfilter);
147
148 const struct cdevsw ugen_cdevsw = {
149 ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
150 nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
151 };
152 #elif defined(__OpenBSD__)
153 cdev_decl(ugen);
154 #elif defined(__FreeBSD__)
155 d_open_t ugenopen;
156 d_close_t ugenclose;
157 d_read_t ugenread;
158 d_write_t ugenwrite;
159 d_ioctl_t ugenioctl;
160 d_poll_t ugenpoll;
161
162 #define UGEN_CDEV_MAJOR 114
163
164 Static struct cdevsw ugen_cdevsw = {
165 /* open */ ugenopen,
166 /* close */ ugenclose,
167 /* read */ ugenread,
168 /* write */ ugenwrite,
169 /* ioctl */ ugenioctl,
170 /* poll */ ugenpoll,
171 /* mmap */ nommap,
172 /* strategy */ nostrategy,
173 /* name */ "ugen",
174 /* maj */ UGEN_CDEV_MAJOR,
175 /* dump */ nodump,
176 /* psize */ nopsize,
177 /* flags */ 0,
178 /* bmaj */ -1
179 };
180 #endif
181
182 Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
183 usbd_status status);
184 Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
185 usbd_status status);
186 #ifdef UGEN_BULK_RA_WB
187 Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
188 usbd_status status);
189 Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
190 usbd_status status);
191 #endif
192 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
193 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
194 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
195 void *, int, struct lwp *);
196 Static int ugen_set_config(struct ugen_softc *sc, int configno);
197 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
198 int index, int *lenp);
199 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
200 Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);
201
202 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
203 #define UGENENDPOINT(n) (minor(n) & 0xf)
204 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
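/*
 * The minor number packs the unit in the high nibble and the endpoint
 * address in the low nibble; e.g. the node for unit 0, endpoint 2
 * (typically named ugen0.02) has minor (0 << 4) | 2.  This limits the
 * driver to 16 units of 16 endpoints each (USB_MAX_ENDPOINTS).
 */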
205
206 USB_DECLARE_DRIVER(ugen);
207
208 USB_MATCH(ugen)
209 {
210 USB_MATCH_START(ugen, uaa);
211
212 if (match->cf_flags & 1)
213 return (UMATCH_HIGHEST);
214 else if (uaa->usegeneric)
215 return (UMATCH_GENERIC);
216 else
217 return (UMATCH_NONE);
218 }
219
220 USB_ATTACH(ugen)
221 {
222 USB_ATTACH_START(ugen, sc, uaa);
223 usbd_device_handle udev;
224 char *devinfop;
225 usbd_status err;
226 int i, dir, conf, maj;
227
228 devinfop = usbd_devinfo_alloc(uaa->device, 0);
229 USB_ATTACH_SETUP;
230 aprint_normal_dev(self, "%s\n", devinfop);
231 usbd_devinfo_free(devinfop);
232
233 sc->sc_dev = self;
234 sc->sc_udev = udev = uaa->device;
235
236 /* First set configuration index 0, the default one for ugen. */
237 err = usbd_set_config_index(udev, 0, 0);
238 if (err) {
239 aprint_error_dev(self,
240 "setting configuration index 0 failed\n");
241 sc->sc_dying = 1;
242 USB_ATTACH_ERROR_RETURN;
243 }
244 conf = usbd_get_config_descriptor(udev)->bConfigurationValue;
245
246 /* Set up all the local state for this configuration. */
247 err = ugen_set_config(sc, conf);
248 if (err) {
249 aprint_error_dev(self, "setting configuration %d failed\n",
250 conf);
251 sc->sc_dying = 1;
252 USB_ATTACH_ERROR_RETURN;
253 }
254
255 #ifdef __FreeBSD__
256 {
257 static int global_init_done = 0;
258 if (!global_init_done) {
259 cdevsw_add(&ugen_cdevsw);
260 global_init_done = 1;
261 }
262 }
263 #endif
264 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
265 for (dir = OUT; dir <= IN; dir++) {
266 struct ugen_endpoint *sce;
267
268 sce = &sc->sc_endpoints[i][dir];
269 selinit(&sce->rsel);
270 }
271 }
272
273 usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
274 USBDEV(sc->sc_dev));
275
276 if (!pmf_device_register(self, NULL, NULL))
277 aprint_error_dev(self, "couldn't establish power handler\n");
278
279 maj = cdevsw_lookup_major(&ugen_cdevsw);
280 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
281 device_register_name(makedev(maj, device_unit(self)), self,
 282     true, DEV_OTHER, "%s.%02d", device_xname(self), i);
283 }
284
285 USB_ATTACH_SUCCESS_RETURN;
286 }
287
288 Static int
289 ugen_set_config(struct ugen_softc *sc, int configno)
290 {
291 usbd_device_handle dev = sc->sc_udev;
292 usbd_interface_handle iface;
293 usb_endpoint_descriptor_t *ed;
294 struct ugen_endpoint *sce;
295 u_int8_t niface, nendpt;
296 int ifaceno, endptno, endpt;
297 usbd_status err;
298 int dir;
299
300 DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
301 USBDEVNAME(sc->sc_dev), configno, sc));
302
303 /*
304 * We start at 1, not 0, because we don't care whether the
305 * control endpoint is open or not. It is always present.
306 */
307 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
308 if (sc->sc_is_open[endptno]) {
309 DPRINTFN(1,
310 ("ugen_set_config: %s - endpoint %d is open\n",
311 USBDEVNAME(sc->sc_dev), endptno));
312 return (USBD_IN_USE);
313 }
314
315 /* Avoid setting the current value. */
316 if (usbd_get_config_descriptor(dev)->bConfigurationValue != configno) {
317 err = usbd_set_config_no(dev, configno, 1);
318 if (err)
319 return (err);
320 }
321
322 err = usbd_interface_count(dev, &niface);
323 if (err)
324 return (err);
325 memset(sc->sc_endpoints, 0, sizeof sc->sc_endpoints);
326 for (ifaceno = 0; ifaceno < niface; ifaceno++) {
327 DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
328 err = usbd_device2interface_handle(dev, ifaceno, &iface);
329 if (err)
330 return (err);
331 err = usbd_endpoint_count(iface, &nendpt);
332 if (err)
333 return (err);
334 for (endptno = 0; endptno < nendpt; endptno++) {
335 ed = usbd_interface2endpoint_descriptor(iface,endptno);
336 KASSERT(ed != NULL);
337 endpt = ed->bEndpointAddress;
338 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
339 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
340 DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
341 "(%d,%d), sce=%p\n",
342 endptno, endpt, UE_GET_ADDR(endpt),
343 UE_GET_DIR(endpt), sce));
344 sce->sc = sc;
345 sce->edesc = ed;
346 sce->iface = iface;
347 }
348 }
349 return (USBD_NORMAL_COMPLETION);
350 }
351
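/*
 * Open one endpoint of the device.  The control endpoint is always
 * available; other endpoints are exclusive.  Interrupt IN endpoints get a
 * clist fed by ugenintr, bulk endpoints get a plain pipe (RA/WB is set up
 * later by ioctl), and isochronous IN endpoints get a ring buffer with
 * UGEN_NISOREQS transfers queued immediately.
 */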
352 int
353 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
354 {
355 struct ugen_softc *sc;
356 int unit = UGENUNIT(dev);
357 int endpt = UGENENDPOINT(dev);
358 usb_endpoint_descriptor_t *edesc;
359 struct ugen_endpoint *sce;
360 int dir, isize;
361 usbd_status err;
362 usbd_xfer_handle xfer;
363 void *tbuf;
364 int i, j;
365
366 USB_GET_SC_OPEN(ugen, unit, sc);
367
368 DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
369 flag, mode, unit, endpt));
370
371 if (sc == NULL || sc->sc_dying)
372 return (ENXIO);
373
374 /* The control endpoint allows multiple opens. */
375 if (endpt == USB_CONTROL_ENDPOINT) {
376 sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
377 return (0);
378 }
379
380 if (sc->sc_is_open[endpt])
381 return (EBUSY);
382
383 /* Make sure there are pipes for all directions. */
384 for (dir = OUT; dir <= IN; dir++) {
385 if (flag & (dir == OUT ? FWRITE : FREAD)) {
386 sce = &sc->sc_endpoints[endpt][dir];
387 if (sce == 0 || sce->edesc == 0)
388 return (ENXIO);
389 }
390 }
391
392 /* Actually open the pipes. */
393 /* XXX Should back out properly if it fails. */
394 for (dir = OUT; dir <= IN; dir++) {
395 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
396 continue;
397 sce = &sc->sc_endpoints[endpt][dir];
398 sce->state = 0;
399 sce->timeout = USBD_NO_TIMEOUT;
400 DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
401 sc, endpt, dir, sce));
402 edesc = sce->edesc;
403 switch (edesc->bmAttributes & UE_XFERTYPE) {
404 case UE_INTERRUPT:
405 if (dir == OUT) {
406 err = usbd_open_pipe(sce->iface,
407 edesc->bEndpointAddress, 0, &sce->pipeh);
408 if (err)
409 return (EIO);
410 break;
411 }
412 isize = UGETW(edesc->wMaxPacketSize);
413 if (isize == 0) /* shouldn't happen */
414 return (EINVAL);
415 sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
416 DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
417 endpt, isize));
418 if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1)
419 return (ENOMEM);
420 err = usbd_open_pipe_intr(sce->iface,
421 edesc->bEndpointAddress,
422 USBD_SHORT_XFER_OK, &sce->pipeh, sce,
423 sce->ibuf, isize, ugenintr,
424 USBD_DEFAULT_INTERVAL);
425 if (err) {
426 free(sce->ibuf, M_USBDEV);
427 clfree(&sce->q);
428 return (EIO);
429 }
430 DPRINTFN(5, ("ugenopen: interrupt open done\n"));
431 break;
432 case UE_BULK:
433 err = usbd_open_pipe(sce->iface,
434 edesc->bEndpointAddress, 0, &sce->pipeh);
435 if (err)
436 return (EIO);
437 #ifdef UGEN_BULK_RA_WB
438 sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
439 /*
440 * Use request size for non-RA/WB transfers
441 * as the default.
442 */
443 sce->ra_wb_reqsize = UGEN_BBSIZE;
444 #endif
445 break;
446 case UE_ISOCHRONOUS:
447 if (dir == OUT)
448 return (EINVAL);
449 isize = UGETW(edesc->wMaxPacketSize);
450 if (isize == 0) /* shouldn't happen */
451 return (EINVAL);
452 sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
453 M_USBDEV, M_WAITOK);
454 sce->cur = sce->fill = sce->ibuf;
455 sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
456 DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
457 endpt, isize));
458 err = usbd_open_pipe(sce->iface,
459 edesc->bEndpointAddress, 0, &sce->pipeh);
460 if (err) {
461 free(sce->ibuf, M_USBDEV);
462 return (EIO);
463 }
464 for(i = 0; i < UGEN_NISOREQS; ++i) {
465 sce->isoreqs[i].sce = sce;
466 xfer = usbd_alloc_xfer(sc->sc_udev);
467 if (xfer == 0)
468 goto bad;
469 sce->isoreqs[i].xfer = xfer;
470 tbuf = usbd_alloc_buffer
471 (xfer, isize * UGEN_NISORFRMS);
472 if (tbuf == 0) {
473 i++;
474 goto bad;
475 }
476 sce->isoreqs[i].dmabuf = tbuf;
477 for(j = 0; j < UGEN_NISORFRMS; ++j)
478 sce->isoreqs[i].sizes[j] = isize;
479 usbd_setup_isoc_xfer
480 (xfer, sce->pipeh, &sce->isoreqs[i],
481 sce->isoreqs[i].sizes,
482 UGEN_NISORFRMS, USBD_NO_COPY,
483 ugen_isoc_rintr);
484 (void)usbd_transfer(xfer);
485 }
486 DPRINTFN(5, ("ugenopen: isoc open done\n"));
487 break;
488 bad:
489 while (--i >= 0) /* implicit buffer free */
490 usbd_free_xfer(sce->isoreqs[i].xfer);
491 return (ENOMEM);
492 case UE_CONTROL:
493 sce->timeout = USBD_DEFAULT_TIMEOUT;
494 return (EINVAL);
495 }
496 }
497 sc->sc_is_open[endpt] = 1;
498 return (0);
499 }
500
501 int
502 ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
503 {
504 int endpt = UGENENDPOINT(dev);
505 struct ugen_softc *sc;
506 struct ugen_endpoint *sce;
507 int dir;
508 int i;
509
510 USB_GET_SC(ugen, UGENUNIT(dev), sc);
511
512 DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%"PRId64", endpt=%d\n",
513 flag, mode, UGENUNIT(dev), endpt));
514
515 #ifdef DIAGNOSTIC
516 if (!sc->sc_is_open[endpt]) {
517 printf("ugenclose: not open\n");
518 return (EINVAL);
519 }
520 #endif
521
522 if (endpt == USB_CONTROL_ENDPOINT) {
523 DPRINTFN(5, ("ugenclose: close control\n"));
524 sc->sc_is_open[endpt] = 0;
525 return (0);
526 }
527
528 for (dir = OUT; dir <= IN; dir++) {
529 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
530 continue;
531 sce = &sc->sc_endpoints[endpt][dir];
532 if (sce == NULL || sce->pipeh == NULL)
533 continue;
534 DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
535 endpt, dir, sce));
536
537 usbd_abort_pipe(sce->pipeh);
538 usbd_close_pipe(sce->pipeh);
539 sce->pipeh = NULL;
540
541 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
542 case UE_INTERRUPT:
543 ndflush(&sce->q, sce->q.c_cc);
544 clfree(&sce->q);
545 break;
546 case UE_ISOCHRONOUS:
547 for (i = 0; i < UGEN_NISOREQS; ++i)
548 usbd_free_xfer(sce->isoreqs[i].xfer);
549 break;
550 #ifdef UGEN_BULK_RA_WB
551 case UE_BULK:
552 if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
553 /* ibuf freed below */
554 usbd_free_xfer(sce->ra_wb_xfer);
555 break;
556 #endif
557 default:
558 break;
559 }
560
561 if (sce->ibuf != NULL) {
562 free(sce->ibuf, M_USBDEV);
563 sce->ibuf = NULL;
564 clfree(&sce->q);
565 }
566 }
567 sc->sc_is_open[endpt] = 0;
568
569 return (0);
570 }
571
572 Static int
573 ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
574 {
575 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
576 u_int32_t n, tn;
577 usbd_xfer_handle xfer;
578 usbd_status err;
579 int s;
580 int error = 0;
581
582 DPRINTFN(5, ("%s: ugenread: %d\n", USBDEVNAME(sc->sc_dev), endpt));
583
584 if (sc->sc_dying)
585 return (EIO);
586
587 if (endpt == USB_CONTROL_ENDPOINT)
588 return (ENODEV);
589
590 #ifdef DIAGNOSTIC
591 if (sce->edesc == NULL) {
592 printf("ugenread: no edesc\n");
593 return (EIO);
594 }
595 if (sce->pipeh == NULL) {
596 printf("ugenread: no pipe\n");
597 return (EIO);
598 }
599 #endif
600
601 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
602 case UE_INTERRUPT:
603 /* Block until activity occurred. */
604 s = splusb();
605 while (sce->q.c_cc == 0) {
606 if (flag & IO_NDELAY) {
607 splx(s);
608 return (EWOULDBLOCK);
609 }
610 sce->state |= UGEN_ASLP;
611 DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
612 error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
613 DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
614 if (sc->sc_dying)
615 error = EIO;
616 if (error) {
617 sce->state &= ~UGEN_ASLP;
618 break;
619 }
620 }
621 splx(s);
622
623 /* Transfer as many chunks as possible. */
624 while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
625 n = min(sce->q.c_cc, uio->uio_resid);
626 if (n > sizeof(sc->sc_buffer))
627 n = sizeof(sc->sc_buffer);
628
629 /* Remove a small chunk from the input queue. */
630 q_to_b(&sce->q, sc->sc_buffer, n);
631 DPRINTFN(5, ("ugenread: got %d chars\n", n));
632
633 /* Copy the data to the user process. */
634 error = uiomove(sc->sc_buffer, n, uio);
635 if (error)
636 break;
637 }
638 break;
639 case UE_BULK:
640 #ifdef UGEN_BULK_RA_WB
641 if (sce->state & UGEN_BULK_RA) {
642 DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
643 uio->uio_resid, sce->ra_wb_used));
644 xfer = sce->ra_wb_xfer;
645
646 s = splusb();
647 if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
648 splx(s);
649 return (EWOULDBLOCK);
650 }
651 while (uio->uio_resid > 0 && !error) {
652 while (sce->ra_wb_used == 0) {
653 sce->state |= UGEN_ASLP;
654 DPRINTFN(5,
655 ("ugenread: sleep on %p\n",
656 sce));
657 error = tsleep(sce, PZERO | PCATCH,
658 "ugenrb", 0);
659 DPRINTFN(5,
660 ("ugenread: woke, error=%d\n",
661 error));
662 if (sc->sc_dying)
663 error = EIO;
664 if (error) {
665 sce->state &= ~UGEN_ASLP;
666 break;
667 }
668 }
669
670 /* Copy data to the process. */
671 while (uio->uio_resid > 0
672 && sce->ra_wb_used > 0) {
673 n = min(uio->uio_resid,
674 sce->ra_wb_used);
675 n = min(n, sce->limit - sce->cur);
676 error = uiomove(sce->cur, n, uio);
677 if (error)
678 break;
679 sce->cur += n;
680 sce->ra_wb_used -= n;
681 if (sce->cur == sce->limit)
682 sce->cur = sce->ibuf;
683 }
684
685 /*
686 * If the transfers stopped because the
687 * buffer was full, restart them.
688 */
689 if (sce->state & UGEN_RA_WB_STOP &&
690 sce->ra_wb_used < sce->limit - sce->ibuf) {
691 n = (sce->limit - sce->ibuf)
692 - sce->ra_wb_used;
693 usbd_setup_xfer(xfer,
694 sce->pipeh, sce, NULL,
695 min(n, sce->ra_wb_xferlen),
696 USBD_NO_COPY, USBD_NO_TIMEOUT,
697 ugen_bulkra_intr);
698 sce->state &= ~UGEN_RA_WB_STOP;
699 err = usbd_transfer(xfer);
700 if (err != USBD_IN_PROGRESS)
701 /*
702 * The transfer has not been
703 * queued. Setting STOP
704 * will make us try
705 * again at the next read.
706 */
707 sce->state |= UGEN_RA_WB_STOP;
708 }
709 }
710 splx(s);
711 break;
712 }
713 #endif
714 xfer = usbd_alloc_xfer(sc->sc_udev);
715 if (xfer == 0)
716 return (ENOMEM);
717 while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
718 DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
719 tn = n;
720 err = usbd_bulk_transfer(
721 xfer, sce->pipeh,
722 sce->state & UGEN_SHORT_OK ?
723 USBD_SHORT_XFER_OK : 0,
724 sce->timeout, sc->sc_buffer, &tn, "ugenrb");
725 if (err) {
726 if (err == USBD_INTERRUPTED)
727 error = EINTR;
728 else if (err == USBD_TIMEOUT)
729 error = ETIMEDOUT;
730 else
731 error = EIO;
732 break;
733 }
734 DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
735 error = uiomove(sc->sc_buffer, tn, uio);
736 if (error || tn < n)
737 break;
738 }
739 usbd_free_xfer(xfer);
740 break;
741 case UE_ISOCHRONOUS:
742 s = splusb();
743 while (sce->cur == sce->fill) {
744 if (flag & IO_NDELAY) {
745 splx(s);
746 return (EWOULDBLOCK);
747 }
748 sce->state |= UGEN_ASLP;
749 DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
750 error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
751 DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
752 if (sc->sc_dying)
753 error = EIO;
754 if (error) {
755 sce->state &= ~UGEN_ASLP;
756 break;
757 }
758 }
759
760 while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
761 if(sce->fill > sce->cur)
762 n = min(sce->fill - sce->cur, uio->uio_resid);
763 else
764 n = min(sce->limit - sce->cur, uio->uio_resid);
765
766 DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));
767
768 /* Copy the data to the user process. */
769 error = uiomove(sce->cur, n, uio);
770 if (error)
771 break;
772 sce->cur += n;
773 if(sce->cur >= sce->limit)
774 sce->cur = sce->ibuf;
775 }
776 splx(s);
777 break;
778
779
780 default:
781 return (ENXIO);
782 }
783 return (error);
784 }
785
786 int
787 ugenread(dev_t dev, struct uio *uio, int flag)
788 {
789 int endpt = UGENENDPOINT(dev);
790 struct ugen_softc *sc;
791 int error;
792
793 USB_GET_SC(ugen, UGENUNIT(dev), sc);
794
795 sc->sc_refcnt++;
796 error = ugen_do_read(sc, endpt, uio, flag);
797 if (--sc->sc_refcnt < 0)
798 usb_detach_wakeup(USBDEV(sc->sc_dev));
799 return (error);
800 }
801
802 Static int
803 ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
804 int flag)
805 {
806 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
807 u_int32_t n;
808 int error = 0;
809 #ifdef UGEN_BULK_RA_WB
810 int s;
811 u_int32_t tn;
812 char *dbuf;
813 #endif
814 usbd_xfer_handle xfer;
815 usbd_status err;
816
817 DPRINTFN(5, ("%s: ugenwrite: %d\n", USBDEVNAME(sc->sc_dev), endpt));
818
819 if (sc->sc_dying)
820 return (EIO);
821
822 if (endpt == USB_CONTROL_ENDPOINT)
823 return (ENODEV);
824
825 #ifdef DIAGNOSTIC
826 if (sce->edesc == NULL) {
827 printf("ugenwrite: no edesc\n");
828 return (EIO);
829 }
830 if (sce->pipeh == NULL) {
831 printf("ugenwrite: no pipe\n");
832 return (EIO);
833 }
834 #endif
835
836 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
837 case UE_BULK:
838 #ifdef UGEN_BULK_RA_WB
839 if (sce->state & UGEN_BULK_WB) {
840 DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
841 uio->uio_resid, sce->ra_wb_used));
842 xfer = sce->ra_wb_xfer;
843
844 s = splusb();
845 if (sce->ra_wb_used == sce->limit - sce->ibuf &&
846 flag & IO_NDELAY) {
847 splx(s);
848 return (EWOULDBLOCK);
849 }
850 while (uio->uio_resid > 0 && !error) {
851 while (sce->ra_wb_used ==
852 sce->limit - sce->ibuf) {
853 sce->state |= UGEN_ASLP;
854 DPRINTFN(5,
855 ("ugenwrite: sleep on %p\n",
856 sce));
857 error = tsleep(sce, PZERO | PCATCH,
858 "ugenwb", 0);
859 DPRINTFN(5,
860 ("ugenwrite: woke, error=%d\n",
861 error));
862 if (sc->sc_dying)
863 error = EIO;
864 if (error) {
865 sce->state &= ~UGEN_ASLP;
866 break;
867 }
868 }
869
870 /* Copy data from the process. */
871 while (uio->uio_resid > 0 &&
872 sce->ra_wb_used < sce->limit - sce->ibuf) {
873 n = min(uio->uio_resid,
874 (sce->limit - sce->ibuf)
875 - sce->ra_wb_used);
876 n = min(n, sce->limit - sce->fill);
877 error = uiomove(sce->fill, n, uio);
878 if (error)
879 break;
880 sce->fill += n;
881 sce->ra_wb_used += n;
882 if (sce->fill == sce->limit)
883 sce->fill = sce->ibuf;
884 }
885
886 /*
887 * If the transfers stopped because the
888 * buffer was empty, restart them.
889 */
890 if (sce->state & UGEN_RA_WB_STOP &&
891 sce->ra_wb_used > 0) {
892 dbuf = (char *)usbd_get_buffer(xfer);
893 n = min(sce->ra_wb_used,
894 sce->ra_wb_xferlen);
895 tn = min(n, sce->limit - sce->cur);
896 memcpy(dbuf, sce->cur, tn);
897 dbuf += tn;
898 if (n - tn > 0)
899 memcpy(dbuf, sce->ibuf,
900 n - tn);
901 usbd_setup_xfer(xfer,
902 sce->pipeh, sce, NULL, n,
903 USBD_NO_COPY, USBD_NO_TIMEOUT,
904 ugen_bulkwb_intr);
905 sce->state &= ~UGEN_RA_WB_STOP;
906 err = usbd_transfer(xfer);
907 if (err != USBD_IN_PROGRESS)
908 /*
909 * The transfer has not been
910 * queued. Setting STOP
911 * will make us try again
912 * at the next read.
913 */
914 sce->state |= UGEN_RA_WB_STOP;
915 }
916 }
917 splx(s);
918 break;
919 }
920 #endif
921 xfer = usbd_alloc_xfer(sc->sc_udev);
922 if (xfer == 0)
923 return (EIO);
924 while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
925 error = uiomove(sc->sc_buffer, n, uio);
926 if (error)
927 break;
928 DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
929 err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
930 sce->timeout, sc->sc_buffer, &n,"ugenwb");
931 if (err) {
932 if (err == USBD_INTERRUPTED)
933 error = EINTR;
934 else if (err == USBD_TIMEOUT)
935 error = ETIMEDOUT;
936 else
937 error = EIO;
938 break;
939 }
940 }
941 usbd_free_xfer(xfer);
942 break;
943 case UE_INTERRUPT:
944 xfer = usbd_alloc_xfer(sc->sc_udev);
945 if (xfer == 0)
946 return (EIO);
947 while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
948 uio->uio_resid)) != 0) {
949 error = uiomove(sc->sc_buffer, n, uio);
950 if (error)
951 break;
952 DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
953 err = usbd_intr_transfer(xfer, sce->pipeh, 0,
954 sce->timeout, sc->sc_buffer, &n, "ugenwi");
955 if (err) {
956 if (err == USBD_INTERRUPTED)
957 error = EINTR;
958 else if (err == USBD_TIMEOUT)
959 error = ETIMEDOUT;
960 else
961 error = EIO;
962 break;
963 }
964 }
965 usbd_free_xfer(xfer);
966 break;
967 default:
968 return (ENXIO);
969 }
970 return (error);
971 }
972
973 int
974 ugenwrite(dev_t dev, struct uio *uio, int flag)
975 {
976 int endpt = UGENENDPOINT(dev);
977 struct ugen_softc *sc;
978 int error;
979
980 USB_GET_SC(ugen, UGENUNIT(dev), sc);
981
982 sc->sc_refcnt++;
983 error = ugen_do_write(sc, endpt, uio, flag);
984 if (--sc->sc_refcnt < 0)
985 usb_detach_wakeup(USBDEV(sc->sc_dev));
986 return (error);
987 }
988
989 #if defined(__NetBSD__) || defined(__OpenBSD__)
990 int
991 ugen_activate(device_ptr_t self, enum devact act)
992 {
993 struct ugen_softc *sc = device_private(self);
994
995 switch (act) {
996 case DVACT_ACTIVATE:
997 return (EOPNOTSUPP);
998
999 case DVACT_DEACTIVATE:
1000 sc->sc_dying = 1;
1001 break;
1002 }
1003 return (0);
1004 }
1005 #endif
1006
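/*
 * Detach: mark the device dying, abort every open pipe so sleeping readers
 * and writers wake up, wait for the reference count to drain, and finally
 * revoke the device vnodes so outstanding opens are closed.
 */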
1007 USB_DETACH(ugen)
1008 {
1009 USB_DETACH_START(ugen, sc);
1010 struct ugen_endpoint *sce;
1011 int i, dir;
1012 int s;
1013 #if defined(__NetBSD__) || defined(__OpenBSD__)
1014 int maj, mn;
1015
1016 DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
1017 #elif defined(__FreeBSD__)
1018 DPRINTF(("ugen_detach: sc=%p\n", sc));
1019 #endif
1020
1021 device_deregister_all(self);
1022
1023 sc->sc_dying = 1;
1024 pmf_device_deregister(self);
1025 /* Abort all pipes. Causes processes waiting for transfer to wake. */
1026 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1027 for (dir = OUT; dir <= IN; dir++) {
1028 sce = &sc->sc_endpoints[i][dir];
1029 if (sce && sce->pipeh)
1030 usbd_abort_pipe(sce->pipeh);
1031 }
1032 }
1033
1034 s = splusb();
1035 if (--sc->sc_refcnt >= 0) {
1036 /* Wake everyone */
1037 for (i = 0; i < USB_MAX_ENDPOINTS; i++)
1038 wakeup(&sc->sc_endpoints[i][IN]);
1039 /* Wait for processes to go away. */
1040 usb_detach_wait(USBDEV(sc->sc_dev));
1041 }
1042 splx(s);
1043
1044 #if defined(__NetBSD__) || defined(__OpenBSD__)
1045 /* locate the major number */
1046 #if defined(__NetBSD__)
1047 maj = cdevsw_lookup_major(&ugen_cdevsw);
1048 #elif defined(__OpenBSD__)
1049 for (maj = 0; maj < nchrdev; maj++)
1050 if (cdevsw[maj].d_open == ugenopen)
1051 break;
1052 #endif
1053
1054 /* Nuke the vnodes for any open instances (calls close). */
1055 mn = device_unit(self) * USB_MAX_ENDPOINTS;
1056 vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
1057 #elif defined(__FreeBSD__)
1058 /* XXX not implemented yet */
1059 #endif
1060
1061 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
1062 USBDEV(sc->sc_dev));
1063
1064 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1065 for (dir = OUT; dir <= IN; dir++) {
1066 sce = &sc->sc_endpoints[i][dir];
1067 seldestroy(&sce->rsel);
1068 }
1069 }
1070
1071 return (0);
1072 }
1073
1074 Static void
1075 ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
1076 {
1077 struct ugen_endpoint *sce = addr;
1078 /*struct ugen_softc *sc = sce->sc;*/
1079 u_int32_t count;
1080 u_char *ibuf;
1081
1082 if (status == USBD_CANCELLED)
1083 return;
1084
1085 if (status != USBD_NORMAL_COMPLETION) {
1086 DPRINTF(("ugenintr: status=%d\n", status));
1087 if (status == USBD_STALLED)
1088 usbd_clear_endpoint_stall_async(sce->pipeh);
1089 return;
1090 }
1091
1092 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1093 ibuf = sce->ibuf;
1094
1095 DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
1096 xfer, status, count));
1097 DPRINTFN(5, (" data = %02x %02x %02x\n",
1098 ibuf[0], ibuf[1], ibuf[2]));
1099
1100 (void)b_to_q(ibuf, count, &sce->q);
1101
1102 if (sce->state & UGEN_ASLP) {
1103 sce->state &= ~UGEN_ASLP;
1104 DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
1105 wakeup(sce);
1106 }
1107 selnotify(&sce->rsel, 0, 0);
1108 }
1109
1110 Static void
1111 ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
1112 usbd_status status)
1113 {
1114 struct isoreq *req = addr;
1115 struct ugen_endpoint *sce = req->sce;
1116 u_int32_t count, n;
1117 int i, isize;
1118
1119 /* Return if we are aborting. */
1120 if (status == USBD_CANCELLED)
1121 return;
1122
1123 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1124 DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1125 (long)(req - sce->isoreqs), count));
1126
1127 /* throw away oldest input if the buffer is full */
1128 if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1129 sce->cur += count;
1130 if(sce->cur >= sce->limit)
 1131 			sce->cur = sce->ibuf + (sce->cur - sce->limit);
1132 DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1133 count));
1134 }
1135
1136 isize = UGETW(sce->edesc->wMaxPacketSize);
1137 for (i = 0; i < UGEN_NISORFRMS; i++) {
1138 u_int32_t actlen = req->sizes[i];
1139 char const *tbuf = (char const *)req->dmabuf + isize * i;
1140
1141 /* copy data to buffer */
1142 while (actlen > 0) {
1143 n = min(actlen, sce->limit - sce->fill);
1144 memcpy(sce->fill, tbuf, n);
1145
1146 tbuf += n;
1147 actlen -= n;
1148 sce->fill += n;
1149 if(sce->fill == sce->limit)
1150 sce->fill = sce->ibuf;
1151 }
1152
1153 /* setup size for next transfer */
1154 req->sizes[i] = isize;
1155 }
1156
1157 usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
1158 USBD_NO_COPY, ugen_isoc_rintr);
1159 (void)usbd_transfer(xfer);
1160
1161 if (sce->state & UGEN_ASLP) {
1162 sce->state &= ~UGEN_ASLP;
1163 DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1164 wakeup(sce);
1165 }
1166 selnotify(&sce->rsel, 0, 0);
1167 }
1168
1169 #ifdef UGEN_BULK_RA_WB
1170 Static void
1171 ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1172 usbd_status status)
1173 {
1174 struct ugen_endpoint *sce = addr;
1175 u_int32_t count, n;
1176 char const *tbuf;
1177 usbd_status err;
1178
1179 /* Return if we are aborting. */
1180 if (status == USBD_CANCELLED)
1181 return;
1182
1183 if (status != USBD_NORMAL_COMPLETION) {
1184 DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
1185 sce->state |= UGEN_RA_WB_STOP;
1186 if (status == USBD_STALLED)
1187 usbd_clear_endpoint_stall_async(sce->pipeh);
1188 return;
1189 }
1190
1191 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1192
1193 /* Keep track of how much is in the buffer. */
1194 sce->ra_wb_used += count;
1195
1196 /* Copy data to buffer. */
1197 tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
1198 n = min(count, sce->limit - sce->fill);
1199 memcpy(sce->fill, tbuf, n);
1200 tbuf += n;
1201 count -= n;
1202 sce->fill += n;
1203 if (sce->fill == sce->limit)
1204 sce->fill = sce->ibuf;
1205 if (count > 0) {
1206 memcpy(sce->fill, tbuf, count);
1207 sce->fill += count;
1208 }
1209
1210 /* Set up the next request if necessary. */
1211 n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
1212 if (n > 0) {
1213 usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1214 min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
1215 USBD_NO_TIMEOUT, ugen_bulkra_intr);
1216 err = usbd_transfer(xfer);
1217 if (err != USBD_IN_PROGRESS) {
1218 printf("usbd_bulkra_intr: error=%d\n", err);
1219 /*
1220 * The transfer has not been queued. Setting STOP
1221 * will make us try again at the next read.
1222 */
1223 sce->state |= UGEN_RA_WB_STOP;
1224 }
1225 }
1226 else
1227 sce->state |= UGEN_RA_WB_STOP;
1228
1229 if (sce->state & UGEN_ASLP) {
1230 sce->state &= ~UGEN_ASLP;
1231 DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
1232 wakeup(sce);
1233 }
1234 selnotify(&sce->rsel, 0, 0);
1235 }
1236
1237 Static void
1238 ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1239 usbd_status status)
1240 {
1241 struct ugen_endpoint *sce = addr;
1242 u_int32_t count, n;
1243 char *tbuf;
1244 usbd_status err;
1245
1246 /* Return if we are aborting. */
1247 if (status == USBD_CANCELLED)
1248 return;
1249
1250 if (status != USBD_NORMAL_COMPLETION) {
1251 DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
1252 sce->state |= UGEN_RA_WB_STOP;
1253 if (status == USBD_STALLED)
1254 usbd_clear_endpoint_stall_async(sce->pipeh);
1255 return;
1256 }
1257
1258 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1259
1260 /* Keep track of how much is in the buffer. */
1261 sce->ra_wb_used -= count;
1262
1263 /* Update buffer pointers. */
1264 sce->cur += count;
1265 if (sce->cur >= sce->limit)
1266 sce->cur = sce->ibuf + (sce->cur - sce->limit);
1267
1268 /* Set up next request if necessary. */
1269 if (sce->ra_wb_used > 0) {
1270 /* copy data from buffer */
1271 tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
1272 count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
1273 n = min(count, sce->limit - sce->cur);
1274 memcpy(tbuf, sce->cur, n);
1275 tbuf += n;
1276 if (count - n > 0)
1277 memcpy(tbuf, sce->ibuf, count - n);
1278
1279 usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1280 count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
1281 err = usbd_transfer(xfer);
1282 if (err != USBD_IN_PROGRESS) {
1283 printf("usbd_bulkwb_intr: error=%d\n", err);
1284 /*
1285 * The transfer has not been queued. Setting STOP
1286 * will make us try again at the next write.
1287 */
1288 sce->state |= UGEN_RA_WB_STOP;
1289 }
1290 }
1291 else
1292 sce->state |= UGEN_RA_WB_STOP;
1293
1294 if (sce->state & UGEN_ASLP) {
1295 sce->state &= ~UGEN_ASLP;
1296 DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
1297 wakeup(sce);
1298 }
1299 selnotify(&sce->rsel, 0, 0);
1300 }
1301 #endif
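/*
 * The RA/WB completion handlers above and ugen_do_read/ugen_do_write
 * cooperate through UGEN_RA_WB_STOP: a handler sets it when the ring is
 * full (read-ahead) or empty (write-behind), and the next read or write
 * clears it and requeues the transfer once space or data is available.
 */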
1302
1303 Static usbd_status
1304 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1305 {
1306 usbd_interface_handle iface;
1307 usb_endpoint_descriptor_t *ed;
1308 usbd_status err;
1309 struct ugen_endpoint *sce;
1310 u_int8_t niface, nendpt, endptno, endpt;
1311 int dir;
1312
1313 DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1314
1315 err = usbd_interface_count(sc->sc_udev, &niface);
1316 if (err)
1317 return (err);
1318 if (ifaceidx < 0 || ifaceidx >= niface)
1319 return (USBD_INVAL);
1320
1321 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1322 if (err)
1323 return (err);
1324 err = usbd_endpoint_count(iface, &nendpt);
1325 if (err)
1326 return (err);
1327 /* XXX should only do this after setting new altno has succeeded */
1328 for (endptno = 0; endptno < nendpt; endptno++) {
1329 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1330 endpt = ed->bEndpointAddress;
1331 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1332 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1333 sce->sc = 0;
1334 sce->edesc = 0;
1335 sce->iface = 0;
1336 }
1337
1338 /* change setting */
1339 err = usbd_set_interface(iface, altno);
1340 if (err)
1341 return (err);
1342
1343 err = usbd_endpoint_count(iface, &nendpt);
1344 if (err)
1345 return (err);
1346 for (endptno = 0; endptno < nendpt; endptno++) {
1347 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1348 KASSERT(ed != NULL);
1349 endpt = ed->bEndpointAddress;
1350 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1351 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1352 sce->sc = sc;
1353 sce->edesc = ed;
1354 sce->iface = iface;
1355 }
1356 return (0);
1357 }
1358
1359 /* Retrieve a complete descriptor for a certain device and index. */
1360 Static usb_config_descriptor_t *
1361 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1362 {
1363 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1364 int len;
1365 usbd_status err;
1366
1367 if (index == USB_CURRENT_CONFIG_INDEX) {
1368 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1369 len = UGETW(tdesc->wTotalLength);
1370 if (lenp)
1371 *lenp = len;
1372 cdesc = malloc(len, M_TEMP, M_WAITOK);
1373 memcpy(cdesc, tdesc, len);
1374 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1375 } else {
1376 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1377 if (err)
1378 return (0);
1379 len = UGETW(cdescr.wTotalLength);
1380 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1381 if (lenp)
1382 *lenp = len;
1383 cdesc = malloc(len, M_TEMP, M_WAITOK);
1384 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1385 if (err) {
1386 free(cdesc, M_TEMP);
1387 return (0);
1388 }
1389 }
1390 return (cdesc);
1391 }
1392
1393 Static int
1394 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1395 {
1396 usbd_interface_handle iface;
1397 usbd_status err;
1398
1399 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1400 if (err)
1401 return (-1);
1402 return (usbd_get_interface_altindex(iface));
1403 }
1404
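/*
 * ioctl worker.  Endpoint-specific requests (short-transfer flag, timeout,
 * bulk RA/WB control) are handled first; everything after the
 * USB_CONTROL_ENDPOINT check operates on the device as a whole and is only
 * accepted on the control endpoint.
 */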
1405 Static int
1406 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1407 void *addr, int flag, struct lwp *l)
1408 {
1409 struct ugen_endpoint *sce;
1410 usbd_status err;
1411 usbd_interface_handle iface;
1412 struct usb_config_desc *cd;
1413 usb_config_descriptor_t *cdesc;
1414 struct usb_interface_desc *id;
1415 usb_interface_descriptor_t *idesc;
1416 struct usb_endpoint_desc *ed;
1417 usb_endpoint_descriptor_t *edesc;
1418 struct usb_alt_interface *ai;
1419 struct usb_string_desc *si;
1420 u_int8_t conf, alt;
1421
1422 DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
1423 if (sc->sc_dying)
1424 return (EIO);
1425
1426 switch (cmd) {
1427 case FIONBIO:
1428 /* All handled in the upper FS layer. */
1429 return (0);
1430 case USB_SET_SHORT_XFER:
1431 if (endpt == USB_CONTROL_ENDPOINT)
1432 return (EINVAL);
1433 /* This flag only affects read */
1434 sce = &sc->sc_endpoints[endpt][IN];
1435 if (sce == NULL || sce->pipeh == NULL)
1436 return (EINVAL);
1437 if (*(int *)addr)
1438 sce->state |= UGEN_SHORT_OK;
1439 else
1440 sce->state &= ~UGEN_SHORT_OK;
1441 return (0);
1442 case USB_SET_TIMEOUT:
1443 sce = &sc->sc_endpoints[endpt][IN];
1444 if (sce == NULL
1445 /* XXX this shouldn't happen, but the distinction between
1446 input and output pipes isn't clear enough.
1447 || sce->pipeh == NULL */
1448 )
1449 return (EINVAL);
1450 sce->timeout = *(int *)addr;
1451 return (0);
1452 case USB_SET_BULK_RA:
1453 #ifdef UGEN_BULK_RA_WB
1454 if (endpt == USB_CONTROL_ENDPOINT)
1455 return (EINVAL);
1456 sce = &sc->sc_endpoints[endpt][IN];
1457 if (sce == NULL || sce->pipeh == NULL)
1458 return (EINVAL);
1459 edesc = sce->edesc;
1460 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1461 return (EINVAL);
1462
1463 if (*(int *)addr) {
1464 /* Only turn RA on if it's currently off. */
1465 if (sce->state & UGEN_BULK_RA)
1466 return (0);
1467
1468 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1469 /* shouldn't happen */
1470 return (EINVAL);
1471 sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
1472 if (sce->ra_wb_xfer == NULL)
1473 return (ENOMEM);
1474 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1475 /*
1476 * Set up a dmabuf because we reuse the xfer with
1477 * the same (max) request length like isoc.
1478 */
1479 if (usbd_alloc_buffer(sce->ra_wb_xfer,
1480 sce->ra_wb_xferlen) == 0) {
1481 usbd_free_xfer(sce->ra_wb_xfer);
1482 return (ENOMEM);
1483 }
1484 sce->ibuf = malloc(sce->ra_wb_bufsize,
1485 M_USBDEV, M_WAITOK);
1486 sce->fill = sce->cur = sce->ibuf;
1487 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1488 sce->ra_wb_used = 0;
1489 sce->state |= UGEN_BULK_RA;
1490 sce->state &= ~UGEN_RA_WB_STOP;
1491 /* Now start reading. */
1492 usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
1493 NULL,
1494 min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1495 USBD_NO_COPY, USBD_NO_TIMEOUT,
1496 ugen_bulkra_intr);
1497 err = usbd_transfer(sce->ra_wb_xfer);
1498 if (err != USBD_IN_PROGRESS) {
1499 sce->state &= ~UGEN_BULK_RA;
1500 free(sce->ibuf, M_USBDEV);
1501 sce->ibuf = NULL;
1502 usbd_free_xfer(sce->ra_wb_xfer);
1503 return (EIO);
1504 }
1505 } else {
1506 /* Only turn RA off if it's currently on. */
1507 if (!(sce->state & UGEN_BULK_RA))
1508 return (0);
1509
1510 sce->state &= ~UGEN_BULK_RA;
1511 usbd_abort_pipe(sce->pipeh);
1512 usbd_free_xfer(sce->ra_wb_xfer);
1513 /*
1514 * XXX Discard whatever's in the buffer, but we
1515 * should keep it around and drain the buffer
1516 * instead.
1517 */
1518 free(sce->ibuf, M_USBDEV);
1519 sce->ibuf = NULL;
1520 }
1521 return (0);
1522 #else
1523 return (EOPNOTSUPP);
1524 #endif
1525 case USB_SET_BULK_WB:
1526 #ifdef UGEN_BULK_RA_WB
1527 if (endpt == USB_CONTROL_ENDPOINT)
1528 return (EINVAL);
1529 sce = &sc->sc_endpoints[endpt][OUT];
1530 if (sce == NULL || sce->pipeh == NULL)
1531 return (EINVAL);
1532 edesc = sce->edesc;
1533 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1534 return (EINVAL);
1535
1536 if (*(int *)addr) {
1537 /* Only turn WB on if it's currently off. */
1538 if (sce->state & UGEN_BULK_WB)
1539 return (0);
1540
1541 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1542 /* shouldn't happen */
1543 return (EINVAL);
1544 sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
1545 if (sce->ra_wb_xfer == NULL)
1546 return (ENOMEM);
1547 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1548 /*
1549 * Set up a dmabuf because we reuse the xfer with
1550 * the same (max) request length like isoc.
1551 */
1552 if (usbd_alloc_buffer(sce->ra_wb_xfer,
1553 sce->ra_wb_xferlen) == 0) {
1554 usbd_free_xfer(sce->ra_wb_xfer);
1555 return (ENOMEM);
1556 }
1557 sce->ibuf = malloc(sce->ra_wb_bufsize,
1558 M_USBDEV, M_WAITOK);
1559 sce->fill = sce->cur = sce->ibuf;
1560 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1561 sce->ra_wb_used = 0;
1562 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1563 } else {
1564 /* Only turn WB off if it's currently on. */
1565 if (!(sce->state & UGEN_BULK_WB))
1566 return (0);
1567
1568 sce->state &= ~UGEN_BULK_WB;
1569 /*
1570 * XXX Discard whatever's in the buffer, but we
1571 * should keep it around and keep writing to
1572 * drain the buffer instead.
1573 */
1574 usbd_abort_pipe(sce->pipeh);
1575 usbd_free_xfer(sce->ra_wb_xfer);
1576 free(sce->ibuf, M_USBDEV);
1577 sce->ibuf = NULL;
1578 }
1579 return (0);
1580 #else
1581 return (EOPNOTSUPP);
1582 #endif
1583 case USB_SET_BULK_RA_OPT:
1584 case USB_SET_BULK_WB_OPT:
1585 #ifdef UGEN_BULK_RA_WB
1586 {
1587 struct usb_bulk_ra_wb_opt *opt;
1588
1589 if (endpt == USB_CONTROL_ENDPOINT)
1590 return (EINVAL);
1591 opt = (struct usb_bulk_ra_wb_opt *)addr;
1592 if (cmd == USB_SET_BULK_RA_OPT)
1593 sce = &sc->sc_endpoints[endpt][IN];
1594 else
1595 sce = &sc->sc_endpoints[endpt][OUT];
1596 if (sce == NULL || sce->pipeh == NULL)
1597 return (EINVAL);
1598 if (opt->ra_wb_buffer_size < 1 ||
1599 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1600 opt->ra_wb_request_size < 1 ||
1601 opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1602 return (EINVAL);
1603 /*
1604 * XXX These changes do not take effect until the
1605 * next time RA/WB mode is enabled but they ought to
1606 * take effect immediately.
1607 */
1608 sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1609 sce->ra_wb_reqsize = opt->ra_wb_request_size;
1610 return (0);
1611 }
1612 #else
1613 return (EOPNOTSUPP);
1614 #endif
1615 default:
1616 break;
1617 }
1618
1619 if (endpt != USB_CONTROL_ENDPOINT)
1620 return (EINVAL);
1621
1622 switch (cmd) {
1623 #ifdef UGEN_DEBUG
1624 case USB_SETDEBUG:
1625 ugendebug = *(int *)addr;
1626 break;
1627 #endif
1628 case USB_GET_CONFIG:
1629 err = usbd_get_config(sc->sc_udev, &conf);
1630 if (err)
1631 return (EIO);
1632 *(int *)addr = conf;
1633 break;
1634 case USB_SET_CONFIG:
1635 if (!(flag & FWRITE))
1636 return (EPERM);
1637 err = ugen_set_config(sc, *(int *)addr);
1638 switch (err) {
1639 case USBD_NORMAL_COMPLETION:
1640 break;
1641 case USBD_IN_USE:
1642 return (EBUSY);
1643 default:
1644 return (EIO);
1645 }
1646 break;
1647 case USB_GET_ALTINTERFACE:
1648 ai = (struct usb_alt_interface *)addr;
1649 err = usbd_device2interface_handle(sc->sc_udev,
1650 ai->uai_interface_index, &iface);
1651 if (err)
1652 return (EINVAL);
1653 idesc = usbd_get_interface_descriptor(iface);
1654 if (idesc == NULL)
1655 return (EIO);
1656 ai->uai_alt_no = idesc->bAlternateSetting;
1657 break;
1658 case USB_SET_ALTINTERFACE:
1659 if (!(flag & FWRITE))
1660 return (EPERM);
1661 ai = (struct usb_alt_interface *)addr;
1662 err = usbd_device2interface_handle(sc->sc_udev,
1663 ai->uai_interface_index, &iface);
1664 if (err)
1665 return (EINVAL);
1666 err = ugen_set_interface(sc, ai->uai_interface_index,
1667 ai->uai_alt_no);
1668 if (err)
1669 return (EINVAL);
1670 break;
1671 case USB_GET_NO_ALT:
1672 ai = (struct usb_alt_interface *)addr;
1673 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
1674 if (cdesc == NULL)
1675 return (EINVAL);
1676 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1677 if (idesc == NULL) {
1678 free(cdesc, M_TEMP);
1679 return (EINVAL);
1680 }
1681 ai->uai_alt_no = usbd_get_no_alts(cdesc,
1682 idesc->bInterfaceNumber);
1683 free(cdesc, M_TEMP);
1684 break;
1685 case USB_GET_DEVICE_DESC:
1686 *(usb_device_descriptor_t *)addr =
1687 *usbd_get_device_descriptor(sc->sc_udev);
1688 break;
1689 case USB_GET_CONFIG_DESC:
1690 cd = (struct usb_config_desc *)addr;
1691 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
1692 if (cdesc == NULL)
1693 return (EINVAL);
1694 cd->ucd_desc = *cdesc;
1695 free(cdesc, M_TEMP);
1696 break;
1697 case USB_GET_INTERFACE_DESC:
1698 id = (struct usb_interface_desc *)addr;
1699 cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
1700 if (cdesc == NULL)
1701 return (EINVAL);
1702 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1703 id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1704 alt = ugen_get_alt_index(sc, id->uid_interface_index);
1705 else
1706 alt = id->uid_alt_index;
1707 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1708 if (idesc == NULL) {
1709 free(cdesc, M_TEMP);
1710 return (EINVAL);
1711 }
1712 id->uid_desc = *idesc;
1713 free(cdesc, M_TEMP);
1714 break;
1715 case USB_GET_ENDPOINT_DESC:
1716 ed = (struct usb_endpoint_desc *)addr;
1717 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
1718 if (cdesc == NULL)
1719 return (EINVAL);
1720 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1721 ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1722 alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1723 else
1724 alt = ed->ued_alt_index;
1725 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1726 alt, ed->ued_endpoint_index);
1727 if (edesc == NULL) {
1728 free(cdesc, M_TEMP);
1729 return (EINVAL);
1730 }
1731 ed->ued_desc = *edesc;
1732 free(cdesc, M_TEMP);
1733 break;
1734 case USB_GET_FULL_DESC:
1735 {
1736 int len;
1737 struct iovec iov;
1738 struct uio uio;
1739 struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1740 int error;
1741
 1742 		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
 1743 		if (len > fd->ufd_size)
 1744 			len = fd->ufd_size;
1745 iov.iov_base = (void *)fd->ufd_data;
1746 iov.iov_len = len;
1747 uio.uio_iov = &iov;
1748 uio.uio_iovcnt = 1;
1749 uio.uio_resid = len;
1750 uio.uio_offset = 0;
1751 uio.uio_rw = UIO_READ;
1752 uio.uio_vmspace = l->l_proc->p_vmspace;
1753 error = uiomove((void *)cdesc, len, &uio);
1754 free(cdesc, M_TEMP);
1755 return (error);
1756 }
1757 case USB_GET_STRING_DESC: {
1758 int len;
1759 si = (struct usb_string_desc *)addr;
1760 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1761 si->usd_language_id, &si->usd_desc, &len);
1762 if (err)
1763 return (EINVAL);
1764 break;
1765 }
1766 case USB_DO_REQUEST:
1767 {
1768 struct usb_ctl_request *ur = (void *)addr;
1769 int len = UGETW(ur->ucr_request.wLength);
1770 struct iovec iov;
1771 struct uio uio;
1772 void *ptr = 0;
1773 usbd_status xerr;
1774 int error = 0;
1775
1776 if (!(flag & FWRITE))
1777 return (EPERM);
1778 /* Avoid requests that would damage the bus integrity. */
1779 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1780 ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
1781 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1782 ur->ucr_request.bRequest == UR_SET_CONFIG) ||
1783 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
1784 ur->ucr_request.bRequest == UR_SET_INTERFACE))
1785 return (EINVAL);
1786
1787 if (len < 0 || len > 32767)
1788 return (EINVAL);
1789 if (len != 0) {
1790 iov.iov_base = (void *)ur->ucr_data;
1791 iov.iov_len = len;
1792 uio.uio_iov = &iov;
1793 uio.uio_iovcnt = 1;
1794 uio.uio_resid = len;
1795 uio.uio_offset = 0;
1796 uio.uio_rw =
1797 ur->ucr_request.bmRequestType & UT_READ ?
1798 UIO_READ : UIO_WRITE;
1799 uio.uio_vmspace = l->l_proc->p_vmspace;
1800 ptr = malloc(len, M_TEMP, M_WAITOK);
1801 if (uio.uio_rw == UIO_WRITE) {
1802 error = uiomove(ptr, len, &uio);
1803 if (error)
1804 goto ret;
1805 }
1806 }
1807 sce = &sc->sc_endpoints[endpt][IN];
1808 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
1809 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
1810 if (xerr) {
1811 error = EIO;
1812 goto ret;
1813 }
1814 if (len != 0) {
1815 if (uio.uio_rw == UIO_READ) {
1816 error = uiomove(ptr, len, &uio);
1817 if (error)
1818 goto ret;
1819 }
1820 }
1821 ret:
1822 if (ptr)
1823 free(ptr, M_TEMP);
1824 return (error);
1825 }
1826 case USB_GET_DEVICEINFO:
1827 usbd_fill_deviceinfo(sc->sc_udev,
1828 (struct usb_device_info *)addr, 0);
1829 break;
1830 #ifdef COMPAT_30
1831 case USB_GET_DEVICEINFO_OLD:
1832 usbd_fill_deviceinfo_old(sc->sc_udev,
1833 (struct usb_device_info_old *)addr, 0);
1834
1835 break;
1836 #endif
1837 default:
1838 return (EINVAL);
1839 }
1840 return (0);
1841 }
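/*
 * Illustrative userland sketch (not part of the driver; the device path is
 * only an example): reading a bulk IN endpoint through ugen using the
 * per-endpoint ioctls handled above.
 *
 *	#include <sys/ioctl.h>
 *	#include <dev/usb/usb.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	char buf[1024];
 *	int fd = open("/dev/ugen0.01", O_RDONLY);	// unit 0, endpoint 1
 *	int timeout = 1000;				// milliseconds
 *	ioctl(fd, USB_SET_TIMEOUT, &timeout);		// bound bulk reads
 *	int one = 1;
 *	ioctl(fd, USB_SET_SHORT_XFER, &one);		// allow short transfers
 *	ssize_t n = read(fd, buf, sizeof buf);		// one bulk transfer
 *	close(fd);
 */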
1842
1843 int
1844 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1845 {
1846 int endpt = UGENENDPOINT(dev);
1847 struct ugen_softc *sc;
1848 int error;
1849
1850 USB_GET_SC(ugen, UGENUNIT(dev), sc);
1851
1852 sc->sc_refcnt++;
1853 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1854 if (--sc->sc_refcnt < 0)
1855 usb_detach_wakeup(USBDEV(sc->sc_dev));
1856 return (error);
1857 }
1858
1859 int
1860 ugenpoll(dev_t dev, int events, struct lwp *l)
1861 {
1862 struct ugen_softc *sc;
1863 struct ugen_endpoint *sce_in, *sce_out;
1864 int revents = 0;
1865 int s;
1866
1867 USB_GET_SC(ugen, UGENUNIT(dev), sc);
1868
1869 if (sc->sc_dying)
1870 return (POLLHUP);
1871
1872 sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
1873 sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
1874 if (sce_in == NULL && sce_out == NULL)
1875 return (POLLERR);
1876 #ifdef DIAGNOSTIC
1877 if (!sce_in->edesc && !sce_out->edesc) {
1878 printf("ugenpoll: no edesc\n");
1879 return (POLLERR);
1880 }
1881 /* It's possible to have only one pipe open. */
1882 if (!sce_in->pipeh && !sce_out->pipeh) {
1883 printf("ugenpoll: no pipe\n");
1884 return (POLLERR);
1885 }
1886 #endif
1887 s = splusb();
1888 if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
1889 switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
1890 case UE_INTERRUPT:
1891 if (sce_in->q.c_cc > 0)
1892 revents |= events & (POLLIN | POLLRDNORM);
1893 else
1894 selrecord(l, &sce_in->rsel);
1895 break;
1896 case UE_ISOCHRONOUS:
1897 if (sce_in->cur != sce_in->fill)
1898 revents |= events & (POLLIN | POLLRDNORM);
1899 else
1900 selrecord(l, &sce_in->rsel);
1901 break;
1902 case UE_BULK:
1903 #ifdef UGEN_BULK_RA_WB
1904 if (sce_in->state & UGEN_BULK_RA) {
1905 if (sce_in->ra_wb_used > 0)
1906 revents |= events &
1907 (POLLIN | POLLRDNORM);
1908 else
1909 selrecord(l, &sce_in->rsel);
1910 break;
1911 }
1912 #endif
1913 /*
1914 * We have no easy way of determining if a read will
1915 * yield any data or a write will happen.
1916 * Pretend they will.
1917 */
1918 revents |= events & (POLLIN | POLLRDNORM);
1919 break;
1920 default:
1921 break;
1922 }
1923 if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
1924 switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
1925 case UE_INTERRUPT:
1926 case UE_ISOCHRONOUS:
1927 /* XXX unimplemented */
1928 break;
1929 case UE_BULK:
1930 #ifdef UGEN_BULK_RA_WB
1931 if (sce_out->state & UGEN_BULK_WB) {
1932 if (sce_out->ra_wb_used <
1933 sce_out->limit - sce_out->ibuf)
1934 revents |= events &
1935 (POLLOUT | POLLWRNORM);
1936 else
1937 selrecord(l, &sce_out->rsel);
1938 break;
1939 }
1940 #endif
1941 /*
1942 * We have no easy way of determining if a read will
1943 * yield any data or a write will happen.
1944 * Pretend they will.
1945 */
1946 revents |= events & (POLLOUT | POLLWRNORM);
1947 break;
1948 default:
1949 break;
1950 }
1951
1952
1953 splx(s);
1954 return (revents);
1955 }
1956
1957 static void
1958 filt_ugenrdetach(struct knote *kn)
1959 {
1960 struct ugen_endpoint *sce = kn->kn_hook;
1961 int s;
1962
1963 s = splusb();
1964 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1965 splx(s);
1966 }
1967
1968 static int
1969 filt_ugenread_intr(struct knote *kn, long hint)
1970 {
1971 struct ugen_endpoint *sce = kn->kn_hook;
1972
1973 kn->kn_data = sce->q.c_cc;
1974 return (kn->kn_data > 0);
1975 }
1976
1977 static int
1978 filt_ugenread_isoc(struct knote *kn, long hint)
1979 {
1980 struct ugen_endpoint *sce = kn->kn_hook;
1981
1982 if (sce->cur == sce->fill)
1983 return (0);
1984
1985 if (sce->cur < sce->fill)
1986 kn->kn_data = sce->fill - sce->cur;
1987 else
1988 kn->kn_data = (sce->limit - sce->cur) +
1989 (sce->fill - sce->ibuf);
1990
1991 return (1);
1992 }
1993
1994 #ifdef UGEN_BULK_RA_WB
1995 static int
1996 filt_ugenread_bulk(struct knote *kn, long hint)
1997 {
1998 struct ugen_endpoint *sce = kn->kn_hook;
1999
2000 if (!(sce->state & UGEN_BULK_RA))
2001 /*
2002 * We have no easy way of determining if a read will
2003 * yield any data or a write will happen.
2004 * So, emulate "seltrue".
2005 */
2006 return (filt_seltrue(kn, hint));
2007
2008 if (sce->ra_wb_used == 0)
2009 return (0);
2010
2011 kn->kn_data = sce->ra_wb_used;
2012
2013 return (1);
2014 }
2015
2016 static int
2017 filt_ugenwrite_bulk(struct knote *kn, long hint)
2018 {
2019 struct ugen_endpoint *sce = kn->kn_hook;
2020
2021 if (!(sce->state & UGEN_BULK_WB))
2022 /*
2023 * We have no easy way of determining if a read will
2024 * yield any data or a write will happen.
2025 * So, emulate "seltrue".
2026 */
2027 return (filt_seltrue(kn, hint));
2028
2029 if (sce->ra_wb_used == sce->limit - sce->ibuf)
2030 return (0);
2031
2032 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2033
2034 return (1);
2035 }
2036 #endif
2037
2038 static const struct filterops ugenread_intr_filtops =
2039 { 1, NULL, filt_ugenrdetach, filt_ugenread_intr };
2040
2041 static const struct filterops ugenread_isoc_filtops =
2042 { 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };
2043
2044 #ifdef UGEN_BULK_RA_WB
2045 static const struct filterops ugenread_bulk_filtops =
2046 { 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };
2047
2048 static const struct filterops ugenwrite_bulk_filtops =
2049 { 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2050 #else
2051 static const struct filterops ugen_seltrue_filtops =
2052 { 1, NULL, filt_ugenrdetach, filt_seltrue };
2053 #endif
2054
2055 int
2056 ugenkqfilter(dev_t dev, struct knote *kn)
2057 {
2058 struct ugen_softc *sc;
2059 struct ugen_endpoint *sce;
2060 struct klist *klist;
2061 int s;
2062
2063 USB_GET_SC(ugen, UGENUNIT(dev), sc);
2064
2065 if (sc->sc_dying)
2066 return (ENXIO);
2067
2068 switch (kn->kn_filter) {
2069 case EVFILT_READ:
2070 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
2071 if (sce == NULL)
2072 return (EINVAL);
2073
2074 klist = &sce->rsel.sel_klist;
2075 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2076 case UE_INTERRUPT:
2077 kn->kn_fop = &ugenread_intr_filtops;
2078 break;
2079 case UE_ISOCHRONOUS:
2080 kn->kn_fop = &ugenread_isoc_filtops;
2081 break;
2082 case UE_BULK:
2083 #ifdef UGEN_BULK_RA_WB
2084 kn->kn_fop = &ugenread_bulk_filtops;
2085 break;
2086 #else
2087 /*
2088 * We have no easy way of determining if a read will
2089 * yield any data or a write will happen.
2090 * So, emulate "seltrue".
2091 */
2092 kn->kn_fop = &ugen_seltrue_filtops;
2093 #endif
2094 break;
2095 default:
2096 return (EINVAL);
2097 }
2098 break;
2099
2100 case EVFILT_WRITE:
2101 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
2102 if (sce == NULL)
2103 return (EINVAL);
2104
2105 klist = &sce->rsel.sel_klist;
2106 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2107 case UE_INTERRUPT:
2108 case UE_ISOCHRONOUS:
2109 /* XXX poll doesn't support this */
2110 return (EINVAL);
2111
2112 case UE_BULK:
2113 #ifdef UGEN_BULK_RA_WB
2114 kn->kn_fop = &ugenwrite_bulk_filtops;
2115 #else
2116 /*
2117 * We have no easy way of determining if a read will
2118 * yield any data or a write will happen.
2119 * So, emulate "seltrue".
2120 */
2121 kn->kn_fop = &ugen_seltrue_filtops;
2122 #endif
2123 break;
2124 default:
2125 return (EINVAL);
2126 }
2127 break;
2128
2129 default:
2130 return (EINVAL);
2131 }
2132
2133 kn->kn_hook = sce;
2134
2135 s = splusb();
2136 SLIST_INSERT_HEAD(klist, kn, kn_selnext);
2137 splx(s);
2138
2139 return (0);
2140 }
2141
2142 #if defined(__FreeBSD__)
2143 DRIVER_MODULE(ugen, uhub, ugen_driver, ugen_devclass, usbd_driver_load, 0);
2144 #endif
2145