ugen.c revision 1.111.8.1 1 /* $NetBSD: ugen.c,v 1.111.8.1 2012/04/17 00:08:07 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.111.8.1 2012/04/17 00:08:07 yamt Exp $");
41
42 #include "opt_compat_netbsd.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/malloc.h>
48 #include <sys/device.h>
49 #include <sys/ioctl.h>
50 #include <sys/conf.h>
51 #include <sys/tty.h>
52 #include <sys/file.h>
53 #include <sys/select.h>
54 #include <sys/proc.h>
55 #include <sys/vnode.h>
56 #include <sys/poll.h>
57
58 #include <dev/usb/usb.h>
59 #include <dev/usb/usbdi.h>
60 #include <dev/usb/usbdi_util.h>
61
62 #ifdef UGEN_DEBUG
63 #define DPRINTF(x) if (ugendebug) printf x
64 #define DPRINTFN(n,x) if (ugendebug>(n)) printf x
65 int ugendebug = 0;
66 #else
67 #define DPRINTF(x)
68 #define DPRINTFN(n,x)
69 #endif
70
71 #define UGEN_CHUNK 128 /* chunk size for read */
72 #define UGEN_IBSIZE 1020 /* buffer size */
73 #define UGEN_BBSIZE 1024
74
75 #define UGEN_NISOREQS 4 /* number of outstanding xfer requests */
76 #define UGEN_NISORFRMS 8 /* number of transactions per req */
77 #define UGEN_NISOFRAMES (UGEN_NISORFRMS * UGEN_NISOREQS)
78
79 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
80 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
81
/*
 * Per-endpoint, per-direction state.  One instance exists for each
 * (endpoint address, IN/OUT) pair in sc_endpoints[][].
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back pointer to owning softc */
	usb_endpoint_descriptor_t *edesc; /* descriptor; NULL if absent in config */
	usbd_interface_handle iface;	/* interface this endpoint belongs to */
	int state;			/* UGEN_* flag bits below */
#define	UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	usbd_pipe_handle pipeh;		/* open pipe; NULL when closed */
	struct clist q;			/* interrupt-endpoint input queue */
	struct selinfo rsel;		/* select/poll/kqueue wait state */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	u_int32_t timeout;	/* xfer timeout in ms (USBD_NO_TIMEOUT = none) */
	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	u_int32_t ra_wb_used;	 /* how much is in buffer */
	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	usbd_xfer_handle ra_wb_xfer;	/* single outstanding RA/WB transfer */
	/* Outstanding isochronous requests; each carries several frames. */
	struct isoreq {
		struct ugen_endpoint *sce;	/* back pointer for the callback */
		usbd_xfer_handle xfer;
		void *dmabuf;			/* DMA buffer owned by xfer */
		u_int16_t sizes[UGEN_NISORFRMS]; /* per-frame lengths */
	} isoreqs[UGEN_NISOREQS];
};
112
/*
 * Per-device soft state.  Endpoints are indexed by USB endpoint
 * address and transfer direction (OUT/IN).
 */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	usbd_device_handle sc_udev;	/* the attached USB device */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* open bookkeeping per endpoint */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;			/* active I/O users; detach waits on it */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for bulk/intr I/O */
	u_char sc_dying;		/* set when device is being removed */
};
126
127 dev_type_open(ugenopen);
128 dev_type_close(ugenclose);
129 dev_type_read(ugenread);
130 dev_type_write(ugenwrite);
131 dev_type_ioctl(ugenioctl);
132 dev_type_poll(ugenpoll);
133 dev_type_kqfilter(ugenkqfilter);
134
/*
 * Character-device entry points for /dev/ugenN.EE nodes.  No stop,
 * tty, or mmap support; D_OTHER marks a non-disk, non-tty device.
 */
const struct cdevsw ugen_cdevsw = {
	ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
	nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
};
139
140 Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
141 usbd_status status);
142 Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
143 usbd_status status);
144 Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
145 usbd_status status);
146 Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
147 usbd_status status);
148 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
149 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
150 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
151 void *, int, struct lwp *);
152 Static int ugen_set_config(struct ugen_softc *sc, int configno);
153 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
154 int index, int *lenp);
155 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
156 Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);
157
158 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
159 #define UGENENDPOINT(n) (minor(n) & 0xf)
160 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
161
162 int ugen_match(device_t, cfdata_t, void *);
163 void ugen_attach(device_t, device_t, void *);
164 int ugen_detach(device_t, int);
165 int ugen_activate(device_t, enum devact);
166 extern struct cfdriver ugen_cd;
167 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach, ugen_detach, ugen_activate);
168
169 /* toggle to control attach priority. -1 means "let autoconf decide" */
170 int ugen_override = -1;
171
172 int
173 ugen_match(device_t parent, cfdata_t match, void *aux)
174 {
175 struct usb_attach_arg *uaa = aux;
176 int override;
177
178 if (ugen_override != -1)
179 override = ugen_override;
180 else
181 override = match->cf_flags & 1;
182
183 if (override)
184 return (UMATCH_HIGHEST);
185 else if (uaa->usegeneric)
186 return (UMATCH_GENERIC);
187 else
188 return (UMATCH_NONE);
189 }
190
/*
 * Autoconf attach: select configuration index 0, build the endpoint
 * table for that configuration, and initialize per-endpoint select
 * state.  On any failure sc_dying is set so subsequent opens fail.
 */
void
ugen_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	usbd_device_handle udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->device;

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		return;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	/* Initialize select/poll state for every possible endpoint slot. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
		}
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
			   sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
}
247
/*
 * Switch the device to configuration `configno' and rebuild the
 * sc_endpoints[][] table from the descriptors of that configuration.
 * Fails with USBD_IN_USE if any non-control endpoint is still open,
 * since changing configuration invalidates open pipes.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	usbd_device_handle dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    device_xname(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			     ("ugen_set_config: %s - endpoint %d is open\n",
			      device_xname(sc->sc_dev), endptno));
			return (USBD_IN_USE);
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return (err);
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return (err);
	/* Forget all previous endpoint state before repopulating. */
	memset(sc->sc_endpoints, 0, sizeof sc->sc_endpoints);
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return (err);
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return (err);
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return (USBD_NORMAL_COMPLETION);
}
313
314 int
315 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
316 {
317 struct ugen_softc *sc;
318 int unit = UGENUNIT(dev);
319 int endpt = UGENENDPOINT(dev);
320 usb_endpoint_descriptor_t *edesc;
321 struct ugen_endpoint *sce;
322 int dir, isize;
323 usbd_status err;
324 usbd_xfer_handle xfer;
325 void *tbuf;
326 int i, j;
327
328 sc = device_lookup_private(&ugen_cd, unit);
329 if (sc == NULL)
330 return ENXIO;
331
332 DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
333 flag, mode, unit, endpt));
334
335 if (sc == NULL || sc->sc_dying)
336 return (ENXIO);
337
338 /* The control endpoint allows multiple opens. */
339 if (endpt == USB_CONTROL_ENDPOINT) {
340 sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
341 return (0);
342 }
343
344 if (sc->sc_is_open[endpt])
345 return (EBUSY);
346
347 /* Make sure there are pipes for all directions. */
348 for (dir = OUT; dir <= IN; dir++) {
349 if (flag & (dir == OUT ? FWRITE : FREAD)) {
350 sce = &sc->sc_endpoints[endpt][dir];
351 if (sce == 0 || sce->edesc == 0)
352 return (ENXIO);
353 }
354 }
355
356 /* Actually open the pipes. */
357 /* XXX Should back out properly if it fails. */
358 for (dir = OUT; dir <= IN; dir++) {
359 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
360 continue;
361 sce = &sc->sc_endpoints[endpt][dir];
362 sce->state = 0;
363 sce->timeout = USBD_NO_TIMEOUT;
364 DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
365 sc, endpt, dir, sce));
366 edesc = sce->edesc;
367 switch (edesc->bmAttributes & UE_XFERTYPE) {
368 case UE_INTERRUPT:
369 if (dir == OUT) {
370 err = usbd_open_pipe(sce->iface,
371 edesc->bEndpointAddress, 0, &sce->pipeh);
372 if (err)
373 return (EIO);
374 break;
375 }
376 isize = UGETW(edesc->wMaxPacketSize);
377 if (isize == 0) /* shouldn't happen */
378 return (EINVAL);
379 sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
380 DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
381 endpt, isize));
382 if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1)
383 return (ENOMEM);
384 err = usbd_open_pipe_intr(sce->iface,
385 edesc->bEndpointAddress,
386 USBD_SHORT_XFER_OK, &sce->pipeh, sce,
387 sce->ibuf, isize, ugenintr,
388 USBD_DEFAULT_INTERVAL);
389 if (err) {
390 free(sce->ibuf, M_USBDEV);
391 clfree(&sce->q);
392 return (EIO);
393 }
394 DPRINTFN(5, ("ugenopen: interrupt open done\n"));
395 break;
396 case UE_BULK:
397 err = usbd_open_pipe(sce->iface,
398 edesc->bEndpointAddress, 0, &sce->pipeh);
399 if (err)
400 return (EIO);
401 sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
402 /*
403 * Use request size for non-RA/WB transfers
404 * as the default.
405 */
406 sce->ra_wb_reqsize = UGEN_BBSIZE;
407 break;
408 case UE_ISOCHRONOUS:
409 if (dir == OUT)
410 return (EINVAL);
411 isize = UGETW(edesc->wMaxPacketSize);
412 if (isize == 0) /* shouldn't happen */
413 return (EINVAL);
414 sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
415 M_USBDEV, M_WAITOK);
416 sce->cur = sce->fill = sce->ibuf;
417 sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
418 DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
419 endpt, isize));
420 err = usbd_open_pipe(sce->iface,
421 edesc->bEndpointAddress, 0, &sce->pipeh);
422 if (err) {
423 free(sce->ibuf, M_USBDEV);
424 return (EIO);
425 }
426 for(i = 0; i < UGEN_NISOREQS; ++i) {
427 sce->isoreqs[i].sce = sce;
428 xfer = usbd_alloc_xfer(sc->sc_udev);
429 if (xfer == 0)
430 goto bad;
431 sce->isoreqs[i].xfer = xfer;
432 tbuf = usbd_alloc_buffer
433 (xfer, isize * UGEN_NISORFRMS);
434 if (tbuf == 0) {
435 i++;
436 goto bad;
437 }
438 sce->isoreqs[i].dmabuf = tbuf;
439 for(j = 0; j < UGEN_NISORFRMS; ++j)
440 sce->isoreqs[i].sizes[j] = isize;
441 usbd_setup_isoc_xfer
442 (xfer, sce->pipeh, &sce->isoreqs[i],
443 sce->isoreqs[i].sizes,
444 UGEN_NISORFRMS, USBD_NO_COPY,
445 ugen_isoc_rintr);
446 (void)usbd_transfer(xfer);
447 }
448 DPRINTFN(5, ("ugenopen: isoc open done\n"));
449 break;
450 bad:
451 while (--i >= 0) /* implicit buffer free */
452 usbd_free_xfer(sce->isoreqs[i].xfer);
453 return (ENOMEM);
454 case UE_CONTROL:
455 sce->timeout = USBD_DEFAULT_TIMEOUT;
456 return (EINVAL);
457 }
458 }
459 sc->sc_is_open[endpt] = 1;
460 return (0);
461 }
462
/*
 * Close an endpoint node.  For each open direction: abort and close
 * the pipe, then release the per-type resources set up by ugenopen
 * (clist for interrupt, iso xfers for isochronous, RA/WB xfer for
 * bulk) and the input buffer.
 */
int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return (EINVAL);
	}
#endif

	/* Control endpoint holds no per-open resources. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return (0);
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce == NULL || sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		/* Abort first so completion callbacks stop firing. */
		usbd_abort_pipe(sce->pipeh);
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_free_xfer(sce->isoreqs[i].xfer);
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
				/* ibuf freed below */
				usbd_free_xfer(sce->ra_wb_xfer);
			break;
		default:
			break;
		}

		if (sce->ibuf != NULL) {
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return (0);
}
532
/*
 * Core of read(2) on a ugen endpoint.  Behavior depends on endpoint
 * type:
 *  - interrupt: sleep until the clist queue (filled by ugenintr) has
 *    data, then copy it out in sc_buffer-sized chunks;
 *  - bulk with read-ahead enabled: consume from the circular ibuf and
 *    restart the background transfer if it stalled on a full buffer;
 *  - plain bulk: synchronous usbd_bulk_transfer loop;
 *  - isochronous: sleep until the ring buffer has data, then copy out.
 * Returns 0 or an errno; EWOULDBLOCK for non-blocking reads with no
 * data, EIO if the device is dying.
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	usbd_xfer_handle xfer;
	usbd_status err;
	int s;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	/* Endpoint 0 is accessed via ioctl, not read/write. */
	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		s = splusb();
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* Woken by ugenintr; tsleep times out per sce->timeout. */
			error = tsleep(sce, PZERO | PCATCH, "ugenri", mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		splx(s);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			s = splusb();
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for the read-ahead buffer to fill. */
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					error = tsleep(sce, PZERO | PCATCH,
					    "ugenrb", mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					/* Clamp at wrap point of ring buffer. */
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			splx(s);
			break;
		}
		/* Plain synchronous bulk read, one UGEN_BBSIZE chunk at a time. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(
				  xfer, sce->pipeh,
				  sce->state & UGEN_SHORT_OK ?
				      USBD_SHORT_XFER_OK : 0,
				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		s = splusb();
		/* Wait until the ring buffer (filled by ugen_isoc_rintr) has data. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri", mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			/* Copy up to the fill pointer or the wrap point. */
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if(sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		splx(s);
		break;


	default:
		return (ENXIO);
	}
	return (error);
}
744
745 int
746 ugenread(dev_t dev, struct uio *uio, int flag)
747 {
748 int endpt = UGENENDPOINT(dev);
749 struct ugen_softc *sc;
750 int error;
751
752 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
753 if (sc == NULL)
754 return ENXIO;
755
756 sc->sc_refcnt++;
757 error = ugen_do_read(sc, endpt, uio, flag);
758 if (--sc->sc_refcnt < 0)
759 usb_detach_wakeupold(sc->sc_dev);
760 return (error);
761 }
762
/*
 * Core of write(2) on a ugen endpoint.  Behavior depends on endpoint
 * type:
 *  - bulk with write-behind enabled: stage data into the circular
 *    ibuf and (re)start the background transfer if it stalled on an
 *    empty buffer;
 *  - plain bulk: synchronous usbd_bulk_transfer loop;
 *  - interrupt OUT: one synchronous transfer per wMaxPacketSize chunk.
 * Returns 0 or an errno; EWOULDBLOCK for non-blocking writes with a
 * full buffer, EIO if the device is dying.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int error = 0;
	int s;
	u_int32_t tn;
	char *dbuf;
	usbd_xfer_handle xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	/* Endpoint 0 is accessed via ioctl, not read/write. */
	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			s = splusb();
			/* Non-blocking write fails immediately on a full buffer. */
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for room; ugen_bulkwb_intr drains the buffer. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					error = tsleep(sce, PZERO | PCATCH,
					    "ugenwb", mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					/* Clamp at the ring-buffer wrap point. */
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					/* Copy out of the ring, handling a wrap. */
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL, n,
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			splx(s);
			break;
		}
		/* Plain synchronous bulk write, one UGEN_BBSIZE chunk at a time. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n,"ugenwb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		/* One synchronous transfer per max-packet-sized chunk. */
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n, "ugenwi");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}
929
930 int
931 ugenwrite(dev_t dev, struct uio *uio, int flag)
932 {
933 int endpt = UGENENDPOINT(dev);
934 struct ugen_softc *sc;
935 int error;
936
937 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
938 if (sc == NULL)
939 return ENXIO;
940
941 sc->sc_refcnt++;
942 error = ugen_do_write(sc, endpt, uio, flag);
943 if (--sc->sc_refcnt < 0)
944 usb_detach_wakeupold(sc->sc_dev);
945 return (error);
946 }
947
948 int
949 ugen_activate(device_t self, enum devact act)
950 {
951 struct ugen_softc *sc = device_private(self);
952
953 switch (act) {
954 case DVACT_DEACTIVATE:
955 sc->sc_dying = 1;
956 return 0;
957 default:
958 return EOPNOTSUPP;
959 }
960 }
961
962 int
963 ugen_detach(device_t self, int flags)
964 {
965 struct ugen_softc *sc = device_private(self);
966 struct ugen_endpoint *sce;
967 int i, dir;
968 int s;
969 int maj, mn;
970
971 DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
972
973 sc->sc_dying = 1;
974 pmf_device_deregister(self);
975 /* Abort all pipes. Causes processes waiting for transfer to wake. */
976 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
977 for (dir = OUT; dir <= IN; dir++) {
978 sce = &sc->sc_endpoints[i][dir];
979 if (sce && sce->pipeh)
980 usbd_abort_pipe(sce->pipeh);
981 }
982 }
983
984 s = splusb();
985 if (--sc->sc_refcnt >= 0) {
986 /* Wake everyone */
987 for (i = 0; i < USB_MAX_ENDPOINTS; i++)
988 wakeup(&sc->sc_endpoints[i][IN]);
989 /* Wait for processes to go away. */
990 usb_detach_waitold(sc->sc_dev);
991 }
992 splx(s);
993
994 /* locate the major number */
995 maj = cdevsw_lookup_major(&ugen_cdevsw);
996
997 /* Nuke the vnodes for any open instances (calls close). */
998 mn = device_unit(self) * USB_MAX_ENDPOINTS;
999 vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
1000
1001 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
1002 sc->sc_dev);
1003
1004 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1005 for (dir = OUT; dir <= IN; dir++) {
1006 sce = &sc->sc_endpoints[i][dir];
1007 seldestroy(&sce->rsel);
1008 }
1009 }
1010
1011 return (0);
1012 }
1013
/*
 * Completion callback for the interrupt IN pipe opened in ugenopen.
 * Appends the received bytes to the endpoint's clist queue and wakes
 * any sleeping reader / poller.  Runs at USB interrupt context.
 */
Static void
ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	/*struct ugen_softc *sc = sce->sc;*/
	u_int32_t count;
	u_char *ibuf;

	/* Pipe is being aborted/closed; nothing to do. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	/* NOTE(review): debug dump assumes count >= 3 — debug builds only. */
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	/* Queue up; excess bytes are silently dropped if the clist is full. */
	(void)b_to_q(ibuf, count, &sce->q);

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}
1049
/*
 * Completion callback for one isochronous IN request.  Copies the
 * received frames into the endpoint's circular buffer (discarding the
 * oldest data on overflow), requeues the request, and wakes readers.
 * Runs at USB interrupt context.
 */
Static void
ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	u_int32_t count, n;
	int i, isize;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
		    (long)(req - sce->isoreqs), count));

	/* throw away oldest input if the buffer is full */
	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
		sce->cur += count;
		if(sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->limit - sce->cur);
		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
			     count));
	}

	isize = UGETW(sce->edesc->wMaxPacketSize);
	for (i = 0; i < UGEN_NISORFRMS; i++) {
		u_int32_t actlen = req->sizes[i];
		char const *tbuf = (char const *)req->dmabuf + isize * i;

		/* copy data to buffer, wrapping at the end of the ring */
		while (actlen > 0) {
			n = min(actlen, sce->limit - sce->fill);
			memcpy(sce->fill, tbuf, n);

			tbuf += n;
			actlen -= n;
			sce->fill += n;
			if(sce->fill == sce->limit)
				sce->fill = sce->ibuf;
		}

		/* setup size for next transfer */
		req->sizes[i] = isize;
	}

	/* Immediately requeue this request to keep the stream running. */
	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
			     USBD_NO_COPY, ugen_isoc_rintr);
	(void)usbd_transfer(xfer);

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}
1108
/*
 * Completion callback for the bulk read-ahead transfer.  Copies the
 * received data into the endpoint's circular buffer, queues the next
 * transfer if there is room (otherwise sets UGEN_RA_WB_STOP so the
 * next read restarts it), and wakes readers.  Runs at USB interrupt
 * context.
 */
Static void
ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	u_int32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer, wrapping at the end of the ring. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Buffer full: pause until a read makes room. */
		sce->state |= UGEN_RA_WB_STOP;

	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}
1175
/*
 * Completion callback for bulk write-behind (UGEN_BULK_WB) transfers.
 * Runs at interrupt time: accounts for the data just sent, refills the
 * xfer from the endpoint's ring buffer (consumed at `cur') and requeues
 * it while buffered data remains, and wakes any sleeping writer and
 * poll/select waiters.
 */
Static void
ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	u_int32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		/* Stop write-behind; the next write will try to restart it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers; `cur' wraps around the ring. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer, handling wrap-around */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Ring buffer drained: stop until a write refills it. */
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a writer sleeping in ugenwrite() and notify pollers. */
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		wakeup(sce);
	}
	selnotify(&sce->rsel, 0, 0);
}
1240
1241 Static usbd_status
1242 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1243 {
1244 usbd_interface_handle iface;
1245 usb_endpoint_descriptor_t *ed;
1246 usbd_status err;
1247 struct ugen_endpoint *sce;
1248 u_int8_t niface, nendpt, endptno, endpt;
1249 int dir;
1250
1251 DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1252
1253 err = usbd_interface_count(sc->sc_udev, &niface);
1254 if (err)
1255 return (err);
1256 if (ifaceidx < 0 || ifaceidx >= niface)
1257 return (USBD_INVAL);
1258
1259 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1260 if (err)
1261 return (err);
1262 err = usbd_endpoint_count(iface, &nendpt);
1263 if (err)
1264 return (err);
1265 /* XXX should only do this after setting new altno has succeeded */
1266 for (endptno = 0; endptno < nendpt; endptno++) {
1267 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1268 endpt = ed->bEndpointAddress;
1269 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1270 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1271 sce->sc = 0;
1272 sce->edesc = 0;
1273 sce->iface = 0;
1274 }
1275
1276 /* change setting */
1277 err = usbd_set_interface(iface, altno);
1278 if (err)
1279 return (err);
1280
1281 err = usbd_endpoint_count(iface, &nendpt);
1282 if (err)
1283 return (err);
1284 for (endptno = 0; endptno < nendpt; endptno++) {
1285 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1286 KASSERT(ed != NULL);
1287 endpt = ed->bEndpointAddress;
1288 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1289 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1290 sce->sc = sc;
1291 sce->edesc = ed;
1292 sce->iface = iface;
1293 }
1294 return (0);
1295 }
1296
1297 /* Retrieve a complete descriptor for a certain device and index. */
1298 Static usb_config_descriptor_t *
1299 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1300 {
1301 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1302 int len;
1303 usbd_status err;
1304
1305 if (index == USB_CURRENT_CONFIG_INDEX) {
1306 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1307 len = UGETW(tdesc->wTotalLength);
1308 if (lenp)
1309 *lenp = len;
1310 cdesc = malloc(len, M_TEMP, M_WAITOK);
1311 memcpy(cdesc, tdesc, len);
1312 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1313 } else {
1314 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1315 if (err)
1316 return (0);
1317 len = UGETW(cdescr.wTotalLength);
1318 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1319 if (lenp)
1320 *lenp = len;
1321 cdesc = malloc(len, M_TEMP, M_WAITOK);
1322 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1323 if (err) {
1324 free(cdesc, M_TEMP);
1325 return (0);
1326 }
1327 }
1328 return (cdesc);
1329 }
1330
1331 Static int
1332 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1333 {
1334 usbd_interface_handle iface;
1335 usbd_status err;
1336
1337 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1338 if (err)
1339 return (-1);
1340 return (usbd_get_interface_altindex(iface));
1341 }
1342
/*
 * Handler for all ugen ioctls.
 *
 * The first switch handles commands that operate on a specific data
 * endpoint (short-transfer policy, timeouts, bulk read-ahead /
 * write-behind control); everything that falls through it is only
 * valid on the control endpoint and is handled by the second switch
 * (configuration / descriptor queries, raw control requests, ...).
 * Returns an errno value.
 */
Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	usbd_interface_handle iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return (EIO);

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		/* Note: only the IN side's timeout is updated here. */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
			)
			return (EINVAL);
		sce->timeout = *(int *)addr;
		return (0);
	case USB_SET_BULK_RA:
		/* Enable/disable read-ahead on a bulk IN endpoint. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			/* Ring buffer the interrupt handler fills. */
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    USBD_NO_COPY, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				/* Undo everything allocated above. */
				sce->state &= ~UGEN_BULK_RA;
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return (EIO);
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return (0);

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_WB:
		/* Enable/disable write-behind on a bulk OUT endpoint. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			/* Ring buffer drained by the interrupt handler. */
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			/*
			 * Start in the STOP state: the first write
			 * kicks off the actual transfer.
			 */
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return (0);

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		/* Tune the RA/WB buffer and per-request sizes. */
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return (EINVAL);
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return (0);
	}
	default:
		break;
	}

	/* Everything below is only valid on the control endpoint. */
	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		/* Report the current configuration number. */
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		/* Report the alternate setting of an interface. */
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		/* Validate the interface index before switching. */
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		/* Count the alternate settings of an interface. */
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		/* Copy out just the fixed-size config descriptor head. */
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		/* USB_CURRENT_ALT_INDEX means "the active setting". */
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_FULL_DESC:
	{
		/* Copy the whole config descriptor out to user space. */
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		/* Truncate to the size of the user's buffer. */
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP);
		return (error);
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		/* Execute an arbitrary control request for the user. */
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			/* Stage the data phase through a kernel buffer. */
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				/* Copy the outgoing data in up front. */
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				/* Copy the received data back out. */
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
					 (struct usb_device_info_old *)addr, 0);

		break;
#endif
	default:
		return (EINVAL);
	}
	return (0);
}
1770
1771 int
1772 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1773 {
1774 int endpt = UGENENDPOINT(dev);
1775 struct ugen_softc *sc;
1776 int error;
1777
1778 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1779 if (sc == NULL)
1780 return ENXIO;
1781
1782 sc->sc_refcnt++;
1783 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1784 if (--sc->sc_refcnt < 0)
1785 usb_detach_wakeupold(sc->sc_dev);
1786 return (error);
1787 }
1788
/*
 * poll(2) entry point.  Reports readability/writability depending on
 * the endpoint's transfer type; the control endpoint is not pollable.
 * The buffer checks run at splusb() so they are consistent with the
 * interrupt-time producers/consumers above.
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;
	int s;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (POLLHUP);

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return (POLLERR);
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return (POLLERR);
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return (POLLERR);
	}
#endif
	s = splusb();
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Readable when the interrupt clist has data. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Readable when the isoc ring buffer is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Read-ahead: readable when data buffered. */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Write-behind: writable while not full. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}


	splx(s);
	return (revents);
}
1887
1888 static void
1889 filt_ugenrdetach(struct knote *kn)
1890 {
1891 struct ugen_endpoint *sce = kn->kn_hook;
1892 int s;
1893
1894 s = splusb();
1895 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1896 splx(s);
1897 }
1898
1899 static int
1900 filt_ugenread_intr(struct knote *kn, long hint)
1901 {
1902 struct ugen_endpoint *sce = kn->kn_hook;
1903
1904 kn->kn_data = sce->q.c_cc;
1905 return (kn->kn_data > 0);
1906 }
1907
1908 static int
1909 filt_ugenread_isoc(struct knote *kn, long hint)
1910 {
1911 struct ugen_endpoint *sce = kn->kn_hook;
1912
1913 if (sce->cur == sce->fill)
1914 return (0);
1915
1916 if (sce->cur < sce->fill)
1917 kn->kn_data = sce->fill - sce->cur;
1918 else
1919 kn->kn_data = (sce->limit - sce->cur) +
1920 (sce->fill - sce->ibuf);
1921
1922 return (1);
1923 }
1924
1925 static int
1926 filt_ugenread_bulk(struct knote *kn, long hint)
1927 {
1928 struct ugen_endpoint *sce = kn->kn_hook;
1929
1930 if (!(sce->state & UGEN_BULK_RA))
1931 /*
1932 * We have no easy way of determining if a read will
1933 * yield any data or a write will happen.
1934 * So, emulate "seltrue".
1935 */
1936 return (filt_seltrue(kn, hint));
1937
1938 if (sce->ra_wb_used == 0)
1939 return (0);
1940
1941 kn->kn_data = sce->ra_wb_used;
1942
1943 return (1);
1944 }
1945
1946 static int
1947 filt_ugenwrite_bulk(struct knote *kn, long hint)
1948 {
1949 struct ugen_endpoint *sce = kn->kn_hook;
1950
1951 if (!(sce->state & UGEN_BULK_WB))
1952 /*
1953 * We have no easy way of determining if a read will
1954 * yield any data or a write will happen.
1955 * So, emulate "seltrue".
1956 */
1957 return (filt_seltrue(kn, hint));
1958
1959 if (sce->ra_wb_used == sce->limit - sce->ibuf)
1960 return (0);
1961
1962 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
1963
1964 return (1);
1965 }
1966
/*
 * kqueue filter operations, one set per supported endpoint flavor.
 * All share filt_ugenrdetach() to unhook from the endpoint's rsel
 * klist; the first member (1) marks them as fd-attached filters.
 */
static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
1978
1979 int
1980 ugenkqfilter(dev_t dev, struct knote *kn)
1981 {
1982 struct ugen_softc *sc;
1983 struct ugen_endpoint *sce;
1984 struct klist *klist;
1985 int s;
1986
1987 sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
1988 if (sc == NULL)
1989 return ENXIO;
1990
1991 if (sc->sc_dying)
1992 return (ENXIO);
1993
1994 if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
1995 return ENODEV;
1996
1997 switch (kn->kn_filter) {
1998 case EVFILT_READ:
1999 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
2000 if (sce == NULL)
2001 return (EINVAL);
2002
2003 klist = &sce->rsel.sel_klist;
2004 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2005 case UE_INTERRUPT:
2006 kn->kn_fop = &ugenread_intr_filtops;
2007 break;
2008 case UE_ISOCHRONOUS:
2009 kn->kn_fop = &ugenread_isoc_filtops;
2010 break;
2011 case UE_BULK:
2012 kn->kn_fop = &ugenread_bulk_filtops;
2013 break;
2014 break;
2015 default:
2016 return (EINVAL);
2017 }
2018 break;
2019
2020 case EVFILT_WRITE:
2021 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
2022 if (sce == NULL)
2023 return (EINVAL);
2024
2025 klist = &sce->rsel.sel_klist;
2026 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2027 case UE_INTERRUPT:
2028 case UE_ISOCHRONOUS:
2029 /* XXX poll doesn't support this */
2030 return (EINVAL);
2031
2032 case UE_BULK:
2033 kn->kn_fop = &ugenwrite_bulk_filtops;
2034 break;
2035 default:
2036 return (EINVAL);
2037 }
2038 break;
2039
2040 default:
2041 return (EINVAL);
2042 }
2043
2044 kn->kn_hook = sce;
2045
2046 s = splusb();
2047 SLIST_INSERT_HEAD(klist, kn, kn_selnext);
2048 splx(s);
2049
2050 return (0);
2051 }
2052