/* $NetBSD: ugen.c,v 1.111.8.4 2013/01/16 05:33:35 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.111.8.4 2013/01/16 05:33:35 yamt Exp $");
41
42 #include "opt_compat_netbsd.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/malloc.h>
48 #include <sys/device.h>
49 #include <sys/ioctl.h>
50 #include <sys/conf.h>
51 #include <sys/tty.h>
52 #include <sys/file.h>
53 #include <sys/select.h>
54 #include <sys/proc.h>
55 #include <sys/vnode.h>
56 #include <sys/poll.h>
57
58 #include <dev/usb/usb.h>
59 #include <dev/usb/usbdi.h>
60 #include <dev/usb/usbdi_util.h>
61
62 #ifdef UGEN_DEBUG
63 #define DPRINTF(x) if (ugendebug) printf x
64 #define DPRINTFN(n,x) if (ugendebug>(n)) printf x
65 int ugendebug = 0;
66 #else
67 #define DPRINTF(x)
68 #define DPRINTFN(n,x)
69 #endif
70
71 #define UGEN_CHUNK 128 /* chunk size for read */
72 #define UGEN_IBSIZE 1020 /* buffer size */
73 #define UGEN_BBSIZE 1024
74
75 #define UGEN_NISOREQS 4 /* number of outstanding xfer requests */
76 #define UGEN_NISORFRMS 8 /* number of transactions per req */
77 #define UGEN_NISOFRAMES (UGEN_NISORFRMS * UGEN_NISOREQS)
78
79 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
80 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
81
/*
 * Per-endpoint, per-direction state.  One instance exists for each of
 * the OUT/IN halves of every endpoint address on the device.
 *
 * NB: ugen_set_config() zeroes each instance only up to
 * UGEN_ENDPOINT_NONZERO_CRUFT, so rsel and cv must remain the last
 * members: they are initialised once at attach time and must survive
 * reconfiguration.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back-pointer to the softc */
	usb_endpoint_descriptor_t *edesc; /* descriptor; NULL if endpoint absent */
	usbd_interface_handle iface;	/* interface this endpoint belongs to */
	int state;			/* UGEN_* flags below */
#define UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	usbd_pipe_handle pipeh;		/* open pipe; NULL when closed */
	struct clist q;			/* input queue (interrupt endpoints) */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	u_int32_t timeout;	/* I/O timeout (ms), or USBD_NO_TIMEOUT */
	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	u_int32_t ra_wb_used;	 /* how much is in buffer */
	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	usbd_xfer_handle ra_wb_xfer;	/* the single xfer used in RA/WB mode */
	struct isoreq {			/* outstanding isochronous requests */
		struct ugen_endpoint *sce;	/* owning endpoint */
		usbd_xfer_handle xfer;		/* the request itself */
		void *dmabuf;			/* its DMA-able frame buffer */
		u_int16_t sizes[UGEN_NISORFRMS]; /* per-frame lengths */
	} isoreqs[UGEN_NISOREQS];
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;		/* select/poll/kqueue notification state */
	kcondvar_t cv;			/* sleep point for blocking I/O */
};
115
/* Per-device-instance driver state. */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	usbd_device_handle sc_udev;	/* the USB device we drive */

	kmutex_t sc_lock;		/* protects refcnt and sleep/wakeups */
	kcondvar_t sc_detach_cv;	/* detach waits here for I/O to drain */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* per-endpoint open flag */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN 1

	int sc_refcnt;			/* number of in-flight I/O operations */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for non-RA/WB I/O */
	u_char sc_dying;		/* device going away; fail new I/O */
};
132
/* Character-device entry points, referenced by ugen_cdevsw below. */
dev_type_open(ugenopen);
dev_type_close(ugenclose);
dev_type_read(ugenread);
dev_type_write(ugenwrite);
dev_type_ioctl(ugenioctl);
dev_type_poll(ugenpoll);
dev_type_kqfilter(ugenkqfilter);

/* Device switch for the /dev/ugenN.EE character device nodes. */
const struct cdevsw ugen_cdevsw = {
	ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
	nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
};
145
/* Transfer-completion callbacks and internal helpers. */
Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		     usbd_status status);
Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
			    usbd_status status);
Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
			     usbd_status status);
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
			 void *, int, struct lwp *);
Static int ugen_set_config(struct ugen_softc *sc, int configno);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
					       int index, int *lenp);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);

/*
 * Minor-number layout: low 4 bits select the endpoint address,
 * the next 4 bits select the unit.
 */
#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))

/* Autoconfiguration glue. */
int ugen_match(device_t, cfdata_t, void *);
void ugen_attach(device_t, device_t, void *);
int ugen_detach(device_t, int);
int ugen_activate(device_t, enum devact);
extern struct cfdriver ugen_cd;
CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach, ugen_detach, ugen_activate);

/* toggle to control attach priority. -1 means "let autoconf decide" */
int ugen_override = -1;
177
178 int
179 ugen_match(device_t parent, cfdata_t match, void *aux)
180 {
181 struct usb_attach_arg *uaa = aux;
182 int override;
183
184 if (ugen_override != -1)
185 override = ugen_override;
186 else
187 override = match->cf_flags & 1;
188
189 if (override)
190 return (UMATCH_HIGHEST);
191 else if (uaa->usegeneric)
192 return (UMATCH_GENERIC);
193 else
194 return (UMATCH_NONE);
195 }
196
/*
 * Attach: set up the per-softc lock and condvars, initialise the
 * per-endpoint selinfo/cv pairs (done exactly once — ugen_set_config()
 * deliberately leaves them alone on reconfiguration), then select
 * configuration index 0 and build the local endpoint tables for it.
 */
void
ugen_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	usbd_device_handle udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_USB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->device;

	/* These survive reconfiguration; destroyed only at detach. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		return;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
	    sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
}
257
/*
 * (Re)build the local endpoint tables for configuration `configno'.
 * Refuses (USBD_IN_USE) while any non-control endpoint is open, since
 * changing configuration would invalidate its pipes.  On success every
 * endpoint of the new configuration has sc/edesc/iface recorded.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	usbd_device_handle dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir, i;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    device_xname(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			    ("ugen_set_config: %s - endpoint %d is open\n",
			     device_xname(sc->sc_dev), endptno));
			return (USBD_IN_USE);
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return (err);
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return (err);

	/* Clear out the old info, but leave the selinfo and cv initialised. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
		}
	}

	/* Walk every interface and record each endpoint's handles. */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return (err);
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return (err);
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return (USBD_NORMAL_COMPLETION);
}
331
332 int
333 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
334 {
335 struct ugen_softc *sc;
336 int unit = UGENUNIT(dev);
337 int endpt = UGENENDPOINT(dev);
338 usb_endpoint_descriptor_t *edesc;
339 struct ugen_endpoint *sce;
340 int dir, isize;
341 usbd_status err;
342 usbd_xfer_handle xfer;
343 void *tbuf;
344 int i, j;
345
346 sc = device_lookup_private(&ugen_cd, unit);
347 if (sc == NULL)
348 return ENXIO;
349
350 DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
351 flag, mode, unit, endpt));
352
353 if (sc == NULL || sc->sc_dying)
354 return (ENXIO);
355
356 /* The control endpoint allows multiple opens. */
357 if (endpt == USB_CONTROL_ENDPOINT) {
358 sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
359 return (0);
360 }
361
362 if (sc->sc_is_open[endpt])
363 return (EBUSY);
364
365 /* Make sure there are pipes for all directions. */
366 for (dir = OUT; dir <= IN; dir++) {
367 if (flag & (dir == OUT ? FWRITE : FREAD)) {
368 sce = &sc->sc_endpoints[endpt][dir];
369 if (sce == 0 || sce->edesc == 0)
370 return (ENXIO);
371 }
372 }
373
374 /* Actually open the pipes. */
375 /* XXX Should back out properly if it fails. */
376 for (dir = OUT; dir <= IN; dir++) {
377 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
378 continue;
379 sce = &sc->sc_endpoints[endpt][dir];
380 sce->state = 0;
381 sce->timeout = USBD_NO_TIMEOUT;
382 DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
383 sc, endpt, dir, sce));
384 edesc = sce->edesc;
385 switch (edesc->bmAttributes & UE_XFERTYPE) {
386 case UE_INTERRUPT:
387 if (dir == OUT) {
388 err = usbd_open_pipe(sce->iface,
389 edesc->bEndpointAddress, 0, &sce->pipeh);
390 if (err)
391 return (EIO);
392 break;
393 }
394 isize = UGETW(edesc->wMaxPacketSize);
395 if (isize == 0) /* shouldn't happen */
396 return (EINVAL);
397 sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
398 DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
399 endpt, isize));
400 if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
401 free(sce->ibuf, M_USBDEV);
402 sce->ibuf = NULL;
403 return (ENOMEM);
404 }
405 err = usbd_open_pipe_intr(sce->iface,
406 edesc->bEndpointAddress,
407 USBD_SHORT_XFER_OK, &sce->pipeh, sce,
408 sce->ibuf, isize, ugenintr,
409 USBD_DEFAULT_INTERVAL);
410 if (err) {
411 clfree(&sce->q);
412 free(sce->ibuf, M_USBDEV);
413 sce->ibuf = NULL;
414 return (EIO);
415 }
416 DPRINTFN(5, ("ugenopen: interrupt open done\n"));
417 break;
418 case UE_BULK:
419 err = usbd_open_pipe(sce->iface,
420 edesc->bEndpointAddress, 0, &sce->pipeh);
421 if (err)
422 return (EIO);
423 sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
424 /*
425 * Use request size for non-RA/WB transfers
426 * as the default.
427 */
428 sce->ra_wb_reqsize = UGEN_BBSIZE;
429 break;
430 case UE_ISOCHRONOUS:
431 if (dir == OUT)
432 return (EINVAL);
433 isize = UGETW(edesc->wMaxPacketSize);
434 if (isize == 0) /* shouldn't happen */
435 return (EINVAL);
436 sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
437 M_USBDEV, M_WAITOK);
438 sce->cur = sce->fill = sce->ibuf;
439 sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
440 DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
441 endpt, isize));
442 err = usbd_open_pipe(sce->iface,
443 edesc->bEndpointAddress, 0, &sce->pipeh);
444 if (err) {
445 free(sce->ibuf, M_USBDEV);
446 sce->ibuf = NULL;
447 return (EIO);
448 }
449 for(i = 0; i < UGEN_NISOREQS; ++i) {
450 sce->isoreqs[i].sce = sce;
451 xfer = usbd_alloc_xfer(sc->sc_udev);
452 if (xfer == 0)
453 goto bad;
454 sce->isoreqs[i].xfer = xfer;
455 tbuf = usbd_alloc_buffer
456 (xfer, isize * UGEN_NISORFRMS);
457 if (tbuf == 0) {
458 i++;
459 goto bad;
460 }
461 sce->isoreqs[i].dmabuf = tbuf;
462 for(j = 0; j < UGEN_NISORFRMS; ++j)
463 sce->isoreqs[i].sizes[j] = isize;
464 usbd_setup_isoc_xfer
465 (xfer, sce->pipeh, &sce->isoreqs[i],
466 sce->isoreqs[i].sizes,
467 UGEN_NISORFRMS, USBD_NO_COPY,
468 ugen_isoc_rintr);
469 (void)usbd_transfer(xfer);
470 }
471 DPRINTFN(5, ("ugenopen: isoc open done\n"));
472 break;
473 bad:
474 while (--i >= 0) /* implicit buffer free */
475 usbd_free_xfer(sce->isoreqs[i].xfer);
476 usbd_close_pipe(sce->pipeh);
477 sce->pipeh = NULL;
478 free(sce->ibuf, M_USBDEV);
479 sce->ibuf = NULL;
480 return (ENOMEM);
481 case UE_CONTROL:
482 sce->timeout = USBD_DEFAULT_TIMEOUT;
483 return (EINVAL);
484 }
485 }
486 sc->sc_is_open[endpt] = 1;
487 return (0);
488 }
489
/*
 * Close an endpoint node.  For the control endpoint just clear the
 * open flag; for other endpoints abort and close the pipe of every
 * direction that was opened, then release the per-mode resources
 * (interrupt clist, isochronous xfers, RA/WB xfer and buffer).
 */
int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return (EINVAL);
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return (0);
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce == NULL || sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		/* Aborting wakes any sleeper before the pipe goes away. */
		usbd_abort_pipe(sce->pipeh);
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_free_xfer(sce->isoreqs[i].xfer);
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
				/* ibuf freed below */
				usbd_free_xfer(sce->ra_wb_xfer);
			break;
		default:
			break;
		}

		if (sce->ibuf != NULL) {
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return (0);
}
559
/*
 * Common read path for all endpoint types, called from ugenread() with
 * a softc reference held.  Dispatches on the endpoint's transfer type:
 *
 *  - interrupt: sleep until ugenintr() has queued data on the clist,
 *    then drain it in sc_buffer-sized chunks;
 *  - bulk: drain the read-ahead ring buffer if UGEN_BULK_RA is on,
 *    otherwise run synchronous usbd_bulk_transfer()s through sc_buffer;
 *  - isochronous: copy whatever ugen_isoc_rintr() has accumulated in
 *    the circular buffer.
 *
 * Returns 0 or an errno (EWOULDBLOCK for IO_NDELAY with no data,
 * EINTR/ETIMEDOUT/EIO for transfer problems, ENXIO for other types).
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	usbd_xfer_handle xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	/* Control-endpoint I/O goes through ugenioctl(), not read(2). */
	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for ugen_bulkra_intr() to add data. */
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
					    ("ugenread: sleep on %p\n",
					     sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
					    ("ugenread: woke, error=%d\n",
					     error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain (non-read-ahead) bulk: synchronous transfers. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(
				  xfer, sce->pipeh,
				  sce->state & UGEN_SHORT_OK ?
				      USBD_SHORT_XFER_OK : 0,
				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer terminates the read. */
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* Sleep until the fill pointer moves past the read pointer. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			/* Wrap the circular buffer. */
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return (ENXIO);
	}
	return (error);
}
775
776 int
777 ugenread(dev_t dev, struct uio *uio, int flag)
778 {
779 int endpt = UGENENDPOINT(dev);
780 struct ugen_softc *sc;
781 int error;
782
783 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
784 if (sc == NULL)
785 return ENXIO;
786
787 mutex_enter(&sc->sc_lock);
788 sc->sc_refcnt++;
789 mutex_exit(&sc->sc_lock);
790
791 error = ugen_do_read(sc, endpt, uio, flag);
792
793 mutex_enter(&sc->sc_lock);
794 if (--sc->sc_refcnt < 0)
795 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
796 mutex_exit(&sc->sc_lock);
797
798 return (error);
799 }
800
/*
 * Common write path, called from ugenwrite() with a softc reference
 * held.  Dispatches on the endpoint's transfer type:
 *
 *  - bulk: queue into the write-behind ring buffer (UGEN_BULK_WB) for
 *    ugen_bulkwb_intr() to drain, or run synchronous
 *    usbd_bulk_transfer()s through sc_buffer;
 *  - interrupt: synchronous transfers of at most wMaxPacketSize each.
 *
 * Returns 0 or an errno.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int error = 0;
	u_int32_t tn;
	char *dbuf;
	usbd_xfer_handle xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	/* Control-endpoint I/O goes through ugenioctl(), not write(2). */
	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for the drain side to make room. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
					    ("ugenwrite: sleep on %p\n",
					     sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
					    ("ugenwrite: woke, error=%d\n",
					     error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL, n,
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next write.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain (non-write-behind) bulk: synchronous transfers. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n,"ugenwb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		/* One packet per transfer, at most wMaxPacketSize each. */
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
			    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n, "ugenwi");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}
967
968 int
969 ugenwrite(dev_t dev, struct uio *uio, int flag)
970 {
971 int endpt = UGENENDPOINT(dev);
972 struct ugen_softc *sc;
973 int error;
974
975 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
976 if (sc == NULL)
977 return ENXIO;
978
979 mutex_enter(&sc->sc_lock);
980 sc->sc_refcnt++;
981 mutex_exit(&sc->sc_lock);
982
983 error = ugen_do_write(sc, endpt, uio, flag);
984
985 mutex_enter(&sc->sc_lock);
986 if (--sc->sc_refcnt < 0)
987 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
988 mutex_exit(&sc->sc_lock);
989
990 return (error);
991 }
992
993 int
994 ugen_activate(device_t self, enum devact act)
995 {
996 struct ugen_softc *sc = device_private(self);
997
998 switch (act) {
999 case DVACT_DEACTIVATE:
1000 sc->sc_dying = 1;
1001 return 0;
1002 default:
1003 return EOPNOTSUPP;
1004 }
1005 }
1006
/*
 * Detach the device.  Marks the softc dying, aborts every open pipe
 * (waking threads blocked in I/O), waits for in-flight operations to
 * drain, revokes the device vnodes so open descriptors get closed,
 * then destroys the synchronisation primitives.
 */
int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes. Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce && sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			cv_signal(&sc->sc_endpoints[i][IN].cv);
		/* Wait for processes to go away. */
		usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
	    sc->sc_dev);

	/* All I/O has drained; safe to tear down per-endpoint primitives. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return (0);
}
1061
/*
 * Interrupt-in pipe completion handler.  Appends the received bytes to
 * the endpoint's clist and wakes sleeping readers as well as
 * select/poll/kqueue waiters.  A stalled endpoint is cleared
 * asynchronously so the standing transfer keeps running.
 */
Static void
ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count;
	u_char *ibuf;

	/* Pipe is being aborted or closed; nothing to do. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, (" data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	/* Deliberately ignore the result: overflow data is dropped. */
	(void)b_to_q(ibuf, count, &sce->q);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1099
/*
 * Isochronous-in completion handler.  Copies each frame's data into
 * the endpoint's circular buffer — first discarding the oldest data
 * when the new data would overrun the read pointer — then requeues the
 * request so the stream keeps flowing, and wakes readers and poll
 * waiters.
 */
Static void
ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	int i, isize;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
		    (long)(req - sce->isoreqs), count));

	/* throw away oldest input if the buffer is full */
	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
		sce->cur += count;
		/* Wrap the read pointer around the circular buffer. */
		if(sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->limit - sce->cur);
		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
			     count));
	}

	isize = UGETW(sce->edesc->wMaxPacketSize);
	for (i = 0; i < UGEN_NISORFRMS; i++) {
		u_int32_t actlen = req->sizes[i];
		char const *tbuf = (char const *)req->dmabuf + isize * i;

		/* copy data to buffer */
		while (actlen > 0) {
			n = min(actlen, sce->limit - sce->fill);
			memcpy(sce->fill, tbuf, n);

			tbuf += n;
			actlen -= n;
			sce->fill += n;
			if(sce->fill == sce->limit)
				sce->fill = sce->ibuf;
		}

		/* setup size for next transfer */
		req->sizes[i] = isize;
	}

	/* Requeue the request immediately so no frames are missed. */
	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
			     USBD_NO_COPY, ugen_isoc_rintr);
	(void)usbd_transfer(xfer);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1161
/*
 * Completion callback for bulk read-ahead (RA) transfers.  Runs in USB
 * callback context.  Copies the completed transfer's data from the
 * xfer's DMA buffer into the endpoint's circular buffer [ibuf, limit),
 * queues the next read while buffer space remains, and wakes any
 * sleeping reader and poll/select waiters.  On error (other than
 * cancellation) read-ahead is stopped; the next read restarts it.
 */
Static void
ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	/* First chunk: up to the end of the circular buffer. */
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	/* Second chunk: remainder wraps to the start of the buffer. */
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Buffer full: pause read-ahead until a read drains it. */
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a reader blocked in read and notify poll/select/kevent. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1231
/*
 * Completion callback for bulk write-behind (WB) transfers.  Runs in
 * USB callback context.  Retires the bytes just written from the
 * endpoint's circular buffer, copies the next batch of buffered data
 * into the xfer's DMA buffer and queues it while data remains, and
 * wakes any sleeping writer and poll/select waiters.  On error (other
 * than cancellation) write-behind is stopped; the next write restarts
 * it.
 */
Static void
ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers; wrap cur past the end of the buffer. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		/* First chunk up to the end of the circular buffer ... */
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		/* ... then the wrapped remainder from the start. */
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Buffer empty: pause write-behind until the next write. */
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a writer blocked in write and notify poll/select/kevent. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1299
/*
 * Select alternate setting `altno' on interface `ifaceidx' and rebuild
 * the per-endpoint bookkeeping in sc->sc_endpoints to match the
 * endpoint descriptors of the new setting.  Endpoint entries for the
 * old setting are cleared first so stale descriptors are never visible.
 * Returns a usbd_status (0 on success).
 */
Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt, endptno, endpt;
	int dir;

	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return (err);
	/* Reject out-of-range interface indices from userland. */
	if (ifaceidx < 0 || ifaceidx >= niface)
		return (USBD_INVAL);

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return (err);
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return (err);
	/* XXX should only do this after setting new altno has succeeded */
	/* Clear the endpoint table entries of the current setting. */
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = 0;
		sce->edesc = 0;
		sce->iface = 0;
	}

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return (err);

	/* Re-read the endpoint count: it may differ in the new setting. */
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return (err);
	/* Populate the endpoint table for the new setting. */
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return (0);
}
1355
/*
 * Retrieve a complete descriptor for a certain device and index.
 *
 * Returns a malloc(M_TEMP)'d copy of the full configuration descriptor
 * (header plus all subordinate descriptors, wTotalLength bytes), or
 * NULL on failure.  The caller owns the returned buffer and must
 * free(, M_TEMP) it.  If `lenp' is non-NULL it receives the total
 * length.  For USB_CURRENT_CONFIG_INDEX the cached descriptor of the
 * active configuration is copied; otherwise the descriptor is fetched
 * from the device in two steps (header first, to learn the length).
 */
Static usb_config_descriptor_t *
ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
{
	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
	int len;
	usbd_status err;

	if (index == USB_CURRENT_CONFIG_INDEX) {
		tdesc = usbd_get_config_descriptor(sc->sc_udev);
		len = UGETW(tdesc->wTotalLength);
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		memcpy(cdesc, tdesc, len);
		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
	} else {
		/* Fetch just the header to learn wTotalLength ... */
		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
		if (err)
			return (0);
		len = UGETW(cdescr.wTotalLength);
		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
		if (lenp)
			*lenp = len;
		cdesc = malloc(len, M_TEMP, M_WAITOK);
		/* ... then the full descriptor set. */
		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
		if (err) {
			free(cdesc, M_TEMP);
			return (0);
		}
	}
	return (cdesc);
}
1389
1390 Static int
1391 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1392 {
1393 usbd_interface_handle iface;
1394 usbd_status err;
1395
1396 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1397 if (err)
1398 return (-1);
1399 return (usbd_get_interface_altindex(iface));
1400 }
1401
/*
 * Handle an ioctl on a ugen endpoint device.  `endpt' is the endpoint
 * number decoded from the minor; `addr' points to the argument already
 * copied in/out by the upper layer.  Endpoint-specific commands are
 * dispatched first; the remaining commands are valid only on the
 * control endpoint.  Returns 0 or an errno.
 */
Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	usbd_interface_handle iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return (EIO);

	/* Endpoint-specific commands (valid on non-control endpoints). */
	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
		    )
			return (EINVAL);
		/* Timeout in the units expected by usbd_do_request_flags. */
		sce->timeout = *(int *)addr;
		return (0);
	case USB_SET_BULK_RA:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			/* Circular staging buffer drained by read(2). */
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    USBD_NO_COPY, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				/* Undo everything on failure to start. */
				sce->state &= ~UGEN_BULK_RA;
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return (EIO);
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return (0);

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_WB:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			/* Circular staging buffer filled by write(2). */
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			/*
			 * STOP is set because nothing is buffered yet;
			 * the first write kicks off the transfers.
			 */
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return (0);

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		/* Sanity-check the user-supplied buffer/request sizes. */
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return (EINVAL);
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return (0);
	}
	default:
		break;
	}

	/* Everything below is only valid on the control endpoint. */
	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		/* Validate the interface index before switching. */
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		/* Resolve "current alt" to a concrete alt index. */
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		/* Resolve "current alt" to a concrete alt index. */
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		/* Copy out no more than the user's buffer can hold. */
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP);
		return (error);
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			/* Direction of the data stage selects copy-in/out. */
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				/* Host-to-device: copy the payload in now. */
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				/* Device-to-host: copy the payload out. */
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
					 (struct usb_device_info_old *)addr, 0);

		break;
#endif
	default:
		return (EINVAL);
	}
	return (0);
}
1829
1830 int
1831 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1832 {
1833 int endpt = UGENENDPOINT(dev);
1834 struct ugen_softc *sc;
1835 int error;
1836
1837 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1838 if (sc == NULL)
1839 return ENXIO;
1840
1841 sc->sc_refcnt++;
1842 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1843 if (--sc->sc_refcnt < 0)
1844 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1845 return (error);
1846 }
1847
/*
 * poll(2)/select(2) entry point.  Both directions of the endpoint are
 * consulted: the IN side for POLLIN/POLLRDNORM readiness and the OUT
 * side for POLLOUT/POLLWRNORM.  Readiness rules depend on the transfer
 * type; where we cannot tell (plain bulk without RA/WB), we claim
 * readiness.  Returns the ready events, or POLLHUP/POLLERR sentinels.
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (POLLHUP);

	/* The control endpoint has no poll semantics. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return (POLLERR);
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return (POLLERR);
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return (POLLERR);
	}
#endif

	/* Readiness checks under sc_lock against the intr callbacks. */
	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Ready if the interrupt clist has queued bytes. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Ready if the isoc ring buffer is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Ready if read-ahead has buffered data. */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Ready if write-behind buffer has room. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return (revents);
}
1946
1947 static void
1948 filt_ugenrdetach(struct knote *kn)
1949 {
1950 struct ugen_endpoint *sce = kn->kn_hook;
1951 struct ugen_softc *sc = sce->sc;
1952
1953 mutex_enter(&sc->sc_lock);
1954 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1955 mutex_exit(&sc->sc_lock);
1956 }
1957
1958 static int
1959 filt_ugenread_intr(struct knote *kn, long hint)
1960 {
1961 struct ugen_endpoint *sce = kn->kn_hook;
1962
1963 kn->kn_data = sce->q.c_cc;
1964 return (kn->kn_data > 0);
1965 }
1966
1967 static int
1968 filt_ugenread_isoc(struct knote *kn, long hint)
1969 {
1970 struct ugen_endpoint *sce = kn->kn_hook;
1971
1972 if (sce->cur == sce->fill)
1973 return (0);
1974
1975 if (sce->cur < sce->fill)
1976 kn->kn_data = sce->fill - sce->cur;
1977 else
1978 kn->kn_data = (sce->limit - sce->cur) +
1979 (sce->fill - sce->ibuf);
1980
1981 return (1);
1982 }
1983
1984 static int
1985 filt_ugenread_bulk(struct knote *kn, long hint)
1986 {
1987 struct ugen_endpoint *sce = kn->kn_hook;
1988
1989 if (!(sce->state & UGEN_BULK_RA))
1990 /*
1991 * We have no easy way of determining if a read will
1992 * yield any data or a write will happen.
1993 * So, emulate "seltrue".
1994 */
1995 return (filt_seltrue(kn, hint));
1996
1997 if (sce->ra_wb_used == 0)
1998 return (0);
1999
2000 kn->kn_data = sce->ra_wb_used;
2001
2002 return (1);
2003 }
2004
2005 static int
2006 filt_ugenwrite_bulk(struct knote *kn, long hint)
2007 {
2008 struct ugen_endpoint *sce = kn->kn_hook;
2009
2010 if (!(sce->state & UGEN_BULK_WB))
2011 /*
2012 * We have no easy way of determining if a read will
2013 * yield any data or a write will happen.
2014 * So, emulate "seltrue".
2015 */
2016 return (filt_seltrue(kn, hint));
2017
2018 if (sce->ra_wb_used == sce->limit - sce->ibuf)
2019 return (0);
2020
2021 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2022
2023 return (1);
2024 }
2025
/*
 * Knote filter tables, one per endpoint transfer type/direction.  All
 * share the same detach routine; only the readiness predicate differs.
 * NOTE(review): the leading 1 appears to mark the filters as
 * file-descriptor backed (conventional first field of struct
 * filterops) -- confirm against sys/event.h for this kernel version.
 */
static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2037
/*
 * kqueue(2) attach entry point.  Selects the filterops matching the
 * requested filter (read/write) and the endpoint's transfer type, then
 * links the knote onto the chosen endpoint's klist under sc_lock.
 * Returns 0 or an errno.
 */
int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (ENXIO);

	/* The control endpoint has no kevent semantics. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		/* Pick the read filter for the endpoint's transfer type. */
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		/* Only bulk write-readiness is supported. */
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return (EINVAL);

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sce;

	/* Attach under sc_lock against concurrent detach/notify. */
	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return (0);
}
2109