/*	$NetBSD: ugen.c,v 1.124 2014/07/25 08:10:39 dholland Exp $	*/
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.124 2014/07/25 08:10:39 dholland Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/malloc.h>
50 #include <sys/device.h>
51 #include <sys/ioctl.h>
52 #include <sys/conf.h>
53 #include <sys/tty.h>
54 #include <sys/file.h>
55 #include <sys/select.h>
56 #include <sys/proc.h>
57 #include <sys/vnode.h>
58 #include <sys/poll.h>
59
60 #include <dev/usb/usb.h>
61 #include <dev/usb/usbdi.h>
62 #include <dev/usb/usbdi_util.h>
63
64 #ifdef UGEN_DEBUG
65 #define DPRINTF(x) if (ugendebug) printf x
66 #define DPRINTFN(n,x) if (ugendebug>(n)) printf x
67 int ugendebug = 0;
68 #else
69 #define DPRINTF(x)
70 #define DPRINTFN(n,x)
71 #endif
72
73 #define UGEN_CHUNK 128 /* chunk size for read */
74 #define UGEN_IBSIZE 1020 /* buffer size */
75 #define UGEN_BBSIZE 1024
76
77 #define UGEN_NISOREQS 4 /* number of outstanding xfer requests */
78 #define UGEN_NISORFRMS 8 /* number of transactions per req */
79 #define UGEN_NISOFRAMES (UGEN_NISORFRMS * UGEN_NISOREQS)
80
81 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
82 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
83
/*
 * Per-endpoint, per-direction state.  One of these exists for every
 * (endpoint address, direction) pair; see ugen_softc::sc_endpoints.
 *
 * ugen_set_config() zeroes everything up to (but not including) `rsel'
 * when the configuration changes, so any member that must survive a
 * reconfiguration has to live at or after `rsel' — keep those last.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back pointer to the softc */
	usb_endpoint_descriptor_t *edesc; /* NULL if endpoint not present */
	usbd_interface_handle iface;	/* interface the endpoint lives on */
	int state;			/* UGEN_* flags below */
#define	UGEN_ASLP	0x02	/* waiting for data */
#define	UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define	UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define	UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define	UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	usbd_pipe_handle pipeh;		/* open pipe, NULL when closed */
	struct clist q;			/* input queue for interrupt endpoints */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	u_int32_t timeout;	/* I/O timeout in ms (passed to mstohz()) */
	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	u_int32_t ra_wb_used;	 /* how much is in buffer */
	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	usbd_xfer_handle ra_wb_xfer;
	/* One slot per outstanding isochronous request. */
	struct isoreq {
		struct ugen_endpoint *sce;	/* owning endpoint */
		usbd_xfer_handle xfer;
		void *dmabuf;			/* transfer buffer */
		u_int16_t sizes[UGEN_NISORFRMS]; /* per-frame lengths */
	} isoreqs[UGEN_NISOREQS];
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;	/* poll/select/kqueue listeners */
	kcondvar_t cv;		/* readers/writers sleep here */
};
117
/*
 * Software state for one attached ugen device instance.
 */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	usbd_device_handle sc_udev;	/* USB device handle */

	kmutex_t sc_lock;		/* protects sc_refcnt and sleep/wake */
	kcondvar_t sc_detach_cv;	/* detach waits here for last user */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* per-endpoint open flag */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;			/* count of in-progress operations */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for plain bulk/intr I/O */
	u_char sc_dying;		/* set on deactivate/detach; fail new I/O */
};
134
135 dev_type_open(ugenopen);
136 dev_type_close(ugenclose);
137 dev_type_read(ugenread);
138 dev_type_write(ugenwrite);
139 dev_type_ioctl(ugenioctl);
140 dev_type_poll(ugenpoll);
141 dev_type_kqfilter(ugenkqfilter);
142
/* Character device switch for the /dev/ugenN.EE endpoint nodes. */
const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
157
158 Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
159 usbd_status status);
160 Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
161 usbd_status status);
162 Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
163 usbd_status status);
164 Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
165 usbd_status status);
166 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
167 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
168 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
169 void *, int, struct lwp *);
170 Static int ugen_set_config(struct ugen_softc *sc, int configno);
171 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
172 int index, int *lenp);
173 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
174 Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);
175
176 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
177 #define UGENENDPOINT(n) (minor(n) & 0xf)
178 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
179
180 int ugen_match(device_t, cfdata_t, void *);
181 void ugen_attach(device_t, device_t, void *);
182 int ugen_detach(device_t, int);
183 int ugen_activate(device_t, enum devact);
184 extern struct cfdriver ugen_cd;
185 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach, ugen_detach, ugen_activate);
186
187 /* toggle to control attach priority. -1 means "let autoconf decide" */
188 int ugen_override = -1;
189
190 int
191 ugen_match(device_t parent, cfdata_t match, void *aux)
192 {
193 struct usb_attach_arg *uaa = aux;
194 int override;
195
196 if (ugen_override != -1)
197 override = ugen_override;
198 else
199 override = match->cf_flags & 1;
200
201 if (override)
202 return (UMATCH_HIGHEST);
203 else if (uaa->usegeneric)
204 return (UMATCH_GENERIC);
205 else
206 return (UMATCH_NONE);
207 }
208
/*
 * Autoconf attach: set up the softc lock and per-endpoint selinfo/condvar
 * pairs, put the device into configuration index 0 (the ugen default) and
 * build the endpoint tables for it.  On failure sc_dying is set so all
 * subsequent opens fail with ENXIO.
 */
void
ugen_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	usbd_device_handle udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_USB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->device;

	/*
	 * Initialise the persistent parts of every endpoint slot up front;
	 * ugen_set_config() deliberately leaves these untouched later.
	 */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		return;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
			   sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
}
269
/*
 * Switch the device to configuration `configno' (skipping the actual
 * SET_CONFIGURATION request if it is already current) and rebuild
 * sc_endpoints[] from the new configuration's interface and endpoint
 * descriptors.  Refuses with USBD_IN_USE while any non-control endpoint
 * is open, since that would yank state out from under an active user.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	usbd_device_handle dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir, i;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    device_xname(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			     ("ugen_set_config: %s - endpoint %d is open\n",
			      device_xname(sc->sc_dev), endptno));
			return (USBD_IN_USE);
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return (err);
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return (err);

	/* Clear out the old info, but leave the selinfo and cv initialised. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
		}
	}

	/* Index each endpoint descriptor by (address, direction). */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return (err);
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return (err);
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return (USBD_NORMAL_COMPLETION);
}
343
344 int
345 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
346 {
347 struct ugen_softc *sc;
348 int unit = UGENUNIT(dev);
349 int endpt = UGENENDPOINT(dev);
350 usb_endpoint_descriptor_t *edesc;
351 struct ugen_endpoint *sce;
352 int dir, isize;
353 usbd_status err;
354 usbd_xfer_handle xfer;
355 void *tbuf;
356 int i, j;
357
358 sc = device_lookup_private(&ugen_cd, unit);
359 if (sc == NULL)
360 return ENXIO;
361
362 DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
363 flag, mode, unit, endpt));
364
365 if (sc == NULL || sc->sc_dying)
366 return (ENXIO);
367
368 /* The control endpoint allows multiple opens. */
369 if (endpt == USB_CONTROL_ENDPOINT) {
370 sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
371 return (0);
372 }
373
374 if (sc->sc_is_open[endpt])
375 return (EBUSY);
376
377 /* Make sure there are pipes for all directions. */
378 for (dir = OUT; dir <= IN; dir++) {
379 if (flag & (dir == OUT ? FWRITE : FREAD)) {
380 sce = &sc->sc_endpoints[endpt][dir];
381 if (sce == 0 || sce->edesc == 0)
382 return (ENXIO);
383 }
384 }
385
386 /* Actually open the pipes. */
387 /* XXX Should back out properly if it fails. */
388 for (dir = OUT; dir <= IN; dir++) {
389 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
390 continue;
391 sce = &sc->sc_endpoints[endpt][dir];
392 sce->state = 0;
393 sce->timeout = USBD_NO_TIMEOUT;
394 DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
395 sc, endpt, dir, sce));
396 edesc = sce->edesc;
397 switch (edesc->bmAttributes & UE_XFERTYPE) {
398 case UE_INTERRUPT:
399 if (dir == OUT) {
400 err = usbd_open_pipe(sce->iface,
401 edesc->bEndpointAddress, 0, &sce->pipeh);
402 if (err)
403 return (EIO);
404 break;
405 }
406 isize = UGETW(edesc->wMaxPacketSize);
407 if (isize == 0) /* shouldn't happen */
408 return (EINVAL);
409 sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
410 DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
411 endpt, isize));
412 if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
413 free(sce->ibuf, M_USBDEV);
414 sce->ibuf = NULL;
415 return (ENOMEM);
416 }
417 err = usbd_open_pipe_intr(sce->iface,
418 edesc->bEndpointAddress,
419 USBD_SHORT_XFER_OK, &sce->pipeh, sce,
420 sce->ibuf, isize, ugenintr,
421 USBD_DEFAULT_INTERVAL);
422 if (err) {
423 clfree(&sce->q);
424 free(sce->ibuf, M_USBDEV);
425 sce->ibuf = NULL;
426 return (EIO);
427 }
428 DPRINTFN(5, ("ugenopen: interrupt open done\n"));
429 break;
430 case UE_BULK:
431 err = usbd_open_pipe(sce->iface,
432 edesc->bEndpointAddress, 0, &sce->pipeh);
433 if (err)
434 return (EIO);
435 sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
436 /*
437 * Use request size for non-RA/WB transfers
438 * as the default.
439 */
440 sce->ra_wb_reqsize = UGEN_BBSIZE;
441 break;
442 case UE_ISOCHRONOUS:
443 if (dir == OUT)
444 return (EINVAL);
445 isize = UGETW(edesc->wMaxPacketSize);
446 if (isize == 0) /* shouldn't happen */
447 return (EINVAL);
448 sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
449 M_USBDEV, M_WAITOK);
450 sce->cur = sce->fill = sce->ibuf;
451 sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
452 DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
453 endpt, isize));
454 err = usbd_open_pipe(sce->iface,
455 edesc->bEndpointAddress, 0, &sce->pipeh);
456 if (err) {
457 free(sce->ibuf, M_USBDEV);
458 sce->ibuf = NULL;
459 return (EIO);
460 }
461 for(i = 0; i < UGEN_NISOREQS; ++i) {
462 sce->isoreqs[i].sce = sce;
463 xfer = usbd_alloc_xfer(sc->sc_udev);
464 if (xfer == 0)
465 goto bad;
466 sce->isoreqs[i].xfer = xfer;
467 tbuf = usbd_alloc_buffer
468 (xfer, isize * UGEN_NISORFRMS);
469 if (tbuf == 0) {
470 i++;
471 goto bad;
472 }
473 sce->isoreqs[i].dmabuf = tbuf;
474 for(j = 0; j < UGEN_NISORFRMS; ++j)
475 sce->isoreqs[i].sizes[j] = isize;
476 usbd_setup_isoc_xfer
477 (xfer, sce->pipeh, &sce->isoreqs[i],
478 sce->isoreqs[i].sizes,
479 UGEN_NISORFRMS, USBD_NO_COPY,
480 ugen_isoc_rintr);
481 (void)usbd_transfer(xfer);
482 }
483 DPRINTFN(5, ("ugenopen: isoc open done\n"));
484 break;
485 bad:
486 while (--i >= 0) /* implicit buffer free */
487 usbd_free_xfer(sce->isoreqs[i].xfer);
488 usbd_close_pipe(sce->pipeh);
489 sce->pipeh = NULL;
490 free(sce->ibuf, M_USBDEV);
491 sce->ibuf = NULL;
492 return (ENOMEM);
493 case UE_CONTROL:
494 sce->timeout = USBD_DEFAULT_TIMEOUT;
495 return (EINVAL);
496 }
497 }
498 sc->sc_is_open[endpt] = 1;
499 return (0);
500 }
501
/*
 * Close an endpoint node: for each direction this open had, abort and
 * close the pipe, then release the per-type resources acquired in
 * ugenopen() (interrupt clist, isochronous xfers, RA/WB xfer) and the
 * input buffer.  The control endpoint has no pipe state to tear down.
 */
int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return (EINVAL);
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return (0);
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce == NULL || sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		/* Aborting first wakes any thread blocked in a transfer. */
		usbd_abort_pipe(sce->pipeh);
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_free_xfer(sce->isoreqs[i].xfer);
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
				/* ibuf freed below */
				usbd_free_xfer(sce->ra_wb_xfer);
			break;
		default:
			break;
		}

		if (sce->ibuf != NULL) {
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return (0);
}
571
/*
 * Move data from IN endpoint `endpt' to the user's uio.  Behaviour
 * depends on the endpoint's transfer type:
 *   - interrupt: drain the clist that ugenintr() fills, sleeping (unless
 *     IO_NDELAY) until data arrives or sce->timeout expires;
 *   - bulk in read-ahead mode: copy out of the circular buffer kept full
 *     by ugen_bulkra_intr(), restarting the background xfer if it had
 *     stopped on a full buffer;
 *   - bulk otherwise: synchronous usbd_bulk_transfer() through the softc
 *     bounce buffer, honouring UGEN_SHORT_OK;
 *   - isochronous: copy out of the circular buffer ugen_isoc_rintr()
 *     fills, sleeping as for interrupt.
 * Returns 0 or an errno.  Caller (ugenread) holds a softc reference.
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	usbd_xfer_handle xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for the read-ahead buffer to fill. */
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					/* Don't run past the wrap point. */
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain synchronous bulk read via the bounce buffer. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(
				  xfer, sce->pipeh,
				  sce->state & UGEN_SHORT_OK ?
				      USBD_SHORT_XFER_OK : 0,
				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* Empty buffer: cur has caught up with fill. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return (ENXIO);
	}
	return (error);
}
787
788 int
789 ugenread(dev_t dev, struct uio *uio, int flag)
790 {
791 int endpt = UGENENDPOINT(dev);
792 struct ugen_softc *sc;
793 int error;
794
795 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
796 if (sc == NULL)
797 return ENXIO;
798
799 mutex_enter(&sc->sc_lock);
800 sc->sc_refcnt++;
801 mutex_exit(&sc->sc_lock);
802
803 error = ugen_do_read(sc, endpt, uio, flag);
804
805 mutex_enter(&sc->sc_lock);
806 if (--sc->sc_refcnt < 0)
807 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
808 mutex_exit(&sc->sc_lock);
809
810 return (error);
811 }
812
/*
 * Move data from the user's uio to OUT endpoint `endpt'.  For bulk
 * endpoints in write-behind mode the data is staged into the circular
 * buffer and drained asynchronously by ugen_bulkwb_intr(), restarting
 * the background xfer if it had stopped on an empty buffer; otherwise
 * the data is pushed synchronously through the softc bounce buffer with
 * usbd_bulk_transfer() / usbd_intr_transfer().
 * Returns 0 or an errno.  Caller (ugenwrite) holds a softc reference.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int error = 0;
	u_int32_t tn;
	char *dbuf;
	usbd_xfer_handle xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for room in the staging buffer. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					/* Don't write past the wrap point. */
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					/*
					 * Copy the (possibly wrapped) staged
					 * data into the xfer's DMA buffer.
					 */
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL, n,
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain synchronous bulk write via the bounce buffer. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n,"ugenwb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		/* One transfer per wMaxPacketSize-sized chunk. */
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n, "ugenwi");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}
979
980 int
981 ugenwrite(dev_t dev, struct uio *uio, int flag)
982 {
983 int endpt = UGENENDPOINT(dev);
984 struct ugen_softc *sc;
985 int error;
986
987 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
988 if (sc == NULL)
989 return ENXIO;
990
991 mutex_enter(&sc->sc_lock);
992 sc->sc_refcnt++;
993 mutex_exit(&sc->sc_lock);
994
995 error = ugen_do_write(sc, endpt, uio, flag);
996
997 mutex_enter(&sc->sc_lock);
998 if (--sc->sc_refcnt < 0)
999 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1000 mutex_exit(&sc->sc_lock);
1001
1002 return (error);
1003 }
1004
1005 int
1006 ugen_activate(device_t self, enum devact act)
1007 {
1008 struct ugen_softc *sc = device_private(self);
1009
1010 switch (act) {
1011 case DVACT_DEACTIVATE:
1012 sc->sc_dying = 1;
1013 return 0;
1014 default:
1015 return EOPNOTSUPP;
1016 }
1017 }
1018
/*
 * Autoconf detach: mark the device dying, abort every open pipe to kick
 * threads out of pending transfers, wait for in-progress operations to
 * drain via the refcount, revoke the device nodes (which forces closes),
 * and finally destroy the per-endpoint selinfo/condvar pairs and locks.
 */
int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce && sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/*
		 * Wake everyone.
		 * NOTE(review): only the IN-side condvars are signalled
		 * here; a writer sleeping in bulk write-behind mode seems
		 * to rely on its timeout or the pipe abort above to wake
		 * — confirm that OUT sleepers cannot be stranded.
		 */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			cv_signal(&sc->sc_endpoints[i][IN].cv);
		/* Wait for processes to go away. */
		usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
			   sc->sc_dev);

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return (0);
}
1073
/*
 * Interrupt-endpoint completion callback.  Append the received bytes to
 * the endpoint's clist and wake any sleeping reader and poll/kqueue
 * listeners.  On a stall, request an async clear and let the pipe
 * resubmit; cancellation (pipe being closed) is silently ignored.
 */
Static void
ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count;
	u_char *ibuf;

	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	/* Queue the data; overflow is silently dropped by b_to_q. */
	(void)b_to_q(ibuf, count, &sce->q);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1111
/*
 * Isochronous-IN completion callback.  Copy each frame of the finished
 * request into the endpoint's circular buffer (discarding the oldest
 * data if the reader has fallen behind), recycle the request for the
 * next round of frames, and wake any sleeping reader / poll listeners.
 */
Static void
ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	int i, isize;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
		    (long)(req - sce->isoreqs), count));

	/* throw away oldest input if the buffer is full */
	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
		sce->cur += count;
		/* Wrap the read pointer around the circular buffer. */
		if(sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->limit - sce->cur);
		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
			     count));
	}

	isize = UGETW(sce->edesc->wMaxPacketSize);
	for (i = 0; i < UGEN_NISORFRMS; i++) {
		u_int32_t actlen = req->sizes[i];
		char const *tbuf = (char const *)req->dmabuf + isize * i;

		/* copy data to buffer */
		while (actlen > 0) {
			n = min(actlen, sce->limit - sce->fill);
			memcpy(sce->fill, tbuf, n);

			tbuf += n;
			actlen -= n;
			sce->fill += n;
			if(sce->fill == sce->limit)
				sce->fill = sce->ibuf;
		}

		/* setup size for next transfer */
		req->sizes[i] = isize;
	}

	/* Requeue this request so input keeps flowing. */
	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
			     USBD_NO_COPY, ugen_isoc_rintr);
	(void)usbd_transfer(xfer);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1173
/*
 * Completion callback for bulk read-ahead (UGEN_BULK_RA) transfers.
 * Copies the newly received data from the transfer's DMA buffer into
 * the endpoint's ring buffer, restarts the transfer if there is still
 * room, and wakes any sleeping readers / pollers.
 */
Static void
ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		/* Stop read-ahead; the next read() can restart it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/*
	 * Copy data to buffer.  At most two memcpy()s: up to the end of
	 * the ring, then (after wrapping 'fill' back to 'ibuf') the rest.
	 */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary (n = free space left). */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Ring buffer full; the next read() restarts the xfer. */
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake up any process sleeping in read(). */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1243
/*
 * Completion callback for bulk write-behind (UGEN_BULK_WB) transfers.
 * Advances the ring buffer's read pointer past the data just written
 * to the device, queues the next buffered chunk if any remains, and
 * wakes any sleeping writers / pollers.
 */
Static void
ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		/* Stop write-behind; the next write() can restart it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers, wrapping 'cur' around the ring. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/*
		 * copy data from buffer into the xfer's DMA buffer:
		 * at most two memcpy()s, split at the end of the ring
		 */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Nothing buffered; the next write() restarts the xfer. */
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake up any process sleeping in write(). */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1311
1312 Static usbd_status
1313 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1314 {
1315 usbd_interface_handle iface;
1316 usb_endpoint_descriptor_t *ed;
1317 usbd_status err;
1318 struct ugen_endpoint *sce;
1319 u_int8_t niface, nendpt, endptno, endpt;
1320 int dir;
1321
1322 DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1323
1324 err = usbd_interface_count(sc->sc_udev, &niface);
1325 if (err)
1326 return (err);
1327 if (ifaceidx < 0 || ifaceidx >= niface)
1328 return (USBD_INVAL);
1329
1330 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1331 if (err)
1332 return (err);
1333 err = usbd_endpoint_count(iface, &nendpt);
1334 if (err)
1335 return (err);
1336 /* XXX should only do this after setting new altno has succeeded */
1337 for (endptno = 0; endptno < nendpt; endptno++) {
1338 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1339 endpt = ed->bEndpointAddress;
1340 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1341 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1342 sce->sc = 0;
1343 sce->edesc = 0;
1344 sce->iface = 0;
1345 }
1346
1347 /* change setting */
1348 err = usbd_set_interface(iface, altno);
1349 if (err)
1350 return (err);
1351
1352 err = usbd_endpoint_count(iface, &nendpt);
1353 if (err)
1354 return (err);
1355 for (endptno = 0; endptno < nendpt; endptno++) {
1356 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1357 KASSERT(ed != NULL);
1358 endpt = ed->bEndpointAddress;
1359 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1360 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1361 sce->sc = sc;
1362 sce->edesc = ed;
1363 sce->iface = iface;
1364 }
1365 return (0);
1366 }
1367
1368 /* Retrieve a complete descriptor for a certain device and index. */
1369 Static usb_config_descriptor_t *
1370 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1371 {
1372 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1373 int len;
1374 usbd_status err;
1375
1376 if (index == USB_CURRENT_CONFIG_INDEX) {
1377 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1378 len = UGETW(tdesc->wTotalLength);
1379 if (lenp)
1380 *lenp = len;
1381 cdesc = malloc(len, M_TEMP, M_WAITOK);
1382 memcpy(cdesc, tdesc, len);
1383 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1384 } else {
1385 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1386 if (err)
1387 return (0);
1388 len = UGETW(cdescr.wTotalLength);
1389 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1390 if (lenp)
1391 *lenp = len;
1392 cdesc = malloc(len, M_TEMP, M_WAITOK);
1393 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1394 if (err) {
1395 free(cdesc, M_TEMP);
1396 return (0);
1397 }
1398 }
1399 return (cdesc);
1400 }
1401
1402 Static int
1403 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1404 {
1405 usbd_interface_handle iface;
1406 usbd_status err;
1407
1408 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1409 if (err)
1410 return (-1);
1411 return (usbd_get_interface_altindex(iface));
1412 }
1413
/*
 * Handler for all ugen ioctls; called from ugenioctl() with a device
 * reference held.  'endpt' is the endpoint number decoded from the
 * minor device number.  The first switch handles per-endpoint ioctls;
 * everything after it is restricted to the control endpoint.
 * Returns 0 or an errno.
 */
Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	usbd_interface_handle iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return (EIO);

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
			)
			return (EINVAL);
		sce->timeout = *(int *)addr;
		return (0);
	case USB_SET_BULK_RA:
		/* Enable/disable read-ahead on a bulk IN endpoint. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			/* Allocate the ring buffer the callback fills. */
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    USBD_NO_COPY, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				/* Failed to queue; undo the setup above. */
				sce->state &= ~UGEN_BULK_RA;
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return (EIO);
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return (0);

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_WB:
		/* Enable/disable write-behind on a bulk OUT endpoint. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			/* Allocate the ring buffer write() fills. */
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			/* STOP set: no transfer until the first write(). */
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return (0);

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		/* Sanity-check the requested buffer/request sizes. */
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return (EINVAL);
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return (0);
	}
	default:
		break;
	}

	/* The remaining ioctls are only valid on the control endpoint. */
	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		/* Validate the interface index before switching. */
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		/* ugen_get_cdesc() mallocs; freed on every path below. */
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		/* "Current" alt index is resolved from the device state. */
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		/* Truncate to the size of the caller's buffer. */
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP);
		return (error);
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			/* Stage the data through a kernel buffer. */
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				/* Copy out-data in from userspace. */
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				/* Copy in-data back to userspace. */
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
					 (struct usb_device_info_old *)addr, 0);

		break;
#endif
	default:
		return (EINVAL);
	}
	return (0);
}
1841
1842 int
1843 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1844 {
1845 int endpt = UGENENDPOINT(dev);
1846 struct ugen_softc *sc;
1847 int error;
1848
1849 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1850 if (sc == NULL)
1851 return ENXIO;
1852
1853 sc->sc_refcnt++;
1854 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1855 if (--sc->sc_refcnt < 0)
1856 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1857 return (error);
1858 }
1859
/*
 * poll(2) entry point.  Reports readability/writability for the IN and
 * OUT endpoints selected by the minor device number, according to each
 * endpoint's transfer type, recording the caller with selrecord() when
 * the condition is not yet met.
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (POLLHUP);

	/* The control endpoint is not pollable. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return (POLLERR);
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return (POLLERR);
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return (POLLERR);
	}
#endif

	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Readable if the interrupt queue holds data. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Readable if the isoc ring buffer is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Read-ahead: readable when data buffered. */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Write-behind: writable while not full. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return (revents);
}
1958
1959 static void
1960 filt_ugenrdetach(struct knote *kn)
1961 {
1962 struct ugen_endpoint *sce = kn->kn_hook;
1963 struct ugen_softc *sc = sce->sc;
1964
1965 mutex_enter(&sc->sc_lock);
1966 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1967 mutex_exit(&sc->sc_lock);
1968 }
1969
1970 static int
1971 filt_ugenread_intr(struct knote *kn, long hint)
1972 {
1973 struct ugen_endpoint *sce = kn->kn_hook;
1974
1975 kn->kn_data = sce->q.c_cc;
1976 return (kn->kn_data > 0);
1977 }
1978
1979 static int
1980 filt_ugenread_isoc(struct knote *kn, long hint)
1981 {
1982 struct ugen_endpoint *sce = kn->kn_hook;
1983
1984 if (sce->cur == sce->fill)
1985 return (0);
1986
1987 if (sce->cur < sce->fill)
1988 kn->kn_data = sce->fill - sce->cur;
1989 else
1990 kn->kn_data = (sce->limit - sce->cur) +
1991 (sce->fill - sce->ibuf);
1992
1993 return (1);
1994 }
1995
1996 static int
1997 filt_ugenread_bulk(struct knote *kn, long hint)
1998 {
1999 struct ugen_endpoint *sce = kn->kn_hook;
2000
2001 if (!(sce->state & UGEN_BULK_RA))
2002 /*
2003 * We have no easy way of determining if a read will
2004 * yield any data or a write will happen.
2005 * So, emulate "seltrue".
2006 */
2007 return (filt_seltrue(kn, hint));
2008
2009 if (sce->ra_wb_used == 0)
2010 return (0);
2011
2012 kn->kn_data = sce->ra_wb_used;
2013
2014 return (1);
2015 }
2016
2017 static int
2018 filt_ugenwrite_bulk(struct knote *kn, long hint)
2019 {
2020 struct ugen_endpoint *sce = kn->kn_hook;
2021
2022 if (!(sce->state & UGEN_BULK_WB))
2023 /*
2024 * We have no easy way of determining if a read will
2025 * yield any data or a write will happen.
2026 * So, emulate "seltrue".
2027 */
2028 return (filt_seltrue(kn, hint));
2029
2030 if (sce->ra_wb_used == sce->limit - sce->ibuf)
2031 return (0);
2032
2033 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2034
2035 return (1);
2036 }
2037
/*
 * kqueue filter tables: one read filter per endpoint transfer type,
 * plus a write filter for bulk endpoints.  Initializer field order:
 * f_isfd, f_attach, f_detach, f_event; all share the same detach.
 */
static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2049
/*
 * kqueue(2) attach entry point.  Selects the filterops matching the
 * requested filter and the endpoint's transfer type, then links the
 * knote onto the endpoint's selinfo klist under the driver lock.
 */
int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (ENXIO);

	/* The control endpoint does not support kqueue filters. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		/* Pick the read filter for this transfer type. */
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return (EINVAL);

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sce;

	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return (0);
}
2121