/*	$NetBSD: ugen.c,v 1.126.2.10 2015/06/23 12:03:29 skrll Exp $	*/

/*
 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Lennart Augustsson (lennart (at) augustsson.net) at
 * Carlstedt Research & Technology.
 *
 * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and the Department of the Interior National Business
 * Center under agreement number NBCHC050166.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.126.2.10 2015/06/23 12:03:29 skrll Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #include "opt_usb.h"
45 #endif
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/kmem.h>
51 #include <sys/device.h>
52 #include <sys/ioctl.h>
53 #include <sys/conf.h>
54 #include <sys/tty.h>
55 #include <sys/file.h>
56 #include <sys/select.h>
57 #include <sys/proc.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60
61 #include <dev/usb/usb.h>
62 #include <dev/usb/usbdi.h>
63 #include <dev/usb/usbdi_util.h>
64
65 #ifdef UGEN_DEBUG
66 #define DPRINTF(x) if (ugendebug) printf x
67 #define DPRINTFN(n,x) if (ugendebug>(n)) printf x
68 int ugendebug = 0;
69 #else
70 #define DPRINTF(x)
71 #define DPRINTFN(n,x)
72 #endif
73
74 #define UGEN_CHUNK 128 /* chunk size for read */
75 #define UGEN_IBSIZE 1020 /* buffer size */
76 #define UGEN_BBSIZE 1024
77
78 #define UGEN_NISOREQS 4 /* number of outstanding xfer requests */
79 #define UGEN_NISORFRMS 8 /* number of transactions per req */
80 #define UGEN_NISOFRAMES (UGEN_NISORFRMS * UGEN_NISOREQS)
81
82 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
83 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
84
85 struct isoreq {
86 struct ugen_endpoint *sce;
87 struct usbd_xfer *xfer;
88 void *dmabuf;
89 uint16_t sizes[UGEN_NISORFRMS];
90 };
91
92 struct ugen_endpoint {
93 struct ugen_softc *sc;
94 usb_endpoint_descriptor_t *edesc;
95 struct usbd_interface *iface;
96 int state;
97 #define UGEN_ASLP 0x02 /* waiting for data */
98 #define UGEN_SHORT_OK 0x04 /* short xfers are OK */
99 #define UGEN_BULK_RA 0x08 /* in bulk read-ahead mode */
100 #define UGEN_BULK_WB 0x10 /* in bulk write-behind mode */
101 #define UGEN_RA_WB_STOP 0x20 /* RA/WB xfer is stopped (buffer full/empty) */
102 struct usbd_pipe *pipeh;
103 struct clist q;
104 u_char *ibuf; /* start of buffer (circular for isoc) */
105 u_char *fill; /* location for input (isoc) */
106 u_char *limit; /* end of circular buffer (isoc) */
107 u_char *cur; /* current read location (isoc) */
108 uint32_t timeout;
109 uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
110 uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
111 uint32_t ra_wb_used; /* how much is in buffer */
112 uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */
113 struct usbd_xfer *ra_wb_xfer;
114 struct isoreq isoreqs[UGEN_NISOREQS];
115 /* Keep these last; we don't overwrite them in ugen_set_config() */
116 #define UGEN_ENDPOINT_NONZERO_CRUFT offsetof(struct ugen_endpoint, rsel)
117 struct selinfo rsel;
118 kcondvar_t cv;
119 };
120
121 struct ugen_softc {
122 device_t sc_dev; /* base device */
123 struct usbd_device *sc_udev;
124
125 kmutex_t sc_lock;
126 kcondvar_t sc_detach_cv;
127
128 char sc_is_open[USB_MAX_ENDPOINTS];
129 struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
130 #define OUT 0
131 #define IN 1
132
133 int sc_refcnt;
134 char sc_buffer[UGEN_BBSIZE];
135 u_char sc_dying;
136 };
137
138 dev_type_open(ugenopen);
139 dev_type_close(ugenclose);
140 dev_type_read(ugenread);
141 dev_type_write(ugenwrite);
142 dev_type_ioctl(ugenioctl);
143 dev_type_poll(ugenpoll);
144 dev_type_kqfilter(ugenkqfilter);
145
146 const struct cdevsw ugen_cdevsw = {
147 .d_open = ugenopen,
148 .d_close = ugenclose,
149 .d_read = ugenread,
150 .d_write = ugenwrite,
151 .d_ioctl = ugenioctl,
152 .d_stop = nostop,
153 .d_tty = notty,
154 .d_poll = ugenpoll,
155 .d_mmap = nommap,
156 .d_kqfilter = ugenkqfilter,
157 .d_discard = nodiscard,
158 .d_flag = D_OTHER,
159 };
160
161 Static void ugenintr(struct usbd_xfer *, void *,
162 usbd_status);
163 Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
164 usbd_status);
165 Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
166 usbd_status);
167 Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
168 usbd_status);
169 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
170 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
171 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
172 void *, int, struct lwp *);
173 Static int ugen_set_config(struct ugen_softc *, int);
174 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
175 int, int *);
176 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
177 Static int ugen_get_alt_index(struct ugen_softc *, int);
178
179 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
180 #define UGENENDPOINT(n) (minor(n) & 0xf)
181 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
182
183 int ugen_match(device_t, cfdata_t, void *);
184 void ugen_attach(device_t, device_t, void *);
185 int ugen_detach(device_t, int);
186 int ugen_activate(device_t, enum devact);
187 extern struct cfdriver ugen_cd;
188 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach, ugen_detach, ugen_activate);
189
190 /* toggle to control attach priority. -1 means "let autoconf decide" */
191 int ugen_override = -1;
192
193 int
194 ugen_match(device_t parent, cfdata_t match, void *aux)
195 {
196 struct usb_attach_arg *uaa = aux;
197 int override;
198
199 if (ugen_override != -1)
200 override = ugen_override;
201 else
202 override = match->cf_flags & 1;
203
204 if (override)
205 return UMATCH_HIGHEST;
206 else if (uaa->uaa_usegeneric)
207 return UMATCH_GENERIC;
208 else
209 return UMATCH_NONE;
210 }
211
212 void
213 ugen_attach(device_t parent, device_t self, void *aux)
214 {
215 struct ugen_softc *sc = device_private(self);
216 struct usb_attach_arg *uaa = aux;
217 struct usbd_device *udev;
218 char *devinfop;
219 usbd_status err;
220 int i, dir, conf;
221
222 aprint_naive("\n");
223 aprint_normal("\n");
224
225 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_USB);
226 cv_init(&sc->sc_detach_cv, "ugendet");
227
228 devinfop = usbd_devinfo_alloc(uaa->uaa_device, 0);
229 aprint_normal_dev(self, "%s\n", devinfop);
230 usbd_devinfo_free(devinfop);
231
232 sc->sc_dev = self;
233 sc->sc_udev = udev = uaa->uaa_device;
234
235 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
236 for (dir = OUT; dir <= IN; dir++) {
237 struct ugen_endpoint *sce;
238
239 sce = &sc->sc_endpoints[i][dir];
240 selinit(&sce->rsel);
241 cv_init(&sce->cv, "ugensce");
242 }
243 }
244
245 /* First set configuration index 0, the default one for ugen. */
246 err = usbd_set_config_index(udev, 0, 0);
247 if (err) {
248 aprint_error_dev(self,
249 "setting configuration index 0 failed\n");
250 sc->sc_dying = 1;
251 return;
252 }
253 conf = usbd_get_config_descriptor(udev)->bConfigurationValue;
254
255 /* Set up all the local state for this configuration. */
256 err = ugen_set_config(sc, conf);
257 if (err) {
258 aprint_error_dev(self, "setting configuration %d failed\n",
259 conf);
260 sc->sc_dying = 1;
261 return;
262 }
263
264 usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
265 sc->sc_dev);
266
267 if (!pmf_device_register(self, NULL, NULL))
268 aprint_error_dev(self, "couldn't establish power handler\n");
269
270 return;
271 }
272
273 Static int
274 ugen_set_config(struct ugen_softc *sc, int configno)
275 {
276 struct usbd_device *dev = sc->sc_udev;
277 usb_config_descriptor_t *cdesc;
278 struct usbd_interface *iface;
279 usb_endpoint_descriptor_t *ed;
280 struct ugen_endpoint *sce;
281 uint8_t niface, nendpt;
282 int ifaceno, endptno, endpt;
283 usbd_status err;
284 int dir, i;
285
286 DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
287 device_xname(sc->sc_dev), configno, sc));
288
289 /*
290 * We start at 1, not 0, because we don't care whether the
291 * control endpoint is open or not. It is always present.
292 */
293 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
294 if (sc->sc_is_open[endptno]) {
295 DPRINTFN(1,
296 ("ugen_set_config: %s - endpoint %d is open\n",
297 device_xname(sc->sc_dev), endptno));
298 return USBD_IN_USE;
299 }
300
301 /* Avoid setting the current value. */
302 cdesc = usbd_get_config_descriptor(dev);
303 if (!cdesc || cdesc->bConfigurationValue != configno) {
304 err = usbd_set_config_no(dev, configno, 1);
305 if (err)
306 return err;
307 }
308
309 err = usbd_interface_count(dev, &niface);
310 if (err)
311 return err;
312
313 /* Clear out the old info, but leave the selinfo and cv initialised. */
314 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
315 for (dir = OUT; dir <= IN; dir++) {
316 sce = &sc->sc_endpoints[i][dir];
317 memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
318 }
319 }
320
321 for (ifaceno = 0; ifaceno < niface; ifaceno++) {
322 DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
323 err = usbd_device2interface_handle(dev, ifaceno, &iface);
324 if (err)
325 return err;
326 err = usbd_endpoint_count(iface, &nendpt);
327 if (err)
328 return err;
329 for (endptno = 0; endptno < nendpt; endptno++) {
330 ed = usbd_interface2endpoint_descriptor(iface,endptno);
331 KASSERT(ed != NULL);
332 endpt = ed->bEndpointAddress;
333 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
334 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
335 DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
336 "(%d,%d), sce=%p\n",
337 endptno, endpt, UE_GET_ADDR(endpt),
338 UE_GET_DIR(endpt), sce));
339 sce->sc = sc;
340 sce->edesc = ed;
341 sce->iface = iface;
342 }
343 }
344 return USBD_NORMAL_COMPLETION;
345 }
346
347 int
348 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
349 {
350 struct ugen_softc *sc;
351 int unit = UGENUNIT(dev);
352 int endpt = UGENENDPOINT(dev);
353 usb_endpoint_descriptor_t *edesc;
354 struct ugen_endpoint *sce;
355 int dir, isize;
356 usbd_status err;
357 struct usbd_xfer *xfer;
358 void *tbuf;
359 int i, j;
360
361 sc = device_lookup_private(&ugen_cd, unit);
362 if (sc == NULL)
363 return ENXIO;
364
365 DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
366 flag, mode, unit, endpt));
367
368 if (sc == NULL || sc->sc_dying)
369 return ENXIO;
370
371 /* The control endpoint allows multiple opens. */
372 if (endpt == USB_CONTROL_ENDPOINT) {
373 sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
374 return 0;
375 }
376
377 if (sc->sc_is_open[endpt])
378 return EBUSY;
379
380 /* Make sure there are pipes for all directions. */
381 for (dir = OUT; dir <= IN; dir++) {
382 if (flag & (dir == OUT ? FWRITE : FREAD)) {
383 sce = &sc->sc_endpoints[endpt][dir];
384 if (sce == 0 || sce->edesc == 0)
385 return ENXIO;
386 }
387 }
388
389 /* Actually open the pipes. */
390 /* XXX Should back out properly if it fails. */
391 for (dir = OUT; dir <= IN; dir++) {
392 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
393 continue;
394 sce = &sc->sc_endpoints[endpt][dir];
395 sce->state = 0;
396 sce->timeout = USBD_NO_TIMEOUT;
397 DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
398 sc, endpt, dir, sce));
399 edesc = sce->edesc;
400 switch (edesc->bmAttributes & UE_XFERTYPE) {
401 case UE_INTERRUPT:
402 if (dir == OUT) {
403 err = usbd_open_pipe(sce->iface,
404 edesc->bEndpointAddress, 0, &sce->pipeh);
405 if (err)
406 return EIO;
407 break;
408 }
409 isize = UGETW(edesc->wMaxPacketSize);
410 if (isize == 0) /* shouldn't happen */
411 return EINVAL;
412 sce->ibuf = kmem_alloc(isize, KM_SLEEP);
413 DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
414 endpt, isize));
415 if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
416 kmem_free(sce->ibuf, isize);
417 sce->ibuf = NULL;
418 return ENOMEM;
419 }
420 err = usbd_open_pipe_intr(sce->iface,
421 edesc->bEndpointAddress,
422 USBD_SHORT_XFER_OK, &sce->pipeh, sce,
423 sce->ibuf, isize, ugenintr,
424 USBD_DEFAULT_INTERVAL);
425 if (err) {
426 clfree(&sce->q);
427 kmem_free(sce->ibuf, isize);
428 sce->ibuf = NULL;
429 return EIO;
430 }
431 DPRINTFN(5, ("ugenopen: interrupt open done\n"));
432 break;
433 case UE_BULK:
434 err = usbd_open_pipe(sce->iface,
435 edesc->bEndpointAddress, 0, &sce->pipeh);
436 if (err)
437 return EIO;
438 sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
439 /*
440 * Use request size for non-RA/WB transfers
441 * as the default.
442 */
443 sce->ra_wb_reqsize = UGEN_BBSIZE;
444 break;
445 case UE_ISOCHRONOUS:
446 if (dir == OUT)
447 return EINVAL;
448 isize = UGETW(edesc->wMaxPacketSize);
449 if (isize == 0) /* shouldn't happen */
450 return EINVAL;
451 sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
452 KM_SLEEP);
453 sce->cur = sce->fill = sce->ibuf;
454 sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
455 DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
456 endpt, isize));
457 err = usbd_open_pipe(sce->iface,
458 edesc->bEndpointAddress, 0, &sce->pipeh);
459 if (err) {
460 kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
461 sce->ibuf = NULL;
462 return EIO;
463 }
464 for (i = 0; i < UGEN_NISOREQS; ++i) {
465 sce->isoreqs[i].sce = sce;
466 xfer = usbd_alloc_xfer(sc->sc_udev);
467 if (xfer == NULL)
468 goto bad;
469 sce->isoreqs[i].xfer = xfer;
470 tbuf = usbd_alloc_buffer
471 (xfer, isize * UGEN_NISORFRMS);
472 if (tbuf == NULL) {
473 i++;
474 goto bad;
475 }
476 sce->isoreqs[i].dmabuf = tbuf;
477 for (j = 0; j < UGEN_NISORFRMS; ++j)
478 sce->isoreqs[i].sizes[j] = isize;
479 usbd_setup_isoc_xfer
480 (xfer, sce->pipeh, &sce->isoreqs[i],
481 sce->isoreqs[i].sizes,
482 UGEN_NISORFRMS, 0,
483 ugen_isoc_rintr);
484 (void)usbd_transfer(xfer);
485 }
486 DPRINTFN(5, ("ugenopen: isoc open done\n"));
487 break;
488 bad:
489 while (--i >= 0) /* implicit buffer free */
490 usbd_free_xfer(sce->isoreqs[i].xfer);
491 usbd_close_pipe(sce->pipeh);
492 sce->pipeh = NULL;
493 kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
494 sce->ibuf = NULL;
495 return ENOMEM;
496 case UE_CONTROL:
497 sce->timeout = USBD_DEFAULT_TIMEOUT;
498 return EINVAL;
499 }
500 }
501 sc->sc_is_open[endpt] = 1;
502 return 0;
503 }
504
505 int
506 ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
507 {
508 int endpt = UGENENDPOINT(dev);
509 struct ugen_softc *sc;
510 struct ugen_endpoint *sce;
511 int dir;
512 int i;
513
514 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
515 if (sc == NULL)
516 return ENXIO;
517
518 DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
519 flag, mode, UGENUNIT(dev), endpt));
520
521 #ifdef DIAGNOSTIC
522 if (!sc->sc_is_open[endpt]) {
523 printf("ugenclose: not open\n");
524 return EINVAL;
525 }
526 #endif
527
528 if (endpt == USB_CONTROL_ENDPOINT) {
529 DPRINTFN(5, ("ugenclose: close control\n"));
530 sc->sc_is_open[endpt] = 0;
531 return 0;
532 }
533
534 for (dir = OUT; dir <= IN; dir++) {
535 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
536 continue;
537 sce = &sc->sc_endpoints[endpt][dir];
538 if (sce == NULL || sce->pipeh == NULL)
539 continue;
540 DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
541 endpt, dir, sce));
542
543 usbd_abort_pipe(sce->pipeh);
544 usbd_close_pipe(sce->pipeh);
545 sce->pipeh = NULL;
546
547 int isize = UGETW(sce->edesc->wMaxPacketSize);
548 int msize = 0;
549
550 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
551 case UE_INTERRUPT:
552 ndflush(&sce->q, sce->q.c_cc);
553 clfree(&sce->q);
554 msize = isize;
555 break;
556 case UE_ISOCHRONOUS:
557 for (i = 0; i < UGEN_NISOREQS; ++i)
558 usbd_free_xfer(sce->isoreqs[i].xfer);
559 msize = isize * UGEN_NISOFRAMES;
560 break;
561 case UE_BULK:
562 if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
563 usbd_free_xfer(sce->ra_wb_xfer);
564 msize = sce->ra_wb_bufsize;
565 }
566 break;
567 default:
568 break;
569 }
570 if (sce->ibuf != NULL) {
571 kmem_free(sce->ibuf, msize);
572 sce->ibuf = NULL;
573 }
574 }
575 sc->sc_is_open[endpt] = 0;
576
577 return 0;
578 }
579
580 Static int
581 ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
582 {
583 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
584 uint32_t n, tn;
585 struct usbd_xfer *xfer;
586 usbd_status err;
587 int error = 0;
588
589 DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));
590
591 if (sc->sc_dying)
592 return EIO;
593
594 if (endpt == USB_CONTROL_ENDPOINT)
595 return ENODEV;
596
597 #ifdef DIAGNOSTIC
598 if (sce->edesc == NULL) {
599 printf("ugenread: no edesc\n");
600 return EIO;
601 }
602 if (sce->pipeh == NULL) {
603 printf("ugenread: no pipe\n");
604 return EIO;
605 }
606 #endif
607
608 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
609 case UE_INTERRUPT:
610 /* Block until activity occurred. */
611 mutex_enter(&sc->sc_lock);
612 while (sce->q.c_cc == 0) {
613 if (flag & IO_NDELAY) {
614 mutex_exit(&sc->sc_lock);
615 return EWOULDBLOCK;
616 }
617 sce->state |= UGEN_ASLP;
618 DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
619 /* "ugenri" */
620 error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
621 mstohz(sce->timeout));
622 DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
623 if (sc->sc_dying)
624 error = EIO;
625 if (error) {
626 sce->state &= ~UGEN_ASLP;
627 break;
628 }
629 }
630 mutex_exit(&sc->sc_lock);
631
632 /* Transfer as many chunks as possible. */
633 while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
634 n = min(sce->q.c_cc, uio->uio_resid);
635 if (n > sizeof(sc->sc_buffer))
636 n = sizeof(sc->sc_buffer);
637
638 /* Remove a small chunk from the input queue. */
639 q_to_b(&sce->q, sc->sc_buffer, n);
640 DPRINTFN(5, ("ugenread: got %d chars\n", n));
641
642 /* Copy the data to the user process. */
643 error = uiomove(sc->sc_buffer, n, uio);
644 if (error)
645 break;
646 }
647 break;
648 case UE_BULK:
649 if (sce->state & UGEN_BULK_RA) {
650 DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
651 uio->uio_resid, sce->ra_wb_used));
652 xfer = sce->ra_wb_xfer;
653
654 mutex_enter(&sc->sc_lock);
655 if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
656 mutex_exit(&sc->sc_lock);
657 return EWOULDBLOCK;
658 }
659 while (uio->uio_resid > 0 && !error) {
660 while (sce->ra_wb_used == 0) {
661 sce->state |= UGEN_ASLP;
662 DPRINTFN(5,
663 ("ugenread: sleep on %p\n",
664 sce));
665 /* "ugenrb" */
666 error = cv_timedwait_sig(&sce->cv,
667 &sc->sc_lock, mstohz(sce->timeout));
668 DPRINTFN(5,
669 ("ugenread: woke, error=%d\n",
670 error));
671 if (sc->sc_dying)
672 error = EIO;
673 if (error) {
674 sce->state &= ~UGEN_ASLP;
675 break;
676 }
677 }
678
679 /* Copy data to the process. */
680 while (uio->uio_resid > 0
681 && sce->ra_wb_used > 0) {
682 n = min(uio->uio_resid,
683 sce->ra_wb_used);
684 n = min(n, sce->limit - sce->cur);
685 error = uiomove(sce->cur, n, uio);
686 if (error)
687 break;
688 sce->cur += n;
689 sce->ra_wb_used -= n;
690 if (sce->cur == sce->limit)
691 sce->cur = sce->ibuf;
692 }
693
694 /*
695 * If the transfers stopped because the
696 * buffer was full, restart them.
697 */
698 if (sce->state & UGEN_RA_WB_STOP &&
699 sce->ra_wb_used < sce->limit - sce->ibuf) {
700 n = (sce->limit - sce->ibuf)
701 - sce->ra_wb_used;
702 usbd_setup_xfer(xfer,
703 sce->pipeh, sce, NULL,
704 min(n, sce->ra_wb_xferlen),
705 0, USBD_NO_TIMEOUT,
706 ugen_bulkra_intr);
707 sce->state &= ~UGEN_RA_WB_STOP;
708 err = usbd_transfer(xfer);
709 if (err != USBD_IN_PROGRESS)
710 /*
711 * The transfer has not been
712 * queued. Setting STOP
713 * will make us try
714 * again at the next read.
715 */
716 sce->state |= UGEN_RA_WB_STOP;
717 }
718 }
719 mutex_exit(&sc->sc_lock);
720 break;
721 }
722 xfer = usbd_alloc_xfer(sc->sc_udev);
723 if (xfer == NULL)
724 return ENOMEM;
725 while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
726 DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
727 tn = n;
728 err = usbd_bulk_transfer(
729 xfer, sce->pipeh,
730 sce->state & UGEN_SHORT_OK ?
731 USBD_SHORT_XFER_OK : 0,
732 sce->timeout, sc->sc_buffer, &tn);
733 if (err) {
734 if (err == USBD_INTERRUPTED)
735 error = EINTR;
736 else if (err == USBD_TIMEOUT)
737 error = ETIMEDOUT;
738 else
739 error = EIO;
740 break;
741 }
742 DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
743 error = uiomove(sc->sc_buffer, tn, uio);
744 if (error || tn < n)
745 break;
746 }
747 usbd_free_xfer(xfer);
748 break;
749 case UE_ISOCHRONOUS:
750 mutex_enter(&sc->sc_lock);
751 while (sce->cur == sce->fill) {
752 if (flag & IO_NDELAY) {
753 mutex_exit(&sc->sc_lock);
754 return EWOULDBLOCK;
755 }
756 sce->state |= UGEN_ASLP;
757 /* "ugenri" */
758 DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
759 error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
760 mstohz(sce->timeout));
761 DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
762 if (sc->sc_dying)
763 error = EIO;
764 if (error) {
765 sce->state &= ~UGEN_ASLP;
766 break;
767 }
768 }
769
770 while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
771 if(sce->fill > sce->cur)
772 n = min(sce->fill - sce->cur, uio->uio_resid);
773 else
774 n = min(sce->limit - sce->cur, uio->uio_resid);
775
776 DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));
777
778 /* Copy the data to the user process. */
779 error = uiomove(sce->cur, n, uio);
780 if (error)
781 break;
782 sce->cur += n;
783 if (sce->cur >= sce->limit)
784 sce->cur = sce->ibuf;
785 }
786 mutex_exit(&sc->sc_lock);
787 break;
788
789
790 default:
791 return ENXIO;
792 }
793 return error;
794 }
795
796 int
797 ugenread(dev_t dev, struct uio *uio, int flag)
798 {
799 int endpt = UGENENDPOINT(dev);
800 struct ugen_softc *sc;
801 int error;
802
803 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
804 if (sc == NULL)
805 return ENXIO;
806
807 mutex_enter(&sc->sc_lock);
808 sc->sc_refcnt++;
809 mutex_exit(&sc->sc_lock);
810
811 error = ugen_do_read(sc, endpt, uio, flag);
812
813 mutex_enter(&sc->sc_lock);
814 if (--sc->sc_refcnt < 0)
815 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
816 mutex_exit(&sc->sc_lock);
817
818 return error;
819 }
820
821 Static int
822 ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
823 int flag)
824 {
825 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
826 uint32_t n;
827 int error = 0;
828 uint32_t tn;
829 char *dbuf;
830 struct usbd_xfer *xfer;
831 usbd_status err;
832
833 DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));
834
835 if (sc->sc_dying)
836 return EIO;
837
838 if (endpt == USB_CONTROL_ENDPOINT)
839 return ENODEV;
840
841 #ifdef DIAGNOSTIC
842 if (sce->edesc == NULL) {
843 printf("ugenwrite: no edesc\n");
844 return EIO;
845 }
846 if (sce->pipeh == NULL) {
847 printf("ugenwrite: no pipe\n");
848 return EIO;
849 }
850 #endif
851
852 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
853 case UE_BULK:
854 if (sce->state & UGEN_BULK_WB) {
855 DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
856 uio->uio_resid, sce->ra_wb_used));
857 xfer = sce->ra_wb_xfer;
858
859 mutex_enter(&sc->sc_lock);
860 if (sce->ra_wb_used == sce->limit - sce->ibuf &&
861 flag & IO_NDELAY) {
862 mutex_exit(&sc->sc_lock);
863 return EWOULDBLOCK;
864 }
865 while (uio->uio_resid > 0 && !error) {
866 while (sce->ra_wb_used ==
867 sce->limit - sce->ibuf) {
868 sce->state |= UGEN_ASLP;
869 DPRINTFN(5,
870 ("ugenwrite: sleep on %p\n",
871 sce));
872 /* "ugenwb" */
873 error = cv_timedwait_sig(&sce->cv,
874 &sc->sc_lock, mstohz(sce->timeout));
875 DPRINTFN(5,
876 ("ugenwrite: woke, error=%d\n",
877 error));
878 if (sc->sc_dying)
879 error = EIO;
880 if (error) {
881 sce->state &= ~UGEN_ASLP;
882 break;
883 }
884 }
885
886 /* Copy data from the process. */
887 while (uio->uio_resid > 0 &&
888 sce->ra_wb_used < sce->limit - sce->ibuf) {
889 n = min(uio->uio_resid,
890 (sce->limit - sce->ibuf)
891 - sce->ra_wb_used);
892 n = min(n, sce->limit - sce->fill);
893 error = uiomove(sce->fill, n, uio);
894 if (error)
895 break;
896 sce->fill += n;
897 sce->ra_wb_used += n;
898 if (sce->fill == sce->limit)
899 sce->fill = sce->ibuf;
900 }
901
902 /*
903 * If the transfers stopped because the
904 * buffer was empty, restart them.
905 */
906 if (sce->state & UGEN_RA_WB_STOP &&
907 sce->ra_wb_used > 0) {
908 dbuf = (char *)usbd_get_buffer(xfer);
909 n = min(sce->ra_wb_used,
910 sce->ra_wb_xferlen);
911 tn = min(n, sce->limit - sce->cur);
912 memcpy(dbuf, sce->cur, tn);
913 dbuf += tn;
914 if (n - tn > 0)
915 memcpy(dbuf, sce->ibuf,
916 n - tn);
917 usbd_setup_xfer(xfer,
918 sce->pipeh, sce, NULL, n,
919 0, USBD_NO_TIMEOUT,
920 ugen_bulkwb_intr);
921 sce->state &= ~UGEN_RA_WB_STOP;
922 err = usbd_transfer(xfer);
923 if (err != USBD_IN_PROGRESS)
924 /*
925 * The transfer has not been
926 * queued. Setting STOP
927 * will make us try again
928 * at the next read.
929 */
930 sce->state |= UGEN_RA_WB_STOP;
931 }
932 }
933 mutex_exit(&sc->sc_lock);
934 break;
935 }
936 xfer = usbd_alloc_xfer(sc->sc_udev);
937 if (xfer == NULL)
938 return EIO;
939 while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
940 error = uiomove(sc->sc_buffer, n, uio);
941 if (error)
942 break;
943 DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
944 err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
945 sce->timeout, sc->sc_buffer, &n);
946 if (err) {
947 if (err == USBD_INTERRUPTED)
948 error = EINTR;
949 else if (err == USBD_TIMEOUT)
950 error = ETIMEDOUT;
951 else
952 error = EIO;
953 break;
954 }
955 }
956 usbd_free_xfer(xfer);
957 break;
958 case UE_INTERRUPT:
959 xfer = usbd_alloc_xfer(sc->sc_udev);
960 if (xfer == NULL)
961 return EIO;
962 while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
963 uio->uio_resid)) != 0) {
964 error = uiomove(sc->sc_buffer, n, uio);
965 if (error)
966 break;
967 DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
968 err = usbd_intr_transfer(xfer, sce->pipeh, 0,
969 sce->timeout, sc->sc_buffer, &n);
970 if (err) {
971 if (err == USBD_INTERRUPTED)
972 error = EINTR;
973 else if (err == USBD_TIMEOUT)
974 error = ETIMEDOUT;
975 else
976 error = EIO;
977 break;
978 }
979 }
980 usbd_free_xfer(xfer);
981 break;
982 default:
983 return ENXIO;
984 }
985 return error;
986 }
987
988 int
989 ugenwrite(dev_t dev, struct uio *uio, int flag)
990 {
991 int endpt = UGENENDPOINT(dev);
992 struct ugen_softc *sc;
993 int error;
994
995 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
996 if (sc == NULL)
997 return ENXIO;
998
999 mutex_enter(&sc->sc_lock);
1000 sc->sc_refcnt++;
1001 mutex_exit(&sc->sc_lock);
1002
1003 error = ugen_do_write(sc, endpt, uio, flag);
1004
1005 mutex_enter(&sc->sc_lock);
1006 if (--sc->sc_refcnt < 0)
1007 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1008 mutex_exit(&sc->sc_lock);
1009
1010 return error;
1011 }
1012
1013 int
1014 ugen_activate(device_t self, enum devact act)
1015 {
1016 struct ugen_softc *sc = device_private(self);
1017
1018 switch (act) {
1019 case DVACT_DEACTIVATE:
1020 sc->sc_dying = 1;
1021 return 0;
1022 default:
1023 return EOPNOTSUPP;
1024 }
1025 }
1026
1027 int
1028 ugen_detach(device_t self, int flags)
1029 {
1030 struct ugen_softc *sc = device_private(self);
1031 struct ugen_endpoint *sce;
1032 int i, dir;
1033 int maj, mn;
1034
1035 DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
1036
1037 sc->sc_dying = 1;
1038 pmf_device_deregister(self);
1039 /* Abort all pipes. Causes processes waiting for transfer to wake. */
1040 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1041 for (dir = OUT; dir <= IN; dir++) {
1042 sce = &sc->sc_endpoints[i][dir];
1043 if (sce && sce->pipeh)
1044 usbd_abort_pipe(sce->pipeh);
1045 }
1046 }
1047
1048 mutex_enter(&sc->sc_lock);
1049 if (--sc->sc_refcnt >= 0) {
1050 /* Wake everyone */
1051 for (i = 0; i < USB_MAX_ENDPOINTS; i++)
1052 cv_signal(&sc->sc_endpoints[i][IN].cv);
1053 /* Wait for processes to go away. */
1054 usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
1055 }
1056 mutex_exit(&sc->sc_lock);
1057
1058 /* locate the major number */
1059 maj = cdevsw_lookup_major(&ugen_cdevsw);
1060
1061 /* Nuke the vnodes for any open instances (calls close). */
1062 mn = device_unit(self) * USB_MAX_ENDPOINTS;
1063 vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
1064
1065 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
1066 sc->sc_dev);
1067
1068 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1069 for (dir = OUT; dir <= IN; dir++) {
1070 sce = &sc->sc_endpoints[i][dir];
1071 seldestroy(&sce->rsel);
1072 cv_destroy(&sce->cv);
1073 }
1074 }
1075
1076 cv_destroy(&sc->sc_detach_cv);
1077 mutex_destroy(&sc->sc_lock);
1078
1079 return 0;
1080 }
1081
1082 Static void
1083 ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
1084 {
1085 struct ugen_endpoint *sce = addr;
1086 struct ugen_softc *sc = sce->sc;
1087 uint32_t count;
1088 u_char *ibuf;
1089
1090 if (status == USBD_CANCELLED)
1091 return;
1092
1093 if (status != USBD_NORMAL_COMPLETION) {
1094 DPRINTF(("ugenintr: status=%d\n", status));
1095 if (status == USBD_STALLED)
1096 usbd_clear_endpoint_stall_async(sce->pipeh);
1097 return;
1098 }
1099
1100 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1101 ibuf = sce->ibuf;
1102
1103 DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
1104 xfer, status, count));
1105 DPRINTFN(5, (" data = %02x %02x %02x\n",
1106 ibuf[0], ibuf[1], ibuf[2]));
1107
1108 (void)b_to_q(ibuf, count, &sce->q);
1109
1110 mutex_enter(&sc->sc_lock);
1111 if (sce->state & UGEN_ASLP) {
1112 sce->state &= ~UGEN_ASLP;
1113 DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
1114 cv_signal(&sce->cv);
1115 }
1116 mutex_exit(&sc->sc_lock);
1117 selnotify(&sce->rsel, 0, 0);
1118 }
1119
1120 Static void
1121 ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
1122 usbd_status status)
1123 {
1124 struct isoreq *req = addr;
1125 struct ugen_endpoint *sce = req->sce;
1126 struct ugen_softc *sc = sce->sc;
1127 uint32_t count, n;
1128 int i, isize;
1129
1130 /* Return if we are aborting. */
1131 if (status == USBD_CANCELLED)
1132 return;
1133
1134 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1135 DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1136 (long)(req - sce->isoreqs), count));
1137
1138 /* throw away oldest input if the buffer is full */
1139 if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1140 sce->cur += count;
1141 if(sce->cur >= sce->limit)
1142 sce->cur = sce->ibuf + (sce->limit - sce->cur);
1143 DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1144 count));
1145 }
1146
1147 isize = UGETW(sce->edesc->wMaxPacketSize);
1148 for (i = 0; i < UGEN_NISORFRMS; i++) {
1149 uint32_t actlen = req->sizes[i];
1150 char const *tbuf = (char const *)req->dmabuf + isize * i;
1151
1152 /* copy data to buffer */
1153 while (actlen > 0) {
1154 n = min(actlen, sce->limit - sce->fill);
1155 memcpy(sce->fill, tbuf, n);
1156
1157 tbuf += n;
1158 actlen -= n;
1159 sce->fill += n;
1160 if(sce->fill == sce->limit)
1161 sce->fill = sce->ibuf;
1162 }
1163
1164 /* setup size for next transfer */
1165 req->sizes[i] = isize;
1166 }
1167
1168 usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
1169 0, ugen_isoc_rintr);
1170 (void)usbd_transfer(xfer);
1171
1172 mutex_enter(&sc->sc_lock);
1173 if (sce->state & UGEN_ASLP) {
1174 sce->state &= ~UGEN_ASLP;
1175 DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1176 cv_signal(&sce->cv);
1177 }
1178 mutex_exit(&sc->sc_lock);
1179 selnotify(&sce->rsel, 0, 0);
1180 }
1181
1182 Static void
1183 ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr,
1184 usbd_status status)
1185 {
1186 struct ugen_endpoint *sce = addr;
1187 struct ugen_softc *sc = sce->sc;
1188 uint32_t count, n;
1189 char const *tbuf;
1190 usbd_status err;
1191
1192 /* Return if we are aborting. */
1193 if (status == USBD_CANCELLED)
1194 return;
1195
1196 if (status != USBD_NORMAL_COMPLETION) {
1197 DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
1198 sce->state |= UGEN_RA_WB_STOP;
1199 if (status == USBD_STALLED)
1200 usbd_clear_endpoint_stall_async(sce->pipeh);
1201 return;
1202 }
1203
1204 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1205
1206 /* Keep track of how much is in the buffer. */
1207 sce->ra_wb_used += count;
1208
1209 /* Copy data to buffer. */
1210 tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
1211 n = min(count, sce->limit - sce->fill);
1212 memcpy(sce->fill, tbuf, n);
1213 tbuf += n;
1214 count -= n;
1215 sce->fill += n;
1216 if (sce->fill == sce->limit)
1217 sce->fill = sce->ibuf;
1218 if (count > 0) {
1219 memcpy(sce->fill, tbuf, count);
1220 sce->fill += count;
1221 }
1222
1223 /* Set up the next request if necessary. */
1224 n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
1225 if (n > 0) {
1226 usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1227 min(n, sce->ra_wb_xferlen), 0,
1228 USBD_NO_TIMEOUT, ugen_bulkra_intr);
1229 err = usbd_transfer(xfer);
1230 if (err != USBD_IN_PROGRESS) {
1231 printf("usbd_bulkra_intr: error=%d\n", err);
1232 /*
1233 * The transfer has not been queued. Setting STOP
1234 * will make us try again at the next read.
1235 */
1236 sce->state |= UGEN_RA_WB_STOP;
1237 }
1238 }
1239 else
1240 sce->state |= UGEN_RA_WB_STOP;
1241
1242 mutex_enter(&sc->sc_lock);
1243 if (sce->state & UGEN_ASLP) {
1244 sce->state &= ~UGEN_ASLP;
1245 DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
1246 cv_signal(&sce->cv);
1247 }
1248 mutex_exit(&sc->sc_lock);
1249 selnotify(&sce->rsel, 0, 0);
1250 }
1251
/*
 * Completion handler for a bulk write-behind transfer.  Consumes the
 * bytes just sent from the endpoint's ring buffer (ibuf..limit, with
 * fill/cur as producer/consumer pointers), queues the next write if
 * more data is buffered, and wakes any sleeping or selecting writer.
 */
Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		/* Stop write-behind; ugenwrite() will retry later. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers (consumer side), wrapping past the end. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/*
		 * copy data from buffer: at most one wrap, hence at
		 * most two memcpy calls into the DMA-able xfer buffer.
		 */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    count, 0, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Buffer drained; next write restarts the pipeline. */
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a writer blocked in ugenwrite() or poll/select. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1319
1320 Static usbd_status
1321 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1322 {
1323 struct usbd_interface *iface;
1324 usb_endpoint_descriptor_t *ed;
1325 usbd_status err;
1326 struct ugen_endpoint *sce;
1327 uint8_t niface, nendpt, endptno, endpt;
1328 int dir;
1329
1330 DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1331
1332 err = usbd_interface_count(sc->sc_udev, &niface);
1333 if (err)
1334 return err;
1335 if (ifaceidx < 0 || ifaceidx >= niface)
1336 return USBD_INVAL;
1337
1338 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1339 if (err)
1340 return err;
1341 err = usbd_endpoint_count(iface, &nendpt);
1342 if (err)
1343 return err;
1344 /* XXX should only do this after setting new altno has succeeded */
1345 for (endptno = 0; endptno < nendpt; endptno++) {
1346 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1347 endpt = ed->bEndpointAddress;
1348 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1349 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1350 sce->sc = 0;
1351 sce->edesc = 0;
1352 sce->iface = 0;
1353 }
1354
1355 /* change setting */
1356 err = usbd_set_interface(iface, altno);
1357 if (err)
1358 return err;
1359
1360 err = usbd_endpoint_count(iface, &nendpt);
1361 if (err)
1362 return err;
1363 for (endptno = 0; endptno < nendpt; endptno++) {
1364 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1365 KASSERT(ed != NULL);
1366 endpt = ed->bEndpointAddress;
1367 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1368 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1369 sce->sc = sc;
1370 sce->edesc = ed;
1371 sce->iface = iface;
1372 }
1373 return 0;
1374 }
1375
1376 /* Retrieve a complete descriptor for a certain device and index. */
1377 Static usb_config_descriptor_t *
1378 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1379 {
1380 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1381 int len;
1382 usbd_status err;
1383
1384 if (index == USB_CURRENT_CONFIG_INDEX) {
1385 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1386 len = UGETW(tdesc->wTotalLength);
1387 if (lenp)
1388 *lenp = len;
1389 cdesc = kmem_alloc(len, KM_SLEEP);
1390 memcpy(cdesc, tdesc, len);
1391 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1392 } else {
1393 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1394 if (err)
1395 return 0;
1396 len = UGETW(cdescr.wTotalLength);
1397 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1398 if (lenp)
1399 *lenp = len;
1400 cdesc = kmem_alloc(len, KM_SLEEP);
1401 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1402 if (err) {
1403 kmem_free(cdesc, len);
1404 return 0;
1405 }
1406 }
1407 return cdesc;
1408 }
1409
1410 Static int
1411 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1412 {
1413 struct usbd_interface *iface;
1414 usbd_status err;
1415
1416 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1417 if (err)
1418 return -1;
1419 return usbd_get_interface_altindex(iface);
1420 }
1421
/*
 * Handle an ioctl for a ugen endpoint.  The first switch serves the
 * per-endpoint commands; everything after it is only permitted on the
 * control endpoint.  Returns 0 or an errno.
 */
Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	struct usbd_interface *iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	uint8_t conf, alt;
	int cdesclen;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return EIO;

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return 0;
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return 0;
	case USB_SET_TIMEOUT:
		/* Timeout in ms; only the IN side is updated. */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
			)
			return EINVAL;
		sce->timeout = *(int *)addr;
		return 0;
	case USB_SET_BULK_RA:
		/* Enable/disable read-ahead buffering on a bulk IN pipe. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return EINVAL;

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return 0;

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return EINVAL;
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return ENOMEM;
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return ENOMEM;
			}
			/* Ring buffer: fill/cur are producer/consumer. */
			sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    0, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				/* Roll back everything set up above. */
				sce->state &= ~UGEN_BULK_RA;
				kmem_free(sce->ibuf, sce->ra_wb_bufsize);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return EIO;
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return 0;

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			kmem_free(sce->ibuf, sce->ra_wb_bufsize);
			sce->ibuf = NULL;
		}
		return 0;
	case USB_SET_BULK_WB:
		/* Enable/disable write-behind buffering on a bulk OUT pipe. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return EINVAL;

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return 0;

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return EINVAL;
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return ENOMEM;
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return ENOMEM;
			}
			/*
			 * Ring buffer starts empty and stopped; the
			 * first ugenwrite() kicks off the transfers.
			 */
			sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return 0;

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			kmem_free(sce->ibuf, sce->ra_wb_bufsize);
			sce->ibuf = NULL;
		}
		return 0;
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		/* Tune ring-buffer and request sizes for RA/WB mode. */
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return EINVAL;
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return 0;
	}
	default:
		break;
	}

	/* Everything below is only valid on the control endpoint. */
	if (endpt != USB_CONTROL_ENDPOINT)
		return EINVAL;

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return EIO;
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return EPERM;
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return EBUSY;
		default:
			return EIO;
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return EINVAL;
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return EIO;
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return EPERM;
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return EINVAL;
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return EINVAL;
		break;
	case USB_GET_NO_ALT:
		/* Count alternate settings from the config descriptor. */
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			kmem_free(cdesc, cdesclen);
			return EINVAL;
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		cd->ucd_desc = *cdesc;
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		/* Resolve "current" alt index if requested. */
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			kmem_free(cdesc, cdesclen);
			return EINVAL;
		}
		id->uid_desc = *idesc;
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		/* Resolve "current" alt index if requested. */
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			kmem_free(cdesc, cdesclen);
			return EINVAL;
		}
		ed->ued_desc = *edesc;
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_FULL_DESC:
	{
		/* Copy the full config descriptor out to userspace. */
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		len = cdesclen;
		/* Never copy more than the user's buffer can hold. */
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		kmem_free(cdesc, cdesclen);
		return error;
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return EINVAL;
		break;
	}
	case USB_DO_REQUEST:
	{
		/* Execute an arbitrary control request on behalf of user. */
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return EPERM;
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return EINVAL;

		if (len < 0 || len > 32767)
			return EINVAL;
		if (len != 0) {
			/* Stage the payload through a kernel bounce buffer. */
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = kmem_alloc(len, KM_SLEEP);
			if (uio.uio_rw == UIO_WRITE) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				/* Copy back only what the device returned. */
				size_t alen = min(len, ur->ucr_actlen);
				error = uiomove(ptr, alen, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			kmem_free(ptr, len);
		return error;
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
					 (struct usb_device_info_old *)addr, 0);

		break;
#endif
	default:
		return EINVAL;
	}
	return 0;
}
1850
1851 int
1852 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1853 {
1854 int endpt = UGENENDPOINT(dev);
1855 struct ugen_softc *sc;
1856 int error;
1857
1858 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1859 if (sc == NULL)
1860 return ENXIO;
1861
1862 sc->sc_refcnt++;
1863 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1864 if (--sc->sc_refcnt < 0)
1865 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1866 return error;
1867 }
1868
/*
 * poll/select entry point.  Readiness depends on the endpoint's
 * transfer type: interrupt uses the clist queue, isochronous the
 * ring-buffer pointers, and bulk the RA/WB accounting (or "always
 * ready" when RA/WB is off).  Returns the ready events, or an errno
 * disguised as revents per the usual *poll convention.
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return POLLHUP;

	/* The control endpoint does not support poll. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return POLLERR;
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return POLLERR;
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return POLLERR;
	}
#endif

	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Data queued in the clist means readable. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Non-empty ring buffer means readable. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Read-ahead: readable iff bytes buffered. */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			 revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Write-behind: writable iff ring has room. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			 revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return revents;
}
1967
1968 static void
1969 filt_ugenrdetach(struct knote *kn)
1970 {
1971 struct ugen_endpoint *sce = kn->kn_hook;
1972 struct ugen_softc *sc = sce->sc;
1973
1974 mutex_enter(&sc->sc_lock);
1975 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1976 mutex_exit(&sc->sc_lock);
1977 }
1978
1979 static int
1980 filt_ugenread_intr(struct knote *kn, long hint)
1981 {
1982 struct ugen_endpoint *sce = kn->kn_hook;
1983
1984 kn->kn_data = sce->q.c_cc;
1985 return kn->kn_data > 0;
1986 }
1987
1988 static int
1989 filt_ugenread_isoc(struct knote *kn, long hint)
1990 {
1991 struct ugen_endpoint *sce = kn->kn_hook;
1992
1993 if (sce->cur == sce->fill)
1994 return 0;
1995
1996 if (sce->cur < sce->fill)
1997 kn->kn_data = sce->fill - sce->cur;
1998 else
1999 kn->kn_data = (sce->limit - sce->cur) +
2000 (sce->fill - sce->ibuf);
2001
2002 return 1;
2003 }
2004
2005 static int
2006 filt_ugenread_bulk(struct knote *kn, long hint)
2007 {
2008 struct ugen_endpoint *sce = kn->kn_hook;
2009
2010 if (!(sce->state & UGEN_BULK_RA))
2011 /*
2012 * We have no easy way of determining if a read will
2013 * yield any data or a write will happen.
2014 * So, emulate "seltrue".
2015 */
2016 return filt_seltrue(kn, hint);
2017
2018 if (sce->ra_wb_used == 0)
2019 return 0;
2020
2021 kn->kn_data = sce->ra_wb_used;
2022
2023 return 1;
2024 }
2025
2026 static int
2027 filt_ugenwrite_bulk(struct knote *kn, long hint)
2028 {
2029 struct ugen_endpoint *sce = kn->kn_hook;
2030
2031 if (!(sce->state & UGEN_BULK_WB))
2032 /*
2033 * We have no easy way of determining if a read will
2034 * yield any data or a write will happen.
2035 * So, emulate "seltrue".
2036 */
2037 return filt_seltrue(kn, hint);
2038
2039 if (sce->ra_wb_used == sce->limit - sce->ibuf)
2040 return 0;
2041
2042 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2043
2044 return 1;
2045 }
2046
/*
 * kqueue filter op tables: { f_isfd, f_attach, f_detach, f_event }.
 * One read filter per endpoint transfer type plus a bulk write
 * filter; all share the same detach routine.
 */
static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2058
/*
 * kqueue attach entry point: pick the filter ops matching the
 * endpoint direction and transfer type, then link the knote onto the
 * endpoint's klist under the softc lock.  Returns 0 or an errno.
 */
int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return ENXIO;

	/* The control endpoint does not support kevent. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		/* Filter depends on the endpoint's transfer type. */
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return EINVAL;

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	default:
		return EINVAL;
	}

	kn->kn_hook = sce;

	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return 0;
}
2130