/*	$NetBSD: ugen.c,v 1.126.2.11 2015/10/06 21:32:15 skrll Exp $	*/
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.126.2.11 2015/10/06 21:32:15 skrll Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #include "opt_usb.h"
45 #endif
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/kmem.h>
51 #include <sys/device.h>
52 #include <sys/ioctl.h>
53 #include <sys/conf.h>
54 #include <sys/tty.h>
55 #include <sys/file.h>
56 #include <sys/select.h>
57 #include <sys/proc.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60
61 #include <dev/usb/usb.h>
62 #include <dev/usb/usbdi.h>
63 #include <dev/usb/usbdi_util.h>
64
65 #ifdef UGEN_DEBUG
66 #define DPRINTF(x) if (ugendebug) printf x
67 #define DPRINTFN(n,x) if (ugendebug>(n)) printf x
68 int ugendebug = 0;
69 #else
70 #define DPRINTF(x)
71 #define DPRINTFN(n,x)
72 #endif
73
74 #define UGEN_CHUNK 128 /* chunk size for read */
75 #define UGEN_IBSIZE 1020 /* buffer size */
76 #define UGEN_BBSIZE 1024
77
78 #define UGEN_NISOREQS 4 /* number of outstanding xfer requests */
79 #define UGEN_NISORFRMS 8 /* number of transactions per req */
80 #define UGEN_NISOFRAMES (UGEN_NISORFRMS * UGEN_NISOREQS)
81
82 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
83 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
84
/*
 * Bookkeeping for one outstanding isochronous read request.  Each
 * request carries UGEN_NISORFRMS frames; UGEN_NISOREQS of these are
 * kept in flight per endpoint (see ugenopen()/ugen_isoc_rintr()).
 */
struct isoreq {
	struct ugen_endpoint *sce;	/* back-pointer to owning endpoint */
	struct usbd_xfer *xfer;		/* the transfer for this request */
	void *dmabuf;			/* xfer's DMA buffer (usbd_get_buffer) */
	uint16_t sizes[UGEN_NISORFRMS];	/* per-frame lengths; reset to
					   wMaxPacketSize after each copy-out */
};
91
/*
 * Per-endpoint, per-direction state.  One of these exists for every
 * (endpoint address, OUT/IN) pair; which fields are used depends on
 * the endpoint's transfer type (interrupt, bulk, isochronous).
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back-pointer to softc */
	usb_endpoint_descriptor_t *edesc; /* NULL if endpoint not present
					     in current configuration */
	struct usbd_interface *iface;	/* interface the endpoint lives on */
	int state;			/* UGEN_* flag bits below */
#define	UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;	/* open pipe, NULL when closed */
	struct clist q;			/* interrupt-in character queue */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	uint32_t timeout;	/* read/write timeout in ms, or
				   USBD_NO_TIMEOUT */
	uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	uint32_t ra_wb_used;	 /* how much is in buffer */
	uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;	/* single xfer used in RA/WB mode */
	struct isoreq isoreqs[UGEN_NISOREQS];	/* isochronous read requests */
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;		/* poll/select bookkeeping */
	kcondvar_t cv;			/* readers/writers sleep here */
};
120
/*
 * Per-device soft state.  sc_lock serialises the reference count and
 * the endpoint sleep/wakeup state; sc_refcnt counts threads currently
 * inside read/write so detach can wait for them to drain.
 */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	struct usbd_device *sc_udev;	/* USB device handle */

	kmutex_t sc_lock;		/* protects refcnt and cv state */
	kcondvar_t sc_detach_cv;	/* detach waits here for refcnt */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* exclusive-open flags
						   (control ep excepted) */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0			/* index for host-to-device direction */
#define IN  1			/* index for device-to-host direction */

	int sc_refcnt;		/* active read/write callers; goes to -1
				   when detach is pending */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for bulk/intr I/O */
	u_char sc_dying;	/* set once device is going away */
};
137
/* Character-device entry points. */
dev_type_open(ugenopen);
dev_type_close(ugenclose);
dev_type_read(ugenread);
dev_type_write(ugenwrite);
dev_type_ioctl(ugenioctl);
dev_type_poll(ugenpoll);
dev_type_kqfilter(ugenkqfilter);

/*
 * Character device switch.  No stop/tty/mmap support; D_OTHER marks
 * this as neither a tty nor a disk.
 */
const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
160
/* Transfer-completion callbacks (interrupt, isoc read, bulk RA/WB). */
Static void ugenintr(struct usbd_xfer *, void *,
		    usbd_status);
Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
			    usbd_status);
Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
			     usbd_status);
Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
			     usbd_status);
/* Worker routines shared by the cdevsw entry points. */
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
			 void *, int, struct lwp *);
Static int ugen_set_config(struct ugen_softc *, int);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
					       int, int *);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int ugen_get_alt_index(struct ugen_softc *, int);

/*
 * The minor number encodes the unit in the high nibble and the
 * endpoint address in the low nibble.
 */
#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))

/* Autoconfiguration glue. */
int             ugen_match(device_t, cfdata_t, void *);
void            ugen_attach(device_t, device_t, void *);
int             ugen_detach(device_t, int);
int             ugen_activate(device_t, enum devact);
extern struct cfdriver ugen_cd;
CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach, ugen_detach, ugen_activate);

/* toggle to control attach priority. -1 means "let autoconf decide" */
int ugen_override = -1;
192
193 int
194 ugen_match(device_t parent, cfdata_t match, void *aux)
195 {
196 struct usb_attach_arg *uaa = aux;
197 int override;
198
199 if (ugen_override != -1)
200 override = ugen_override;
201 else
202 override = match->cf_flags & 1;
203
204 if (override)
205 return UMATCH_HIGHEST;
206 else if (uaa->uaa_usegeneric)
207 return UMATCH_GENERIC;
208 else
209 return UMATCH_NONE;
210 }
211
/*
 * Autoconf attach: record the device handle, initialise per-endpoint
 * selinfo/condvar state, select configuration index 0 and build the
 * endpoint table for it.  On failure sc_dying is set so subsequent
 * entry points fail with ENXIO/EIO.
 */
void
ugen_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_USB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uaa->uaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->uaa_device;

	/*
	 * The selinfo and cv of every endpoint live for the whole device
	 * lifetime; ugen_set_config() deliberately leaves them intact
	 * (see UGEN_ENDPOINT_NONZERO_CRUFT).
	 */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		return;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
	    sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
}
272
/*
 * Switch the device to configuration `configno' and (re)build the
 * per-endpoint descriptor table.  Fails with USBD_IN_USE if any
 * non-control endpoint is currently open.  The selinfo/cv tails of
 * each endpoint are preserved across the memset (they are initialised
 * once in ugen_attach()).
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	struct usbd_device *dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir, i;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    device_xname(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			     ("ugen_set_config: %s - endpoint %d is open\n",
			      device_xname(sc->sc_dev), endptno));
			return USBD_IN_USE;
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return err;
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return err;

	/* Clear out the old info, but leave the selinfo and cv initialised. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
		}
	}

	/* Walk every interface/endpoint and record its descriptor. */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return err;
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return err;
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return USBD_NORMAL_COMPLETION;
}
346
/*
 * Open a ugen endpoint device node.  The control endpoint may be
 * opened any number of times; all other endpoints are exclusive-open.
 * For interrupt-IN and isochronous-IN endpoints the pipes are opened
 * and transfers started here, so data accumulates before the first
 * read.  For bulk endpoints only the pipe is opened; RA/WB mode is
 * enabled later via ioctl.
 */
int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	struct usbd_xfer *xfer;
	int i, j;

	sc = device_lookup_private(&ugen_cd, unit);
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return 0;
	}

	if (sc->sc_is_open[endpt])
		return EBUSY;

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce == 0 || sce->edesc == 0)
				return ENXIO;
		}
	}

	/* Actually open the pipes. */
	/*
	 * XXX Should back out properly if it fails.
	 * NOTE(review): on a second-iteration failure (IN after OUT) the
	 * pipes opened in the first iteration are leaked until close.
	 */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				/* Interrupt-out: plain pipe, no buffering. */
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return EIO;
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			sce->ibuf = kmem_alloc(isize, KM_SLEEP);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			/* Incoming packets are staged through a clist. */
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return ENOMEM;
			}
			err = usbd_open_pipe_intr(sce->iface,
				  edesc->bEndpointAddress,
				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
				  sce->ibuf, isize, ugenintr,
				  USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return EIO;
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return EIO;
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return EINVAL;
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			/* Circular buffer holding UGEN_NISOFRAMES frames. */
			sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
				KM_SLEEP);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
				sce->ibuf = NULL;
				return EIO;
			}
			/* Queue all requests so data flows immediately. */
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				err = usbd_create_xfer(sce->pipeh,
				    isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
				    &xfer);
				if (err)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
				for (j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
				    sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
				    ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			/* Tear down the xfers created so far. */
			while (--i >= 0) /* implicit buffer free */
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
			sce->ibuf = NULL;
			return ENOMEM;
		case UE_CONTROL:
			/* Additional control endpoints cannot be opened. */
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return EINVAL;
		}
	}
	sc->sc_is_open[endpt] = 1;
	return 0;
}
497
/*
 * Close an endpoint device node: abort and close any open pipes for
 * the directions this node was opened for, destroy outstanding xfers,
 * and free the per-mode buffers.  The control endpoint only clears
 * its open flag.
 */
int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return EINVAL;
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return 0;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce == NULL || sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		/* Aborting wakes any thread blocked in a transfer. */
		usbd_abort_pipe(sce->pipeh);
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		int msize = 0;		/* size of ibuf to free, if any */

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			/* RA/WB resources exist only if the mode was on. */
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return 0;
}
572
/*
 * Common read path for interrupt-, bulk- and isochronous-IN endpoints.
 * Unless IO_NDELAY is set, sleeps interruptibly (bounded by the
 * endpoint timeout) until data arrives, then copies it out to the
 * user's uio.  Control-endpoint reads are not supported (ENODEV);
 * use the ioctl interface instead.
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	uint32_t n, tn;
	struct usbd_xfer *xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return EIO;

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			/* ugenintr() clears ASLP and signals the cv. */
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			/*
			 * Read-ahead mode: consume from the driver's
			 * circular buffer, which the completion handler
			 * (ugen_bulkra_intr) keeps filled.
			 */
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					/* Clamp to end of circular buffer. */
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain (non-RA) bulk read: synchronous transfers. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
		    0, &xfer);
		if (error)
			return error;
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(xfer, sce->pipeh,
			    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
			    sce->timeout, sc->sc_buffer, &tn);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* cur == fill means the circular buffer is empty. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			sce->state |= UGEN_ASLP;
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return ENXIO;
	}
	return error;
}
787
788 int
789 ugenread(dev_t dev, struct uio *uio, int flag)
790 {
791 int endpt = UGENENDPOINT(dev);
792 struct ugen_softc *sc;
793 int error;
794
795 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
796 if (sc == NULL)
797 return ENXIO;
798
799 mutex_enter(&sc->sc_lock);
800 sc->sc_refcnt++;
801 mutex_exit(&sc->sc_lock);
802
803 error = ugen_do_read(sc, endpt, uio, flag);
804
805 mutex_enter(&sc->sc_lock);
806 if (--sc->sc_refcnt < 0)
807 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
808 mutex_exit(&sc->sc_lock);
809
810 return error;
811 }
812
/*
 * Common write path for bulk- and interrupt-OUT endpoints.  In bulk
 * write-behind mode data is staged into the driver's circular buffer
 * and drained asynchronously by ugen_bulkwb_intr(); otherwise the
 * data is bounced through sc_buffer with synchronous transfers.
 * Control-endpoint writes are not supported (ENODEV).
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	uint32_t n;
	int error = 0;
	uint32_t tn;
	char *dbuf;
	struct usbd_xfer *xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return EIO;

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			/* Buffer full and caller doesn't want to block. */
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for room in the circular buffer. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					/* Clamp to end of circular buffer. */
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					/*
					 * Copy (possibly wrapping) buffered
					 * data into the xfer's DMA buffer.
					 */
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer, sce, NULL, n,
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain bulk write: synchronous transfers via sc_buffer. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
		    0, &xfer);
		if (error)
			return error;
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0, sce->timeout,
			    sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_INTERRUPT:
		/* One packet (wMaxPacketSize) per synchronous transfer. */
		error = usbd_create_xfer(sce->pipeh,
		    UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
		if (error)
			return error;
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	default:
		return ENXIO;
	}
	return error;
}
981
982 int
983 ugenwrite(dev_t dev, struct uio *uio, int flag)
984 {
985 int endpt = UGENENDPOINT(dev);
986 struct ugen_softc *sc;
987 int error;
988
989 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
990 if (sc == NULL)
991 return ENXIO;
992
993 mutex_enter(&sc->sc_lock);
994 sc->sc_refcnt++;
995 mutex_exit(&sc->sc_lock);
996
997 error = ugen_do_write(sc, endpt, uio, flag);
998
999 mutex_enter(&sc->sc_lock);
1000 if (--sc->sc_refcnt < 0)
1001 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1002 mutex_exit(&sc->sc_lock);
1003
1004 return error;
1005 }
1006
1007 int
1008 ugen_activate(device_t self, enum devact act)
1009 {
1010 struct ugen_softc *sc = device_private(self);
1011
1012 switch (act) {
1013 case DVACT_DEACTIVATE:
1014 sc->sc_dying = 1;
1015 return 0;
1016 default:
1017 return EOPNOTSUPP;
1018 }
1019 }
1020
/*
 * Autoconf detach: mark the device dying, abort all pipes (waking
 * threads blocked in transfers), wait for in-flight read/write
 * callers to drain via sc_refcnt, revoke open vnodes (which runs
 * ugenclose), and tear down per-endpoint selinfo/cv state.
 */
int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce && sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/*
		 * Wake everyone.
		 * NOTE(review): only the IN-side cv is signalled here;
		 * writers sleeping on an OUT endpoint's cv appear to rely
		 * on the pipe aborts above — confirm this is sufficient.
		 */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			cv_signal(&sc->sc_endpoints[i][IN].cv);
		/* Wait for processes to go away. */
		usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
			   sc->sc_dev);

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}
1075
/*
 * Completion handler for interrupt-IN pipes (opened with
 * usbd_open_pipe_intr in ugenopen).  Appends the received bytes to
 * the endpoint's clist and wakes any sleeping reader / poller.
 */
Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	/* Pipe is being aborted/closed; nothing to do. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	/* Queue the data; overflow is silently dropped by b_to_q. */
	(void)b_to_q(ibuf, count, &sce->q);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1113
/*
 * Completion handler for isochronous-IN requests.  Copies the
 * received frames into the endpoint's circular buffer (discarding the
 * oldest data on overflow), requeues the same request, and wakes any
 * sleeping reader / poller.
 */
Static void
ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
		usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	int i, isize;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
	    (long)(req - sce->isoreqs), count));

	/* throw away oldest input if the buffer is full */
	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
		sce->cur += count;
		if(sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->limit - sce->cur);
		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
			     count));
	}

	isize = UGETW(sce->edesc->wMaxPacketSize);
	for (i = 0; i < UGEN_NISORFRMS; i++) {
		uint32_t actlen = req->sizes[i];
		char const *tbuf = (char const *)req->dmabuf + isize * i;

		/* copy data to buffer, wrapping at the end */
		while (actlen > 0) {
			n = min(actlen, sce->limit - sce->fill);
			memcpy(sce->fill, tbuf, n);

			tbuf += n;
			actlen -= n;
			sce->fill += n;
			if(sce->fill == sce->limit)
				sce->fill = sce->ibuf;
		}

		/* setup size for next transfer */
		req->sizes[i] = isize;
	}

	/* Requeue this request so data keeps flowing. */
	usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0,
	    ugen_isoc_rintr);
	(void)usbd_transfer(xfer);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1175
1176 Static void
1177 ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr,
1178 usbd_status status)
1179 {
1180 struct ugen_endpoint *sce = addr;
1181 struct ugen_softc *sc = sce->sc;
1182 uint32_t count, n;
1183 char const *tbuf;
1184 usbd_status err;
1185
1186 /* Return if we are aborting. */
1187 if (status == USBD_CANCELLED)
1188 return;
1189
1190 if (status != USBD_NORMAL_COMPLETION) {
1191 DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
1192 sce->state |= UGEN_RA_WB_STOP;
1193 if (status == USBD_STALLED)
1194 usbd_clear_endpoint_stall_async(sce->pipeh);
1195 return;
1196 }
1197
1198 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1199
1200 /* Keep track of how much is in the buffer. */
1201 sce->ra_wb_used += count;
1202
1203 /* Copy data to buffer. */
1204 tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
1205 n = min(count, sce->limit - sce->fill);
1206 memcpy(sce->fill, tbuf, n);
1207 tbuf += n;
1208 count -= n;
1209 sce->fill += n;
1210 if (sce->fill == sce->limit)
1211 sce->fill = sce->ibuf;
1212 if (count > 0) {
1213 memcpy(sce->fill, tbuf, count);
1214 sce->fill += count;
1215 }
1216
1217 /* Set up the next request if necessary. */
1218 n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
1219 if (n > 0) {
1220 usbd_setup_xfer(xfer, sce, NULL, min(n, sce->ra_wb_xferlen), 0,
1221 USBD_NO_TIMEOUT, ugen_bulkra_intr);
1222 err = usbd_transfer(xfer);
1223 if (err != USBD_IN_PROGRESS) {
1224 printf("usbd_bulkra_intr: error=%d\n", err);
1225 /*
1226 * The transfer has not been queued. Setting STOP
1227 * will make us try again at the next read.
1228 */
1229 sce->state |= UGEN_RA_WB_STOP;
1230 }
1231 }
1232 else
1233 sce->state |= UGEN_RA_WB_STOP;
1234
1235 mutex_enter(&sc->sc_lock);
1236 if (sce->state & UGEN_ASLP) {
1237 sce->state &= ~UGEN_ASLP;
1238 DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
1239 cv_signal(&sce->cv);
1240 }
1241 mutex_exit(&sc->sc_lock);
1242 selnotify(&sce->rsel, 0, 0);
1243 }
1244
1245 Static void
1246 ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
1247 usbd_status status)
1248 {
1249 struct ugen_endpoint *sce = addr;
1250 struct ugen_softc *sc = sce->sc;
1251 uint32_t count, n;
1252 char *tbuf;
1253 usbd_status err;
1254
1255 /* Return if we are aborting. */
1256 if (status == USBD_CANCELLED)
1257 return;
1258
1259 if (status != USBD_NORMAL_COMPLETION) {
1260 DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
1261 sce->state |= UGEN_RA_WB_STOP;
1262 if (status == USBD_STALLED)
1263 usbd_clear_endpoint_stall_async(sce->pipeh);
1264 return;
1265 }
1266
1267 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1268
1269 /* Keep track of how much is in the buffer. */
1270 sce->ra_wb_used -= count;
1271
1272 /* Update buffer pointers. */
1273 sce->cur += count;
1274 if (sce->cur >= sce->limit)
1275 sce->cur = sce->ibuf + (sce->cur - sce->limit);
1276
1277 /* Set up next request if necessary. */
1278 if (sce->ra_wb_used > 0) {
1279 /* copy data from buffer */
1280 tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
1281 count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
1282 n = min(count, sce->limit - sce->cur);
1283 memcpy(tbuf, sce->cur, n);
1284 tbuf += n;
1285 if (count - n > 0)
1286 memcpy(tbuf, sce->ibuf, count - n);
1287
1288 usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
1289 ugen_bulkwb_intr);
1290 err = usbd_transfer(xfer);
1291 if (err != USBD_IN_PROGRESS) {
1292 printf("usbd_bulkwb_intr: error=%d\n", err);
1293 /*
1294 * The transfer has not been queued. Setting STOP
1295 * will make us try again at the next write.
1296 */
1297 sce->state |= UGEN_RA_WB_STOP;
1298 }
1299 }
1300 else
1301 sce->state |= UGEN_RA_WB_STOP;
1302
1303 mutex_enter(&sc->sc_lock);
1304 if (sce->state & UGEN_ASLP) {
1305 sce->state &= ~UGEN_ASLP;
1306 DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
1307 cv_signal(&sce->cv);
1308 }
1309 mutex_exit(&sc->sc_lock);
1310 selnotify(&sce->rsel, 0, 0);
1311 }
1312
1313 Static usbd_status
1314 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1315 {
1316 struct usbd_interface *iface;
1317 usb_endpoint_descriptor_t *ed;
1318 usbd_status err;
1319 struct ugen_endpoint *sce;
1320 uint8_t niface, nendpt, endptno, endpt;
1321 int dir;
1322
1323 DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1324
1325 err = usbd_interface_count(sc->sc_udev, &niface);
1326 if (err)
1327 return err;
1328 if (ifaceidx < 0 || ifaceidx >= niface)
1329 return USBD_INVAL;
1330
1331 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1332 if (err)
1333 return err;
1334 err = usbd_endpoint_count(iface, &nendpt);
1335 if (err)
1336 return err;
1337 /* XXX should only do this after setting new altno has succeeded */
1338 for (endptno = 0; endptno < nendpt; endptno++) {
1339 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1340 endpt = ed->bEndpointAddress;
1341 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1342 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1343 sce->sc = 0;
1344 sce->edesc = 0;
1345 sce->iface = 0;
1346 }
1347
1348 /* change setting */
1349 err = usbd_set_interface(iface, altno);
1350 if (err)
1351 return err;
1352
1353 err = usbd_endpoint_count(iface, &nendpt);
1354 if (err)
1355 return err;
1356 for (endptno = 0; endptno < nendpt; endptno++) {
1357 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1358 KASSERT(ed != NULL);
1359 endpt = ed->bEndpointAddress;
1360 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1361 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1362 sce->sc = sc;
1363 sce->edesc = ed;
1364 sce->iface = iface;
1365 }
1366 return 0;
1367 }
1368
1369 /* Retrieve a complete descriptor for a certain device and index. */
1370 Static usb_config_descriptor_t *
1371 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1372 {
1373 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1374 int len;
1375 usbd_status err;
1376
1377 if (index == USB_CURRENT_CONFIG_INDEX) {
1378 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1379 len = UGETW(tdesc->wTotalLength);
1380 if (lenp)
1381 *lenp = len;
1382 cdesc = kmem_alloc(len, KM_SLEEP);
1383 memcpy(cdesc, tdesc, len);
1384 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1385 } else {
1386 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1387 if (err)
1388 return 0;
1389 len = UGETW(cdescr.wTotalLength);
1390 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1391 if (lenp)
1392 *lenp = len;
1393 cdesc = kmem_alloc(len, KM_SLEEP);
1394 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1395 if (err) {
1396 kmem_free(cdesc, len);
1397 return 0;
1398 }
1399 }
1400 return cdesc;
1401 }
1402
1403 Static int
1404 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1405 {
1406 struct usbd_interface *iface;
1407 usbd_status err;
1408
1409 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1410 if (err)
1411 return -1;
1412 return usbd_get_interface_altindex(iface);
1413 }
1414
1415 Static int
1416 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1417 void *addr, int flag, struct lwp *l)
1418 {
1419 struct ugen_endpoint *sce;
1420 usbd_status err;
1421 struct usbd_interface *iface;
1422 struct usb_config_desc *cd;
1423 usb_config_descriptor_t *cdesc;
1424 struct usb_interface_desc *id;
1425 usb_interface_descriptor_t *idesc;
1426 struct usb_endpoint_desc *ed;
1427 usb_endpoint_descriptor_t *edesc;
1428 struct usb_alt_interface *ai;
1429 struct usb_string_desc *si;
1430 uint8_t conf, alt;
1431 int cdesclen;
1432 int error;
1433
1434 DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
1435 if (sc->sc_dying)
1436 return EIO;
1437
1438 switch (cmd) {
1439 case FIONBIO:
1440 /* All handled in the upper FS layer. */
1441 return 0;
1442 case USB_SET_SHORT_XFER:
1443 if (endpt == USB_CONTROL_ENDPOINT)
1444 return EINVAL;
1445 /* This flag only affects read */
1446 sce = &sc->sc_endpoints[endpt][IN];
1447 if (sce == NULL || sce->pipeh == NULL)
1448 return EINVAL;
1449 if (*(int *)addr)
1450 sce->state |= UGEN_SHORT_OK;
1451 else
1452 sce->state &= ~UGEN_SHORT_OK;
1453 return 0;
1454 case USB_SET_TIMEOUT:
1455 sce = &sc->sc_endpoints[endpt][IN];
1456 if (sce == NULL
1457 /* XXX this shouldn't happen, but the distinction between
1458 input and output pipes isn't clear enough.
1459 || sce->pipeh == NULL */
1460 )
1461 return EINVAL;
1462 sce->timeout = *(int *)addr;
1463 return 0;
1464 case USB_SET_BULK_RA:
1465 if (endpt == USB_CONTROL_ENDPOINT)
1466 return EINVAL;
1467 sce = &sc->sc_endpoints[endpt][IN];
1468 if (sce == NULL || sce->pipeh == NULL)
1469 return EINVAL;
1470 edesc = sce->edesc;
1471 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1472 return EINVAL;
1473
1474 if (*(int *)addr) {
1475 /* Only turn RA on if it's currently off. */
1476 if (sce->state & UGEN_BULK_RA)
1477 return 0;
1478
1479 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1480 /* shouldn't happen */
1481 return EINVAL;
1482 error = usbd_create_xfer(sce->pipeh,
1483 sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
1484 if (error)
1485 return error;
1486 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1487 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1488 sce->fill = sce->cur = sce->ibuf;
1489 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1490 sce->ra_wb_used = 0;
1491 sce->state |= UGEN_BULK_RA;
1492 sce->state &= ~UGEN_RA_WB_STOP;
1493 /* Now start reading. */
1494 usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL,
1495 min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1496 0, USBD_NO_TIMEOUT, ugen_bulkra_intr);
1497 err = usbd_transfer(sce->ra_wb_xfer);
1498 if (err != USBD_IN_PROGRESS) {
1499 sce->state &= ~UGEN_BULK_RA;
1500 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1501 sce->ibuf = NULL;
1502 usbd_destroy_xfer(sce->ra_wb_xfer);
1503 return EIO;
1504 }
1505 } else {
1506 /* Only turn RA off if it's currently on. */
1507 if (!(sce->state & UGEN_BULK_RA))
1508 return 0;
1509
1510 sce->state &= ~UGEN_BULK_RA;
1511 usbd_abort_pipe(sce->pipeh);
1512 usbd_destroy_xfer(sce->ra_wb_xfer);
1513 /*
1514 * XXX Discard whatever's in the buffer, but we
1515 * should keep it around and drain the buffer
1516 * instead.
1517 */
1518 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1519 sce->ibuf = NULL;
1520 }
1521 return 0;
1522 case USB_SET_BULK_WB:
1523 if (endpt == USB_CONTROL_ENDPOINT)
1524 return EINVAL;
1525 sce = &sc->sc_endpoints[endpt][OUT];
1526 if (sce == NULL || sce->pipeh == NULL)
1527 return EINVAL;
1528 edesc = sce->edesc;
1529 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1530 return EINVAL;
1531
1532 if (*(int *)addr) {
1533 /* Only turn WB on if it's currently off. */
1534 if (sce->state & UGEN_BULK_WB)
1535 return 0;
1536
1537 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1538 /* shouldn't happen */
1539 return EINVAL;
1540 error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize,
1541 0, 0, &sce->ra_wb_xfer);
1542 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1543 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1544 sce->fill = sce->cur = sce->ibuf;
1545 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1546 sce->ra_wb_used = 0;
1547 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1548 } else {
1549 /* Only turn WB off if it's currently on. */
1550 if (!(sce->state & UGEN_BULK_WB))
1551 return 0;
1552
1553 sce->state &= ~UGEN_BULK_WB;
1554 /*
1555 * XXX Discard whatever's in the buffer, but we
1556 * should keep it around and keep writing to
1557 * drain the buffer instead.
1558 */
1559 usbd_abort_pipe(sce->pipeh);
1560 usbd_destroy_xfer(sce->ra_wb_xfer);
1561 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1562 sce->ibuf = NULL;
1563 }
1564 return 0;
1565 case USB_SET_BULK_RA_OPT:
1566 case USB_SET_BULK_WB_OPT:
1567 {
1568 struct usb_bulk_ra_wb_opt *opt;
1569
1570 if (endpt == USB_CONTROL_ENDPOINT)
1571 return EINVAL;
1572 opt = (struct usb_bulk_ra_wb_opt *)addr;
1573 if (cmd == USB_SET_BULK_RA_OPT)
1574 sce = &sc->sc_endpoints[endpt][IN];
1575 else
1576 sce = &sc->sc_endpoints[endpt][OUT];
1577 if (sce == NULL || sce->pipeh == NULL)
1578 return EINVAL;
1579 if (opt->ra_wb_buffer_size < 1 ||
1580 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1581 opt->ra_wb_request_size < 1 ||
1582 opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1583 return EINVAL;
1584 /*
1585 * XXX These changes do not take effect until the
1586 * next time RA/WB mode is enabled but they ought to
1587 * take effect immediately.
1588 */
1589 sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1590 sce->ra_wb_reqsize = opt->ra_wb_request_size;
1591 return 0;
1592 }
1593 default:
1594 break;
1595 }
1596
1597 if (endpt != USB_CONTROL_ENDPOINT)
1598 return EINVAL;
1599
1600 switch (cmd) {
1601 #ifdef UGEN_DEBUG
1602 case USB_SETDEBUG:
1603 ugendebug = *(int *)addr;
1604 break;
1605 #endif
1606 case USB_GET_CONFIG:
1607 err = usbd_get_config(sc->sc_udev, &conf);
1608 if (err)
1609 return EIO;
1610 *(int *)addr = conf;
1611 break;
1612 case USB_SET_CONFIG:
1613 if (!(flag & FWRITE))
1614 return EPERM;
1615 err = ugen_set_config(sc, *(int *)addr);
1616 switch (err) {
1617 case USBD_NORMAL_COMPLETION:
1618 break;
1619 case USBD_IN_USE:
1620 return EBUSY;
1621 default:
1622 return EIO;
1623 }
1624 break;
1625 case USB_GET_ALTINTERFACE:
1626 ai = (struct usb_alt_interface *)addr;
1627 err = usbd_device2interface_handle(sc->sc_udev,
1628 ai->uai_interface_index, &iface);
1629 if (err)
1630 return EINVAL;
1631 idesc = usbd_get_interface_descriptor(iface);
1632 if (idesc == NULL)
1633 return EIO;
1634 ai->uai_alt_no = idesc->bAlternateSetting;
1635 break;
1636 case USB_SET_ALTINTERFACE:
1637 if (!(flag & FWRITE))
1638 return EPERM;
1639 ai = (struct usb_alt_interface *)addr;
1640 err = usbd_device2interface_handle(sc->sc_udev,
1641 ai->uai_interface_index, &iface);
1642 if (err)
1643 return EINVAL;
1644 err = ugen_set_interface(sc, ai->uai_interface_index,
1645 ai->uai_alt_no);
1646 if (err)
1647 return EINVAL;
1648 break;
1649 case USB_GET_NO_ALT:
1650 ai = (struct usb_alt_interface *)addr;
1651 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
1652 if (cdesc == NULL)
1653 return EINVAL;
1654 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1655 if (idesc == NULL) {
1656 kmem_free(cdesc, cdesclen);
1657 return EINVAL;
1658 }
1659 ai->uai_alt_no = usbd_get_no_alts(cdesc,
1660 idesc->bInterfaceNumber);
1661 kmem_free(cdesc, cdesclen);
1662 break;
1663 case USB_GET_DEVICE_DESC:
1664 *(usb_device_descriptor_t *)addr =
1665 *usbd_get_device_descriptor(sc->sc_udev);
1666 break;
1667 case USB_GET_CONFIG_DESC:
1668 cd = (struct usb_config_desc *)addr;
1669 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
1670 if (cdesc == NULL)
1671 return EINVAL;
1672 cd->ucd_desc = *cdesc;
1673 kmem_free(cdesc, cdesclen);
1674 break;
1675 case USB_GET_INTERFACE_DESC:
1676 id = (struct usb_interface_desc *)addr;
1677 cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
1678 if (cdesc == NULL)
1679 return EINVAL;
1680 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1681 id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1682 alt = ugen_get_alt_index(sc, id->uid_interface_index);
1683 else
1684 alt = id->uid_alt_index;
1685 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1686 if (idesc == NULL) {
1687 kmem_free(cdesc, cdesclen);
1688 return EINVAL;
1689 }
1690 id->uid_desc = *idesc;
1691 kmem_free(cdesc, cdesclen);
1692 break;
1693 case USB_GET_ENDPOINT_DESC:
1694 ed = (struct usb_endpoint_desc *)addr;
1695 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
1696 if (cdesc == NULL)
1697 return EINVAL;
1698 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1699 ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1700 alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1701 else
1702 alt = ed->ued_alt_index;
1703 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1704 alt, ed->ued_endpoint_index);
1705 if (edesc == NULL) {
1706 kmem_free(cdesc, cdesclen);
1707 return EINVAL;
1708 }
1709 ed->ued_desc = *edesc;
1710 kmem_free(cdesc, cdesclen);
1711 break;
1712 case USB_GET_FULL_DESC:
1713 {
1714 int len;
1715 struct iovec iov;
1716 struct uio uio;
1717 struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1718
1719 cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
1720 if (cdesc == NULL)
1721 return EINVAL;
1722 len = cdesclen;
1723 if (len > fd->ufd_size)
1724 len = fd->ufd_size;
1725 iov.iov_base = (void *)fd->ufd_data;
1726 iov.iov_len = len;
1727 uio.uio_iov = &iov;
1728 uio.uio_iovcnt = 1;
1729 uio.uio_resid = len;
1730 uio.uio_offset = 0;
1731 uio.uio_rw = UIO_READ;
1732 uio.uio_vmspace = l->l_proc->p_vmspace;
1733 error = uiomove((void *)cdesc, len, &uio);
1734 kmem_free(cdesc, cdesclen);
1735 return error;
1736 }
1737 case USB_GET_STRING_DESC: {
1738 int len;
1739 si = (struct usb_string_desc *)addr;
1740 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1741 si->usd_language_id, &si->usd_desc, &len);
1742 if (err)
1743 return EINVAL;
1744 break;
1745 }
1746 case USB_DO_REQUEST:
1747 {
1748 struct usb_ctl_request *ur = (void *)addr;
1749 int len = UGETW(ur->ucr_request.wLength);
1750 struct iovec iov;
1751 struct uio uio;
1752 void *ptr = 0;
1753 usbd_status xerr;
1754
1755 error = 0;
1756
1757 if (!(flag & FWRITE))
1758 return EPERM;
1759 /* Avoid requests that would damage the bus integrity. */
1760 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1761 ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
1762 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1763 ur->ucr_request.bRequest == UR_SET_CONFIG) ||
1764 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
1765 ur->ucr_request.bRequest == UR_SET_INTERFACE))
1766 return EINVAL;
1767
1768 if (len < 0 || len > 32767)
1769 return EINVAL;
1770 if (len != 0) {
1771 iov.iov_base = (void *)ur->ucr_data;
1772 iov.iov_len = len;
1773 uio.uio_iov = &iov;
1774 uio.uio_iovcnt = 1;
1775 uio.uio_resid = len;
1776 uio.uio_offset = 0;
1777 uio.uio_rw =
1778 ur->ucr_request.bmRequestType & UT_READ ?
1779 UIO_READ : UIO_WRITE;
1780 uio.uio_vmspace = l->l_proc->p_vmspace;
1781 ptr = kmem_alloc(len, KM_SLEEP);
1782 if (uio.uio_rw == UIO_WRITE) {
1783 error = uiomove(ptr, len, &uio);
1784 if (error)
1785 goto ret;
1786 }
1787 }
1788 sce = &sc->sc_endpoints[endpt][IN];
1789 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
1790 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
1791 if (xerr) {
1792 error = EIO;
1793 goto ret;
1794 }
1795 if (len != 0) {
1796 if (uio.uio_rw == UIO_READ) {
1797 size_t alen = min(len, ur->ucr_actlen);
1798 error = uiomove(ptr, alen, &uio);
1799 if (error)
1800 goto ret;
1801 }
1802 }
1803 ret:
1804 if (ptr)
1805 kmem_free(ptr, len);
1806 return error;
1807 }
1808 case USB_GET_DEVICEINFO:
1809 usbd_fill_deviceinfo(sc->sc_udev,
1810 (struct usb_device_info *)addr, 0);
1811 break;
1812 #ifdef COMPAT_30
1813 case USB_GET_DEVICEINFO_OLD:
1814 usbd_fill_deviceinfo_old(sc->sc_udev,
1815 (struct usb_device_info_old *)addr, 0);
1816
1817 break;
1818 #endif
1819 default:
1820 return EINVAL;
1821 }
1822 return 0;
1823 }
1824
1825 int
1826 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1827 {
1828 int endpt = UGENENDPOINT(dev);
1829 struct ugen_softc *sc;
1830 int error;
1831
1832 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1833 if (sc == NULL)
1834 return ENXIO;
1835
1836 sc->sc_refcnt++;
1837 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1838 if (--sc->sc_refcnt < 0)
1839 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1840 return error;
1841 }
1842
/*
 * poll(2) entry point.  Reports readability/writability for the
 * IN/OUT endpoint pair selected by the device minor; the control
 * endpoint cannot be polled.
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return POLLHUP;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	/*
	 * NOTE(review): sce_in/sce_out are addresses of array slots and
	 * can never be NULL, so this check looks dead; kept as-is.
	 */
	if (sce_in == NULL && sce_out == NULL)
		return POLLERR;
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return POLLERR;
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return POLLERR;
	}
#endif

	/* sc_lock protects the ring-buffer state inspected below. */
	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Readable when the interrupt clist holds data. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Readable when the isoc ring buffer is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Read-ahead: readable when data buffered. */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Write-behind: writable when space left. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return revents;
}
1941
1942 static void
1943 filt_ugenrdetach(struct knote *kn)
1944 {
1945 struct ugen_endpoint *sce = kn->kn_hook;
1946 struct ugen_softc *sc = sce->sc;
1947
1948 mutex_enter(&sc->sc_lock);
1949 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1950 mutex_exit(&sc->sc_lock);
1951 }
1952
1953 static int
1954 filt_ugenread_intr(struct knote *kn, long hint)
1955 {
1956 struct ugen_endpoint *sce = kn->kn_hook;
1957
1958 kn->kn_data = sce->q.c_cc;
1959 return kn->kn_data > 0;
1960 }
1961
1962 static int
1963 filt_ugenread_isoc(struct knote *kn, long hint)
1964 {
1965 struct ugen_endpoint *sce = kn->kn_hook;
1966
1967 if (sce->cur == sce->fill)
1968 return 0;
1969
1970 if (sce->cur < sce->fill)
1971 kn->kn_data = sce->fill - sce->cur;
1972 else
1973 kn->kn_data = (sce->limit - sce->cur) +
1974 (sce->fill - sce->ibuf);
1975
1976 return 1;
1977 }
1978
1979 static int
1980 filt_ugenread_bulk(struct knote *kn, long hint)
1981 {
1982 struct ugen_endpoint *sce = kn->kn_hook;
1983
1984 if (!(sce->state & UGEN_BULK_RA))
1985 /*
1986 * We have no easy way of determining if a read will
1987 * yield any data or a write will happen.
1988 * So, emulate "seltrue".
1989 */
1990 return filt_seltrue(kn, hint);
1991
1992 if (sce->ra_wb_used == 0)
1993 return 0;
1994
1995 kn->kn_data = sce->ra_wb_used;
1996
1997 return 1;
1998 }
1999
2000 static int
2001 filt_ugenwrite_bulk(struct knote *kn, long hint)
2002 {
2003 struct ugen_endpoint *sce = kn->kn_hook;
2004
2005 if (!(sce->state & UGEN_BULK_WB))
2006 /*
2007 * We have no easy way of determining if a read will
2008 * yield any data or a write will happen.
2009 * So, emulate "seltrue".
2010 */
2011 return filt_seltrue(kn, hint);
2012
2013 if (sce->ra_wb_used == sce->limit - sce->ibuf)
2014 return 0;
2015
2016 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2017
2018 return 1;
2019 }
2020
/*
 * Filter-ops tables for ugenkqfilter().  Positional initializers:
 * { f_isfd = 1 (fd-backed), f_attach = NULL, f_detach, f_event }.
 */
static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2032
2033 int
2034 ugenkqfilter(dev_t dev, struct knote *kn)
2035 {
2036 struct ugen_softc *sc;
2037 struct ugen_endpoint *sce;
2038 struct klist *klist;
2039
2040 sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
2041 if (sc == NULL)
2042 return ENXIO;
2043
2044 if (sc->sc_dying)
2045 return ENXIO;
2046
2047 if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
2048 return ENODEV;
2049
2050 switch (kn->kn_filter) {
2051 case EVFILT_READ:
2052 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
2053 if (sce == NULL)
2054 return EINVAL;
2055
2056 klist = &sce->rsel.sel_klist;
2057 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2058 case UE_INTERRUPT:
2059 kn->kn_fop = &ugenread_intr_filtops;
2060 break;
2061 case UE_ISOCHRONOUS:
2062 kn->kn_fop = &ugenread_isoc_filtops;
2063 break;
2064 case UE_BULK:
2065 kn->kn_fop = &ugenread_bulk_filtops;
2066 break;
2067 default:
2068 return EINVAL;
2069 }
2070 break;
2071
2072 case EVFILT_WRITE:
2073 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
2074 if (sce == NULL)
2075 return EINVAL;
2076
2077 klist = &sce->rsel.sel_klist;
2078 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2079 case UE_INTERRUPT:
2080 case UE_ISOCHRONOUS:
2081 /* XXX poll doesn't support this */
2082 return EINVAL;
2083
2084 case UE_BULK:
2085 kn->kn_fop = &ugenwrite_bulk_filtops;
2086 break;
2087 default:
2088 return EINVAL;
2089 }
2090 break;
2091
2092 default:
2093 return EINVAL;
2094 }
2095
2096 kn->kn_hook = sce;
2097
2098 mutex_enter(&sc->sc_lock);
2099 SLIST_INSERT_HEAD(klist, kn, kn_selnext);
2100 mutex_exit(&sc->sc_lock);
2101
2102 return 0;
2103 }
2104