ugen.c revision 1.142 1 /* $NetBSD: ugen.c,v 1.142 2019/01/27 02:08:42 pgoyette Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.142 2019/01/27 02:08:42 pgoyette Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #include "opt_usb.h"
45 #endif
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/kmem.h>
51 #include <sys/device.h>
52 #include <sys/ioctl.h>
53 #include <sys/conf.h>
54 #include <sys/tty.h>
55 #include <sys/file.h>
56 #include <sys/select.h>
57 #include <sys/proc.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60 #include <sys/compat_stub.h>
61
62 #include <dev/usb/usb.h>
63 #include <dev/usb/usbdi.h>
64 #include <dev/usb/usbdi_util.h>
65
66 #ifdef UGEN_DEBUG
67 #define DPRINTF(x) if (ugendebug) printf x
68 #define DPRINTFN(n,x) if (ugendebug>(n)) printf x
69 int ugendebug = 0;
70 #else
71 #define DPRINTF(x)
72 #define DPRINTFN(n,x)
73 #endif
74
75 #define UGEN_CHUNK 128 /* chunk size for read */
76 #define UGEN_IBSIZE 1020 /* buffer size */
77 #define UGEN_BBSIZE 1024
78
79 #define UGEN_NISOREQS 4 /* number of outstanding xfer requests */
80 #define UGEN_NISORFRMS 8 /* number of transactions per req */
81 #define UGEN_NISOFRAMES (UGEN_NISORFRMS * UGEN_NISOREQS)
82
83 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
84 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
85
/*
 * One outstanding isochronous input request.  Each endpoint keeps
 * UGEN_NISOREQS of these in flight; the completion handler
 * (ugen_isoc_rintr) copies the data out and immediately resubmits.
 */
struct isoreq {
	struct ugen_endpoint *sce;	/* back-pointer to owning endpoint */
	struct usbd_xfer *xfer;		/* the xfer for this request */
	void *dmabuf;			/* usbd_get_buffer(xfer) */
	uint16_t sizes[UGEN_NISORFRMS];	/* per-frame lengths for the xfer */
};
92
/*
 * Per-endpoint, per-direction soft state.  One instance exists for each
 * (endpoint address, direction) pair; ugen_set_config() fills in edesc
 * and iface for the endpoints present in the active configuration.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back-pointer to softc */
	usb_endpoint_descriptor_t *edesc; /* NULL if endpoint not present */
	struct usbd_interface *iface;
	int state;			/* UGEN_* flag bits below */
#define	UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;	/* open pipe, NULL when closed */
	struct clist q;			/* interrupt-in staging queue */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	uint32_t timeout;	/* I/O timeout in ms (USBD_NO_TIMEOUT = none) */
	uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	uint32_t ra_wb_used;	 /* how much is in buffer */
	uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;
	struct isoreq isoreqs[UGEN_NISOREQS];
	/* Keep these last; we don't overwrite them in ugen_set_config() */
	/* (ugen_clear_endpoints() zeroes everything before this offset). */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;		/* poll/select/kqueue state */
	kcondvar_t cv;			/* sleep channel for blocking I/O */
};
121
/* Per-device soft state for one attached ugen(4) instance. */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	struct usbd_device *sc_udev;	/* the USB device we drive */

	kmutex_t sc_lock;		/* protects endpoint state & refcnt */
	kcondvar_t sc_detach_cv;	/* detach waits here for users */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* per-endpoint open flag */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;			/* active I/O users; detach waits */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for bulk/intr I/O */
	u_char sc_dying;		/* set when device is going away */
};
138
139 dev_type_open(ugenopen);
140 dev_type_close(ugenclose);
141 dev_type_read(ugenread);
142 dev_type_write(ugenwrite);
143 dev_type_ioctl(ugenioctl);
144 dev_type_poll(ugenpoll);
145 dev_type_kqfilter(ugenkqfilter);
146
/* Character device switch for the /dev/ugenN.EE device nodes. */
const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
161
162 Static void ugenintr(struct usbd_xfer *, void *,
163 usbd_status);
164 Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
165 usbd_status);
166 Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
167 usbd_status);
168 Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
169 usbd_status);
170 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
171 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
172 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
173 void *, int, struct lwp *);
174 Static int ugen_set_config(struct ugen_softc *, int, int);
175 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
176 int, int *);
177 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
178 Static int ugen_get_alt_index(struct ugen_softc *, int);
179 Static void ugen_clear_endpoints(struct ugen_softc *);
180
181 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
182 #define UGENENDPOINT(n) (minor(n) & 0xf)
183 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
184
185 int ugenif_match(device_t, cfdata_t, void *);
186 void ugenif_attach(device_t, device_t, void *);
187 int ugen_match(device_t, cfdata_t, void *);
188 void ugen_attach(device_t, device_t, void *);
189 int ugen_detach(device_t, int);
190 int ugen_activate(device_t, enum devact);
191 extern struct cfdriver ugen_cd;
192 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match,
193 ugen_attach, ugen_detach, ugen_activate);
194 CFATTACH_DECL_NEW(ugenif, sizeof(struct ugen_softc), ugenif_match,
195 ugenif_attach, ugen_detach, ugen_activate);
196
197 /* toggle to control attach priority. -1 means "let autoconf decide" */
198 int ugen_override = -1;
199
200 int
201 ugen_match(device_t parent, cfdata_t match, void *aux)
202 {
203 struct usb_attach_arg *uaa = aux;
204 int override;
205
206 if (ugen_override != -1)
207 override = ugen_override;
208 else
209 override = match->cf_flags & 1;
210
211 if (override)
212 return UMATCH_HIGHEST;
213 else if (uaa->uaa_usegeneric)
214 return UMATCH_GENERIC;
215 else
216 return UMATCH_NONE;
217 }
218
219 int
220 ugenif_match(device_t parent, cfdata_t match, void *aux)
221 {
222 /* Assume that they knew what they configured! (see ugenif(4)) */
223 return UMATCH_HIGHEST;
224 }
225
226 void
227 ugen_attach(device_t parent, device_t self, void *aux)
228 {
229 struct usb_attach_arg *uaa = aux;
230 struct usbif_attach_arg uiaa;
231
232 memset(&uiaa, 0, sizeof uiaa);
233 uiaa.uiaa_port = uaa->uaa_port;
234 uiaa.uiaa_vendor = uaa->uaa_vendor;
235 uiaa.uiaa_product = uaa->uaa_product;
236 uiaa.uiaa_release = uaa->uaa_release;
237 uiaa.uiaa_device = uaa->uaa_device;
238 uiaa.uiaa_configno = -1;
239 uiaa.uiaa_ifaceno = -1;
240
241 ugenif_attach(parent, self, &uiaa);
242 }
243
/*
 * Common attach path for both ugen (whole device) and ugenif (single
 * interface).  Initializes locks and per-endpoint selinfo/cv state,
 * optionally sets configuration index 0 when attaching the whole
 * device, and records the endpoint layout of the active configuration.
 * On any failure sc_dying is set so subsequent opens fail with ENXIO.
 */
void
ugenif_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uiaa->uiaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uiaa->uiaa_device;

	/*
	 * Initialize selinfo/cv for every possible endpoint up front;
	 * these persist across configuration changes (see
	 * UGEN_ENDPOINT_NONZERO_CRUFT / ugen_clear_endpoints()).
	 */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	if (uiaa->uiaa_ifaceno < 0) {
		/*
		 * If we attach the whole device,
		 * set configuration index 0, the default one.
		 */
		err = usbd_set_config_index(udev, 0, 0);
		if (err) {
			aprint_error_dev(self,
			    "setting configuration index 0 failed\n");
			sc->sc_dying = 1;
			return;
		}
	}

	/* Get current configuration */
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf, uiaa->uiaa_ifaceno < 0);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

}
309
310 Static void
311 ugen_clear_endpoints(struct ugen_softc *sc)
312 {
313
314 /* Clear out the old info, but leave the selinfo and cv initialised. */
315 for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
316 for (int dir = OUT; dir <= IN; dir++) {
317 struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
318 memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
319 }
320 }
321 }
322
/*
 * Switch the device to configuration 'configno' and rebuild the
 * per-endpoint state table from the new configuration's descriptors.
 * When 'chkopen' is set, refuse (USBD_IN_USE) if any non-control
 * endpoint is currently open, since changing configurations would
 * yank state out from under those opens.  Returns a usbd_status.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno, int chkopen)
{
	struct usbd_device *dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
	    device_xname(sc->sc_dev), configno, sc));

	if (chkopen) {
		/*
		 * We start at 1, not 0, because we don't care whether the
		 * control endpoint is open or not. It is always present.
		 */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
			if (sc->sc_is_open[endptno]) {
				DPRINTFN(1,
				    ("ugen_set_config: %s - endpoint %d is open\n",
				    device_xname(sc->sc_dev), endptno));
				return USBD_IN_USE;
			}
	}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return err;
	}

	/* Wipe the old endpoint table (rsel/cv survive). */
	ugen_clear_endpoints(sc);

	err = usbd_interface_count(dev, &niface);
	if (err)
		return err;

	/* Record sc/edesc/iface for each endpoint in each interface. */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return err;
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return err;
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
			    "(%d,%d), sce=%p\n",
			    endptno, endpt, UE_GET_ADDR(endpt),
			    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return USBD_NORMAL_COMPLETION;
}
392
/*
 * Open a /dev/ugenN.EE endpoint node.  The control endpoint (EE == 0)
 * allows multiple concurrent opens; all others are exclusive.  For
 * each requested direction (FREAD -> IN, FWRITE -> OUT) the matching
 * pipe is opened and transfer-type-specific resources are set up:
 * interrupt-in gets a clist staging queue and an interrupt pipe,
 * bulk gets default RA/WB sizing, and isochronous-in gets a circular
 * buffer plus UGEN_NISOREQS pre-submitted read requests.
 */
int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	struct usbd_xfer *xfer;
	int i, j;

	sc = device_lookup_private(&ugen_cd, unit);
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
	    flag, mode, unit, endpt));

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return 0;
	}

	if (sc->sc_is_open[endpt])
		return EBUSY;

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce->edesc == NULL)
				return ENXIO;
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	/* NOTE(review): on a mid-loop failure, resources opened for the
	 * earlier direction are indeed leaked — confirm before relying
	 * on repeated open attempts after an error. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
		    sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				/* Interrupt-out: plain pipe, writes are
				 * synchronous via ugen_do_write(). */
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return EIO;
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			sce->ibuf = kmem_alloc(isize, KM_SLEEP);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
			    endpt, isize));
			/* clist queue buffers completed interrupt data
			 * until a reader drains it. */
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return ENOMEM;
			}
			err = usbd_open_pipe_intr(sce->iface,
			    edesc->bEndpointAddress,
			    USBD_SHORT_XFER_OK, &sce->pipeh, sce,
			    sce->ibuf, isize, ugenintr,
			    USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return EIO;
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return EIO;
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return EINVAL;	/* isoc output unsupported */
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			/* Circular buffer: cur chases fill, limit is end. */
			sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
			    KM_SLEEP);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
			    endpt, isize));
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
				sce->ibuf = NULL;
				return EIO;
			}
			/* Pre-submit all read requests; the completion
			 * handler keeps them circulating. */
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				err = usbd_create_xfer(sce->pipeh,
				    isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
				    &xfer);
				if (err)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
				for (j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
				    sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
				    ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			/* Unwind the xfers created so far, then the pipe
			 * and buffer. */
			while (--i >= 0) /* implicit buffer free */
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
			sce->ibuf = NULL;
			return ENOMEM;
		case UE_CONTROL:
			/* Non-zero control endpoints aren't usable here. */
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return EINVAL;
		}
	}
	sc->sc_is_open[endpt] = 1;
	return 0;
}
540
/*
 * Close an endpoint node: abort any in-flight transfers, tear down
 * the transfer-type-specific resources set up by ugenopen() (clist,
 * isoc xfers, RA/WB xfer), close the pipes, and free the buffer.
 * 'msize' tracks the allocation size of sce->ibuf so kmem_free()
 * is passed the size the buffer was allocated with.
 */
int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
	    flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return EINVAL;
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		/* Control endpoint has no pipes/buffers of its own. */
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return 0;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
		    endpt, dir, sce));

		/* Stop I/O before freeing anything it might touch. */
		usbd_abort_pipe(sce->pipeh);

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		int msize = 0;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			/* ibuf only exists in RA/WB mode for bulk. */
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return 0;
}
615
/*
 * Read from the IN side of an endpoint.  Behavior depends on the
 * endpoint's transfer type:
 *  - interrupt: sleep until data is queued by ugenintr(), then drain
 *    the clist through sc_buffer in bounded chunks;
 *  - bulk in read-ahead (UGEN_BULK_RA) mode: consume from the circular
 *    RA buffer, restarting the background xfer if it stopped on full;
 *  - bulk otherwise: synchronous bulk transfers of up to UGEN_BBSIZE;
 *  - isochronous: consume from the circular buffer filled by
 *    ugen_isoc_rintr().
 * Sleeps use sce->cv under sc_lock with the per-endpoint timeout;
 * IO_NDELAY yields EWOULDBLOCK instead of sleeping.
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	uint32_t n, tn;
	struct usbd_xfer *xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			/* Mark asleep so ugenintr() knows to wake us. */
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = uimin(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
			    uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for the RA buffer to be non-empty. */
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				/* Second uimin() avoids crossing the
				 * circular-buffer wrap in one uiomove. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = uimin(uio->uio_resid,
						sce->ra_wb_used);
					n = uimin(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer, sce, NULL,
					    uimin(n, sce->ra_wb_xferlen),
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain (non-RA) bulk read: synchronous transfers
		 * bounced through sc_buffer. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(xfer, sce->pipeh,
			    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
			    sce->timeout, sc->sc_buffer, &tn);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer (tn < n) ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* Empty when the read pointer has caught the fill pointer. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			sce->state |= UGEN_ASLP;
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			/* Copy at most up to the wrap point per pass. */
			if(sce->fill > sce->cur)
				n = uimin(sce->fill - sce->cur, uio->uio_resid);
			else
				n = uimin(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return ENXIO;
	}
	return error;
}
826
827 int
828 ugenread(dev_t dev, struct uio *uio, int flag)
829 {
830 int endpt = UGENENDPOINT(dev);
831 struct ugen_softc *sc;
832 int error;
833
834 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
835 if (sc == NULL || sc->sc_dying)
836 return ENXIO;
837
838 mutex_enter(&sc->sc_lock);
839 sc->sc_refcnt++;
840 mutex_exit(&sc->sc_lock);
841
842 error = ugen_do_read(sc, endpt, uio, flag);
843
844 mutex_enter(&sc->sc_lock);
845 if (--sc->sc_refcnt < 0)
846 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
847 mutex_exit(&sc->sc_lock);
848
849 return error;
850 }
851
/*
 * Write to the OUT side of an endpoint.  Behavior by transfer type:
 *  - bulk in write-behind (UGEN_BULK_WB) mode: stage user data into
 *    the circular WB buffer (sleeping while it is full) and restart
 *    the background xfer if it stopped on empty;
 *  - bulk otherwise: synchronous bulk transfers of up to UGEN_BBSIZE
 *    bounced through sc_buffer;
 *  - interrupt: synchronous transfers of at most wMaxPacketSize.
 * Control endpoints and other types are rejected.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	uint32_t n;
	int error = 0;
	uint32_t tn;
	char *dbuf;
	struct usbd_xfer *xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
			    uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			/* Buffer full and caller asked not to block. */
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for room in the WB buffer. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				/* Second uimin() keeps each uiomove on one
				 * side of the circular-buffer wrap. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = uimin(uio->uio_resid,
					    (sce->limit - sce->ibuf)
					     - sce->ra_wb_used);
					n = uimin(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					/* Copy (possibly wrapped) data from
					 * the circular buffer into the xfer's
					 * DMA buffer. */
					dbuf = (char *)usbd_get_buffer(xfer);
					n = uimin(sce->ra_wb_used,
					    sce->ra_wb_xferlen);
					tn = uimin(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						    n - tn);
					usbd_setup_xfer(xfer, sce, NULL, n,
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain (non-WB) bulk write: synchronous transfers. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0, sce->timeout,
			    sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_INTERRUPT:
		error = usbd_create_xfer(sce->pipeh,
		    UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	default:
		return ENXIO;
	}
	return error;
}
1016
1017 int
1018 ugenwrite(dev_t dev, struct uio *uio, int flag)
1019 {
1020 int endpt = UGENENDPOINT(dev);
1021 struct ugen_softc *sc;
1022 int error;
1023
1024 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1025 if (sc == NULL || sc->sc_dying)
1026 return ENXIO;
1027
1028 mutex_enter(&sc->sc_lock);
1029 sc->sc_refcnt++;
1030 mutex_exit(&sc->sc_lock);
1031
1032 error = ugen_do_write(sc, endpt, uio, flag);
1033
1034 mutex_enter(&sc->sc_lock);
1035 if (--sc->sc_refcnt < 0)
1036 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1037 mutex_exit(&sc->sc_lock);
1038
1039 return error;
1040 }
1041
1042 int
1043 ugen_activate(device_t self, enum devact act)
1044 {
1045 struct ugen_softc *sc = device_private(self);
1046
1047 switch (act) {
1048 case DVACT_DEACTIVATE:
1049 sc->sc_dying = 1;
1050 return 0;
1051 default:
1052 return EOPNOTSUPP;
1053 }
1054 }
1055
/*
 * Detach the device: mark it dying, abort all open pipes so sleeping
 * I/O wakes up, wait for in-flight users (sc_refcnt) to drain, revoke
 * the device nodes, and finally destroy the per-endpoint and softc
 * synchronization objects.
 */
int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		/* NOTE(review): only the IN-side cv is signalled here;
		 * writers blocked on the OUT-side cv appear to rely on
		 * the pipe aborts above — confirm. */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			cv_signal(&sc->sc_endpoints[i][IN].cv);
		/* Wait for processes to go away. */
		usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}
1109
/*
 * Completion handler for the interrupt-in pipe opened by ugenopen().
 * Appends the received bytes to the endpoint's clist queue, wakes any
 * sleeping reader, and notifies poll/select/kqueue waiters.  The pipe
 * was opened with usbd_open_pipe_intr(), so the transfer is
 * automatically resubmitted by the USB stack.
 */
Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	/* Pipe is being closed/aborted; nothing to do. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	/* Queue the data; overflow is silently dropped by b_to_q. */
	(void)b_to_q(ibuf, count, &sce->q);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1147
1148 Static void
1149 ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
1150 usbd_status status)
1151 {
1152 struct isoreq *req = addr;
1153 struct ugen_endpoint *sce = req->sce;
1154 struct ugen_softc *sc = sce->sc;
1155 uint32_t count, n;
1156 int i, isize;
1157
1158 /* Return if we are aborting. */
1159 if (status == USBD_CANCELLED)
1160 return;
1161
1162 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1163 DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1164 (long)(req - sce->isoreqs), count));
1165
1166 /* throw away oldest input if the buffer is full */
1167 if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1168 sce->cur += count;
1169 if(sce->cur >= sce->limit)
1170 sce->cur = sce->ibuf + (sce->limit - sce->cur);
1171 DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1172 count));
1173 }
1174
1175 isize = UGETW(sce->edesc->wMaxPacketSize);
1176 for (i = 0; i < UGEN_NISORFRMS; i++) {
1177 uint32_t actlen = req->sizes[i];
1178 char const *tbuf = (char const *)req->dmabuf + isize * i;
1179
1180 /* copy data to buffer */
1181 while (actlen > 0) {
1182 n = uimin(actlen, sce->limit - sce->fill);
1183 memcpy(sce->fill, tbuf, n);
1184
1185 tbuf += n;
1186 actlen -= n;
1187 sce->fill += n;
1188 if(sce->fill == sce->limit)
1189 sce->fill = sce->ibuf;
1190 }
1191
1192 /* setup size for next transfer */
1193 req->sizes[i] = isize;
1194 }
1195
1196 usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0,
1197 ugen_isoc_rintr);
1198 (void)usbd_transfer(xfer);
1199
1200 mutex_enter(&sc->sc_lock);
1201 if (sce->state & UGEN_ASLP) {
1202 sce->state &= ~UGEN_ASLP;
1203 DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1204 cv_signal(&sce->cv);
1205 }
1206 mutex_exit(&sc->sc_lock);
1207 selnotify(&sce->rsel, 0, 0);
1208 }
1209
/*
 * Completion handler for bulk read-ahead (RA) transfers.  Runs in USB
 * soft-interrupt context: appends the freshly read data to the
 * endpoint's ring buffer, starts the next read while buffer space
 * remains, and wakes any sleeping reader / poller.
 */
Static void
ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		/* Stop read-ahead; a later read restarts the pipeline. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = uimin(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	/* Wrapped: copy the remainder to the start of the ring. */
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary (n = free ring space). */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce, NULL, uimin(n, sce->ra_wb_xferlen), 0,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued. Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Ring buffer full; the next read restarts the pipeline. */
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a reader sleeping in the read path, if any. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1278
/*
 * Completion handler for bulk write-behind (WB) transfers.  Runs in
 * USB soft-interrupt context: releases the ring-buffer space just
 * written to the device, submits the next chunk if more buffered data
 * remains, and wakes any sleeping writer / poller.
 */
Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		/* Stop write-behind; a later write restarts the pipeline. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers (consume, wrapping at the ring end). */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer (possibly in two pieces on wrap) */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = uimin(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = uimin(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
		    ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued. Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Nothing buffered; the next write restarts the pipeline. */
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a writer sleeping in the write path, if any. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1346
1347 Static usbd_status
1348 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1349 {
1350 struct usbd_interface *iface;
1351 usb_endpoint_descriptor_t *ed;
1352 usbd_status err;
1353 struct ugen_endpoint *sce;
1354 uint8_t niface, nendpt, endptno, endpt;
1355 int dir;
1356
1357 DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1358
1359 err = usbd_interface_count(sc->sc_udev, &niface);
1360 if (err)
1361 return err;
1362 if (ifaceidx < 0 || ifaceidx >= niface)
1363 return USBD_INVAL;
1364
1365 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1366 if (err)
1367 return err;
1368 err = usbd_endpoint_count(iface, &nendpt);
1369 if (err)
1370 return err;
1371
1372 /* change setting */
1373 err = usbd_set_interface(iface, altno);
1374 if (err)
1375 return err;
1376
1377 err = usbd_endpoint_count(iface, &nendpt);
1378 if (err)
1379 return err;
1380
1381 ugen_clear_endpoints(sc);
1382
1383 for (endptno = 0; endptno < nendpt; endptno++) {
1384 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1385 KASSERT(ed != NULL);
1386 endpt = ed->bEndpointAddress;
1387 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1388 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1389 sce->sc = sc;
1390 sce->edesc = ed;
1391 sce->iface = iface;
1392 }
1393 return 0;
1394 }
1395
1396 /* Retrieve a complete descriptor for a certain device and index. */
1397 Static usb_config_descriptor_t *
1398 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1399 {
1400 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1401 int len;
1402 usbd_status err;
1403
1404 if (index == USB_CURRENT_CONFIG_INDEX) {
1405 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1406 len = UGETW(tdesc->wTotalLength);
1407 if (lenp)
1408 *lenp = len;
1409 cdesc = kmem_alloc(len, KM_SLEEP);
1410 memcpy(cdesc, tdesc, len);
1411 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1412 } else {
1413 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1414 if (err)
1415 return 0;
1416 len = UGETW(cdescr.wTotalLength);
1417 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1418 if (lenp)
1419 *lenp = len;
1420 cdesc = kmem_alloc(len, KM_SLEEP);
1421 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1422 if (err) {
1423 kmem_free(cdesc, len);
1424 return 0;
1425 }
1426 }
1427 return cdesc;
1428 }
1429
1430 Static int
1431 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1432 {
1433 struct usbd_interface *iface;
1434 usbd_status err;
1435
1436 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1437 if (err)
1438 return -1;
1439 return usbd_get_interface_altindex(iface);
1440 }
1441
1442 Static int
1443 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1444 void *addr, int flag, struct lwp *l)
1445 {
1446 struct ugen_endpoint *sce;
1447 usbd_status err;
1448 struct usbd_interface *iface;
1449 struct usb_config_desc *cd;
1450 usb_config_descriptor_t *cdesc;
1451 struct usb_interface_desc *id;
1452 usb_interface_descriptor_t *idesc;
1453 struct usb_endpoint_desc *ed;
1454 usb_endpoint_descriptor_t *edesc;
1455 struct usb_alt_interface *ai;
1456 struct usb_string_desc *si;
1457 uint8_t conf, alt;
1458 int cdesclen;
1459 int error;
1460 int dir;
1461
1462 DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
1463 if (sc->sc_dying)
1464 return EIO;
1465
1466 switch (cmd) {
1467 case FIONBIO:
1468 /* All handled in the upper FS layer. */
1469 return 0;
1470 case USB_SET_SHORT_XFER:
1471 if (endpt == USB_CONTROL_ENDPOINT)
1472 return EINVAL;
1473 /* This flag only affects read */
1474 sce = &sc->sc_endpoints[endpt][IN];
1475 if (sce == NULL || sce->pipeh == NULL)
1476 return EINVAL;
1477 if (*(int *)addr)
1478 sce->state |= UGEN_SHORT_OK;
1479 else
1480 sce->state &= ~UGEN_SHORT_OK;
1481 return 0;
1482 case USB_SET_TIMEOUT:
1483 for (dir = OUT; dir <= IN; dir++) {
1484 sce = &sc->sc_endpoints[endpt][dir];
1485 if (sce == NULL)
1486 return EINVAL;
1487
1488 sce->timeout = *(int *)addr;
1489 }
1490 return 0;
1491 case USB_SET_BULK_RA:
1492 if (endpt == USB_CONTROL_ENDPOINT)
1493 return EINVAL;
1494 sce = &sc->sc_endpoints[endpt][IN];
1495 if (sce == NULL || sce->pipeh == NULL)
1496 return EINVAL;
1497 edesc = sce->edesc;
1498 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1499 return EINVAL;
1500
1501 if (*(int *)addr) {
1502 /* Only turn RA on if it's currently off. */
1503 if (sce->state & UGEN_BULK_RA)
1504 return 0;
1505
1506 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1507 /* shouldn't happen */
1508 return EINVAL;
1509 error = usbd_create_xfer(sce->pipeh,
1510 sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
1511 if (error)
1512 return error;
1513 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1514 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1515 sce->fill = sce->cur = sce->ibuf;
1516 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1517 sce->ra_wb_used = 0;
1518 sce->state |= UGEN_BULK_RA;
1519 sce->state &= ~UGEN_RA_WB_STOP;
1520 /* Now start reading. */
1521 usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL,
1522 uimin(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1523 0, USBD_NO_TIMEOUT, ugen_bulkra_intr);
1524 err = usbd_transfer(sce->ra_wb_xfer);
1525 if (err != USBD_IN_PROGRESS) {
1526 sce->state &= ~UGEN_BULK_RA;
1527 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1528 sce->ibuf = NULL;
1529 usbd_destroy_xfer(sce->ra_wb_xfer);
1530 return EIO;
1531 }
1532 } else {
1533 /* Only turn RA off if it's currently on. */
1534 if (!(sce->state & UGEN_BULK_RA))
1535 return 0;
1536
1537 sce->state &= ~UGEN_BULK_RA;
1538 usbd_abort_pipe(sce->pipeh);
1539 usbd_destroy_xfer(sce->ra_wb_xfer);
1540 /*
1541 * XXX Discard whatever's in the buffer, but we
1542 * should keep it around and drain the buffer
1543 * instead.
1544 */
1545 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1546 sce->ibuf = NULL;
1547 }
1548 return 0;
1549 case USB_SET_BULK_WB:
1550 if (endpt == USB_CONTROL_ENDPOINT)
1551 return EINVAL;
1552 sce = &sc->sc_endpoints[endpt][OUT];
1553 if (sce == NULL || sce->pipeh == NULL)
1554 return EINVAL;
1555 edesc = sce->edesc;
1556 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1557 return EINVAL;
1558
1559 if (*(int *)addr) {
1560 /* Only turn WB on if it's currently off. */
1561 if (sce->state & UGEN_BULK_WB)
1562 return 0;
1563
1564 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1565 /* shouldn't happen */
1566 return EINVAL;
1567 error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize,
1568 0, 0, &sce->ra_wb_xfer);
1569 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1570 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1571 sce->fill = sce->cur = sce->ibuf;
1572 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1573 sce->ra_wb_used = 0;
1574 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1575 } else {
1576 /* Only turn WB off if it's currently on. */
1577 if (!(sce->state & UGEN_BULK_WB))
1578 return 0;
1579
1580 sce->state &= ~UGEN_BULK_WB;
1581 /*
1582 * XXX Discard whatever's in the buffer, but we
1583 * should keep it around and keep writing to
1584 * drain the buffer instead.
1585 */
1586 usbd_abort_pipe(sce->pipeh);
1587 usbd_destroy_xfer(sce->ra_wb_xfer);
1588 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1589 sce->ibuf = NULL;
1590 }
1591 return 0;
1592 case USB_SET_BULK_RA_OPT:
1593 case USB_SET_BULK_WB_OPT:
1594 {
1595 struct usb_bulk_ra_wb_opt *opt;
1596
1597 if (endpt == USB_CONTROL_ENDPOINT)
1598 return EINVAL;
1599 opt = (struct usb_bulk_ra_wb_opt *)addr;
1600 if (cmd == USB_SET_BULK_RA_OPT)
1601 sce = &sc->sc_endpoints[endpt][IN];
1602 else
1603 sce = &sc->sc_endpoints[endpt][OUT];
1604 if (sce == NULL || sce->pipeh == NULL)
1605 return EINVAL;
1606 if (opt->ra_wb_buffer_size < 1 ||
1607 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1608 opt->ra_wb_request_size < 1 ||
1609 opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1610 return EINVAL;
1611 /*
1612 * XXX These changes do not take effect until the
1613 * next time RA/WB mode is enabled but they ought to
1614 * take effect immediately.
1615 */
1616 sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1617 sce->ra_wb_reqsize = opt->ra_wb_request_size;
1618 return 0;
1619 }
1620 default:
1621 break;
1622 }
1623
1624 if (endpt != USB_CONTROL_ENDPOINT)
1625 return EINVAL;
1626
1627 switch (cmd) {
1628 #ifdef UGEN_DEBUG
1629 case USB_SETDEBUG:
1630 ugendebug = *(int *)addr;
1631 break;
1632 #endif
1633 case USB_GET_CONFIG:
1634 err = usbd_get_config(sc->sc_udev, &conf);
1635 if (err)
1636 return EIO;
1637 *(int *)addr = conf;
1638 break;
1639 case USB_SET_CONFIG:
1640 if (!(flag & FWRITE))
1641 return EPERM;
1642 err = ugen_set_config(sc, *(int *)addr, 1);
1643 switch (err) {
1644 case USBD_NORMAL_COMPLETION:
1645 break;
1646 case USBD_IN_USE:
1647 return EBUSY;
1648 default:
1649 return EIO;
1650 }
1651 break;
1652 case USB_GET_ALTINTERFACE:
1653 ai = (struct usb_alt_interface *)addr;
1654 err = usbd_device2interface_handle(sc->sc_udev,
1655 ai->uai_interface_index, &iface);
1656 if (err)
1657 return EINVAL;
1658 idesc = usbd_get_interface_descriptor(iface);
1659 if (idesc == NULL)
1660 return EIO;
1661 ai->uai_alt_no = idesc->bAlternateSetting;
1662 break;
1663 case USB_SET_ALTINTERFACE:
1664 if (!(flag & FWRITE))
1665 return EPERM;
1666 ai = (struct usb_alt_interface *)addr;
1667 err = usbd_device2interface_handle(sc->sc_udev,
1668 ai->uai_interface_index, &iface);
1669 if (err)
1670 return EINVAL;
1671 err = ugen_set_interface(sc, ai->uai_interface_index,
1672 ai->uai_alt_no);
1673 if (err)
1674 return EINVAL;
1675 break;
1676 case USB_GET_NO_ALT:
1677 ai = (struct usb_alt_interface *)addr;
1678 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
1679 if (cdesc == NULL)
1680 return EINVAL;
1681 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1682 if (idesc == NULL) {
1683 kmem_free(cdesc, cdesclen);
1684 return EINVAL;
1685 }
1686 ai->uai_alt_no = usbd_get_no_alts(cdesc,
1687 idesc->bInterfaceNumber);
1688 kmem_free(cdesc, cdesclen);
1689 break;
1690 case USB_GET_DEVICE_DESC:
1691 *(usb_device_descriptor_t *)addr =
1692 *usbd_get_device_descriptor(sc->sc_udev);
1693 break;
1694 case USB_GET_CONFIG_DESC:
1695 cd = (struct usb_config_desc *)addr;
1696 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
1697 if (cdesc == NULL)
1698 return EINVAL;
1699 cd->ucd_desc = *cdesc;
1700 kmem_free(cdesc, cdesclen);
1701 break;
1702 case USB_GET_INTERFACE_DESC:
1703 id = (struct usb_interface_desc *)addr;
1704 cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
1705 if (cdesc == NULL)
1706 return EINVAL;
1707 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1708 id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1709 alt = ugen_get_alt_index(sc, id->uid_interface_index);
1710 else
1711 alt = id->uid_alt_index;
1712 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1713 if (idesc == NULL) {
1714 kmem_free(cdesc, cdesclen);
1715 return EINVAL;
1716 }
1717 id->uid_desc = *idesc;
1718 kmem_free(cdesc, cdesclen);
1719 break;
1720 case USB_GET_ENDPOINT_DESC:
1721 ed = (struct usb_endpoint_desc *)addr;
1722 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
1723 if (cdesc == NULL)
1724 return EINVAL;
1725 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1726 ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1727 alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1728 else
1729 alt = ed->ued_alt_index;
1730 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1731 alt, ed->ued_endpoint_index);
1732 if (edesc == NULL) {
1733 kmem_free(cdesc, cdesclen);
1734 return EINVAL;
1735 }
1736 ed->ued_desc = *edesc;
1737 kmem_free(cdesc, cdesclen);
1738 break;
1739 case USB_GET_FULL_DESC:
1740 {
1741 int len;
1742 struct iovec iov;
1743 struct uio uio;
1744 struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1745
1746 cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
1747 if (cdesc == NULL)
1748 return EINVAL;
1749 len = cdesclen;
1750 if (len > fd->ufd_size)
1751 len = fd->ufd_size;
1752 iov.iov_base = (void *)fd->ufd_data;
1753 iov.iov_len = len;
1754 uio.uio_iov = &iov;
1755 uio.uio_iovcnt = 1;
1756 uio.uio_resid = len;
1757 uio.uio_offset = 0;
1758 uio.uio_rw = UIO_READ;
1759 uio.uio_vmspace = l->l_proc->p_vmspace;
1760 error = uiomove((void *)cdesc, len, &uio);
1761 kmem_free(cdesc, cdesclen);
1762 return error;
1763 }
1764 case USB_GET_STRING_DESC: {
1765 int len;
1766 si = (struct usb_string_desc *)addr;
1767 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1768 si->usd_language_id, &si->usd_desc, &len);
1769 if (err)
1770 return EINVAL;
1771 break;
1772 }
1773 case USB_DO_REQUEST:
1774 {
1775 struct usb_ctl_request *ur = (void *)addr;
1776 int len = UGETW(ur->ucr_request.wLength);
1777 struct iovec iov;
1778 struct uio uio;
1779 void *ptr = 0;
1780 usbd_status xerr;
1781
1782 error = 0;
1783
1784 if (!(flag & FWRITE))
1785 return EPERM;
1786 /* Avoid requests that would damage the bus integrity. */
1787 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1788 ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
1789 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1790 ur->ucr_request.bRequest == UR_SET_CONFIG) ||
1791 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
1792 ur->ucr_request.bRequest == UR_SET_INTERFACE))
1793 return EINVAL;
1794
1795 if (len < 0 || len > 32767)
1796 return EINVAL;
1797 if (len != 0) {
1798 iov.iov_base = (void *)ur->ucr_data;
1799 iov.iov_len = len;
1800 uio.uio_iov = &iov;
1801 uio.uio_iovcnt = 1;
1802 uio.uio_resid = len;
1803 uio.uio_offset = 0;
1804 uio.uio_rw =
1805 ur->ucr_request.bmRequestType & UT_READ ?
1806 UIO_READ : UIO_WRITE;
1807 uio.uio_vmspace = l->l_proc->p_vmspace;
1808 ptr = kmem_alloc(len, KM_SLEEP);
1809 if (uio.uio_rw == UIO_WRITE) {
1810 error = uiomove(ptr, len, &uio);
1811 if (error)
1812 goto ret;
1813 }
1814 }
1815 sce = &sc->sc_endpoints[endpt][IN];
1816 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
1817 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
1818 if (xerr) {
1819 error = EIO;
1820 goto ret;
1821 }
1822 if (len != 0) {
1823 if (uio.uio_rw == UIO_READ) {
1824 size_t alen = uimin(len, ur->ucr_actlen);
1825 error = uiomove(ptr, alen, &uio);
1826 if (error)
1827 goto ret;
1828 }
1829 }
1830 ret:
1831 if (ptr)
1832 kmem_free(ptr, len);
1833 return error;
1834 }
1835 case USB_GET_DEVICEINFO:
1836 usbd_fill_deviceinfo(sc->sc_udev,
1837 (struct usb_device_info *)addr, 0);
1838 break;
1839 case USB_GET_DEVICEINFO_OLD:
1840 {
1841 int ret;
1842 MODULE_CALL_HOOK(usb_subr_30_fill_hook,
1843 (sc->sc_udev, (struct usb_device_info_old *)addr, 0,
1844 usbd_devinfo_vp, usbd_printBCD),
1845 enosys(), ret);
1846 if (ret == 0)
1847 return 0;
1848 return EINVAL;
1849 }
1850 default:
1851 return EINVAL;
1852 }
1853 return 0;
1854 }
1855
/*
 * Character-device ioctl entry point.  Bumps the reference count around
 * the call so a concurrent detach waits for us to finish.
 */
int
ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	/*
	 * NOTE(review): sc_refcnt is modified here without holding
	 * sc_lock, while the detach path examines it under the lock --
	 * this looks racy; confirm against the driver's locking scheme.
	 */
	sc->sc_refcnt++;
	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
	if (--sc->sc_refcnt < 0)
		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
	return error;
}
1873
/*
 * poll(2) entry point.  Readiness is reported per endpoint transfer
 * type; for cases where buffering state is unknown we optimistically
 * claim readiness.  Waiters are recorded via selrecord().
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return POLLHUP;

	/* The control endpoint is not pollable. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	/*
	 * NOTE(review): both pointers are addresses into the fixed
	 * sc_endpoints array and can never be NULL, so this check looks
	 * like dead code -- confirm before removing.
	 */
	if (sce_in == NULL && sce_out == NULL)
		return POLLERR;
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return POLLERR;
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return POLLERR;
	}
#endif

	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Readable when the interrupt clist has data. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Readable when the isoc ring buffer is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Read-ahead: buffered data means readable. */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Write-behind: writable while ring space remains. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return revents;
}
1972
1973 static void
1974 filt_ugenrdetach(struct knote *kn)
1975 {
1976 struct ugen_endpoint *sce = kn->kn_hook;
1977 struct ugen_softc *sc = sce->sc;
1978
1979 mutex_enter(&sc->sc_lock);
1980 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1981 mutex_exit(&sc->sc_lock);
1982 }
1983
1984 static int
1985 filt_ugenread_intr(struct knote *kn, long hint)
1986 {
1987 struct ugen_endpoint *sce = kn->kn_hook;
1988 struct ugen_softc *sc = sce->sc;
1989
1990 if (sc->sc_dying)
1991 return 0;
1992
1993 kn->kn_data = sce->q.c_cc;
1994 return kn->kn_data > 0;
1995 }
1996
1997 static int
1998 filt_ugenread_isoc(struct knote *kn, long hint)
1999 {
2000 struct ugen_endpoint *sce = kn->kn_hook;
2001 struct ugen_softc *sc = sce->sc;
2002
2003 if (sc->sc_dying)
2004 return 0;
2005
2006 if (sce->cur == sce->fill)
2007 return 0;
2008
2009 if (sce->cur < sce->fill)
2010 kn->kn_data = sce->fill - sce->cur;
2011 else
2012 kn->kn_data = (sce->limit - sce->cur) +
2013 (sce->fill - sce->ibuf);
2014
2015 return 1;
2016 }
2017
2018 static int
2019 filt_ugenread_bulk(struct knote *kn, long hint)
2020 {
2021 struct ugen_endpoint *sce = kn->kn_hook;
2022 struct ugen_softc *sc = sce->sc;
2023
2024 if (sc->sc_dying)
2025 return 0;
2026
2027 if (!(sce->state & UGEN_BULK_RA))
2028 /*
2029 * We have no easy way of determining if a read will
2030 * yield any data or a write will happen.
2031 * So, emulate "seltrue".
2032 */
2033 return filt_seltrue(kn, hint);
2034
2035 if (sce->ra_wb_used == 0)
2036 return 0;
2037
2038 kn->kn_data = sce->ra_wb_used;
2039
2040 return 1;
2041 }
2042
2043 static int
2044 filt_ugenwrite_bulk(struct knote *kn, long hint)
2045 {
2046 struct ugen_endpoint *sce = kn->kn_hook;
2047 struct ugen_softc *sc = sce->sc;
2048
2049 if (sc->sc_dying)
2050 return 0;
2051
2052 if (!(sce->state & UGEN_BULK_WB))
2053 /*
2054 * We have no easy way of determining if a read will
2055 * yield any data or a write will happen.
2056 * So, emulate "seltrue".
2057 */
2058 return filt_seltrue(kn, hint);
2059
2060 if (sce->ra_wb_used == sce->limit - sce->ibuf)
2061 return 0;
2062
2063 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2064
2065 return 1;
2066 }
2067
/* kqueue filter ops for interrupt-endpoint reads (clist-backed). */
static const struct filterops ugenread_intr_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_intr,
};

/* kqueue filter ops for isochronous-endpoint reads (ring-buffer-backed). */
static const struct filterops ugenread_isoc_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_isoc,
};

/* kqueue filter ops for bulk-endpoint reads (read-ahead mode). */
static const struct filterops ugenread_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_bulk,
};

/*
 * kqueue filter ops for bulk-endpoint writes (write-behind mode).
 * Detach is shared with the read filters: both hang off the same
 * per-endpoint selinfo klist.
 */
static const struct filterops ugenwrite_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenwrite_bulk,
};
2095
/*
 * kqueue(2) attach entry point.  Selects the filter ops that match the
 * endpoint's transfer type and hooks the knote onto the endpoint's
 * selinfo klist (shared between read and write filters).
 */
int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	/* The control endpoint cannot be monitored. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		/* Pick the event filter matching the transfer type. */
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return EINVAL;

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	default:
		return EINVAL;
	}

	kn->kn_hook = sce;

	/* Insert under the softc lock, which protects the klist. */
	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return 0;
}
2164