/*	$NetBSD: ugen.c,v 1.138 2018/02/20 15:48:37 ws Exp $	*/
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.138 2018/02/20 15:48:37 ws Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #include "opt_usb.h"
45 #endif
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/kmem.h>
51 #include <sys/device.h>
52 #include <sys/ioctl.h>
53 #include <sys/conf.h>
54 #include <sys/tty.h>
55 #include <sys/file.h>
56 #include <sys/select.h>
57 #include <sys/proc.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60
61 #include <dev/usb/usb.h>
62 #include <dev/usb/usbdi.h>
63 #include <dev/usb/usbdi_util.h>
64
#ifdef UGEN_DEBUG
/* Debug printing is gated on the ugendebug level; compiled out otherwise. */
#define DPRINTF(x)	if (ugendebug) printf x
#define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
int	ugendebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define	UGEN_CHUNK	128	/* chunk size for read */
#define	UGEN_IBSIZE	1020	/* buffer size */
#define	UGEN_BBSIZE	1024	/* bounce-buffer size for bulk/intr transfers */

#define	UGEN_NISOREQS	4	/* number of outstanding xfer requests */
#define	UGEN_NISORFRMS	8	/* number of transactions per req */
#define	UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)

#define	UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
#define	UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
84
/*
 * One outstanding isochronous read request.  UGEN_NISOREQS of these are
 * kept in flight per isoc IN endpoint; each carries UGEN_NISORFRMS frames.
 */
struct isoreq {
	struct ugen_endpoint *sce;	/* back pointer to owning endpoint */
	struct usbd_xfer *xfer;		/* the USB transfer for this request */
	void *dmabuf;			/* DMA buffer belonging to xfer */
	uint16_t sizes[UGEN_NISORFRMS];	/* per-frame lengths (in/out) */
};
91
/*
 * Per-endpoint, per-direction state.  Everything before the
 * UGEN_ENDPOINT_NONZERO_CRUFT offset is wiped by ugen_set_config(); the
 * selinfo and condvar at the tail survive reconfiguration.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back pointer to softc */
	usb_endpoint_descriptor_t *edesc; /* descriptor; NULL if absent */
	struct usbd_interface *iface;	/* interface this endpoint lives on */
	int state;			/* UGEN_* flag bits below */
#define	UGEN_ASLP	0x02	/* waiting for data */
#define	UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define	UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define	UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define	UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;	/* open pipe; NULL when closed */
	struct clist q;			/* input queue (interrupt endpoints) */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	uint32_t timeout;	/* xfer timeout in ms; 0 = no timeout */
	uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	uint32_t ra_wb_used;	 /* how much is in buffer */
	uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;	/* the single RA/WB transfer */
	struct isoreq isoreqs[UGEN_NISOREQS]; /* isoc read request ring */
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;	/* poll/select/kqueue state */
	kcondvar_t cv;		/* sleepers waiting for buffer activity */
};
120
/* Per-device driver instance. */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	struct usbd_device *sc_udev;	/* the USB device we drive */

	kmutex_t sc_lock;		/* protects endpoint state & refcnt */
	kcondvar_t sc_detach_cv;	/* detach waits here for last user */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* nonzero while endpoint open */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;			/* active read/write entrants */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for uiomove */
	u_char sc_dying;		/* set when device is going away */
};
137
/* Character device entry points. */
dev_type_open(ugenopen);
dev_type_close(ugenclose);
dev_type_read(ugenread);
dev_type_write(ugenwrite);
dev_type_ioctl(ugenioctl);
dev_type_poll(ugenpoll);
dev_type_kqfilter(ugenkqfilter);

const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
160
/* Transfer-completion callbacks. */
Static void ugenintr(struct usbd_xfer *, void *,
		     usbd_status);
Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
			    usbd_status);
Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
			     usbd_status);
Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
			     usbd_status);
/* Workers for the read/write/ioctl entry points. */
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
			 void *, int, struct lwp *);
Static int ugen_set_config(struct ugen_softc *, int, int);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
					       int, int *);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int ugen_get_alt_index(struct ugen_softc *, int);
Static void ugen_clear_endpoints(struct ugen_softc *);

/* Minor number encoding: high nibble = unit, low nibble = endpoint. */
#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))

/* Autoconfiguration glue: ugen attaches whole devices, ugenif interfaces. */
int             ugenif_match(device_t, cfdata_t, void *);
void            ugenif_attach(device_t, device_t, void *);
int             ugen_match(device_t, cfdata_t, void *);
void            ugen_attach(device_t, device_t, void *);
int             ugen_detach(device_t, int);
int             ugen_activate(device_t, enum devact);
extern struct cfdriver ugen_cd;
CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match,
    ugen_attach, ugen_detach, ugen_activate);
CFATTACH_DECL_NEW(ugenif, sizeof(struct ugen_softc), ugenif_match,
    ugenif_attach, ugen_detach, ugen_activate);

/* toggle to control attach priority. -1 means "let autoconf decide" */
int ugen_override = -1;
198
199 int
200 ugen_match(device_t parent, cfdata_t match, void *aux)
201 {
202 struct usb_attach_arg *uaa = aux;
203 int override;
204
205 if (ugen_override != -1)
206 override = ugen_override;
207 else
208 override = match->cf_flags & 1;
209
210 if (override)
211 return UMATCH_HIGHEST;
212 else if (uaa->uaa_usegeneric)
213 return UMATCH_GENERIC;
214 else
215 return UMATCH_NONE;
216 }
217
218 int
219 ugenif_match(device_t parent, cfdata_t match, void *aux)
220 {
221 if (match->cf_flags & 1)
222 return UMATCH_HIGHEST;
223 else
224 return UMATCH_NONE;
225 }
226
227 void
228 ugen_attach(device_t parent, device_t self, void *aux)
229 {
230 struct usb_attach_arg *uaa = aux;
231 struct usbif_attach_arg uiaa;
232
233 memset(&uiaa, 0, sizeof uiaa);
234 uiaa.uiaa_port = uaa->uaa_port;
235 uiaa.uiaa_vendor = uaa->uaa_vendor;
236 uiaa.uiaa_product = uaa->uaa_product;
237 uiaa.uiaa_release = uaa->uaa_release;
238 uiaa.uiaa_device = uaa->uaa_device;
239 uiaa.uiaa_configno = -1;
240 uiaa.uiaa_ifaceno = -1;
241
242 ugenif_attach(parent, self, &uiaa);
243 }
244
/*
 * Common attach path for both whole-device (ugen) and per-interface
 * (ugenif) attachment.  Initializes locks and per-endpoint select/cv
 * state, optionally selects configuration index 0, then builds the
 * endpoint tables for the current configuration.
 */
void
ugenif_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uiaa->uiaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uiaa->uiaa_device;

	/*
	 * Initialize the parts of every endpoint that persist across
	 * reconfiguration (ugen_set_config() deliberately leaves these
	 * untouched).
	 */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	if (uiaa->uiaa_ifaceno < 0) {
		/*
		 * If we attach the whole device,
		 * set configuration index 0, the default one.
		 */
		err = usbd_set_config_index(udev, 0, 0);
		if (err) {
			aprint_error_dev(self,
			    "setting configuration index 0 failed\n");
			sc->sc_dying = 1;
			return;
		}
	}

	/* Get current configuration */
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf, uiaa->uiaa_ifaceno < 0);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

}
310
311 Static void
312 ugen_clear_endpoints(struct ugen_softc *sc)
313 {
314
315 /* Clear out the old info, but leave the selinfo and cv initialised. */
316 for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
317 for (int dir = OUT; dir <= IN; dir++) {
318 struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
319 memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
320 }
321 }
322 }
323
/*
 * Switch the device to configuration `configno' and rebuild the local
 * endpoint tables from its descriptors.  When `chkopen' is set, refuse
 * with USBD_IN_USE if any non-control endpoint is currently open (the
 * descriptors those opens rely on would be invalidated).
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno, int chkopen)
{
	struct usbd_device *dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    device_xname(sc->sc_dev), configno, sc));

	if (chkopen) {
		/*
		 * We start at 1, not 0, because we don't care whether the
		 * control endpoint is open or not. It is always present.
		 */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
			if (sc->sc_is_open[endptno]) {
				DPRINTFN(1,
				     ("ugen_set_config: %s - endpoint %d is open\n",
				      device_xname(sc->sc_dev), endptno));
				return USBD_IN_USE;
			}
	}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return err;
	}

	/* Wipe stale endpoint state (selinfo/cv are preserved). */
	ugen_clear_endpoints(sc);

	err = usbd_interface_count(dev, &niface);
	if (err)
		return err;

	/* Walk every endpoint of every interface and record it. */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return err;
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return err;
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return USBD_NORMAL_COMPLETION;
}
393
/*
 * open(2) entry point.  The endpoint number is encoded in the minor.
 * The control endpoint allows multiple concurrent opens; all others are
 * exclusive.  Opening sets up the pipes and any transfer machinery
 * (clist queue for interrupt-in, circular buffer plus a ring of xfers
 * for isochronous-in) for each direction the open flags request.
 */
int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	struct usbd_xfer *xfer;
	int i, j;

	sc = device_lookup_private(&ugen_cd, unit);
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return 0;
	}

	if (sc->sc_is_open[endpt])
		return EBUSY;

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce->edesc == NULL)
				return ENXIO;
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Interrupt-out is a plain pipe; no buffering. */
			if (dir == OUT) {
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return EIO;
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			sce->ibuf = kmem_alloc(isize, KM_SLEEP);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			/* Incoming interrupt data is staged in a clist. */
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return ENOMEM;
			}
			err = usbd_open_pipe_intr(sce->iface,
				  edesc->bEndpointAddress,
				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
				  sce->ibuf, isize, ugenintr,
				  USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return EIO;
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return EIO;
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			/* Only isochronous IN is supported. */
			if (dir == OUT)
				return EINVAL;
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			/* Circular buffer holding UGEN_NISOFRAMES frames. */
			sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
				KM_SLEEP);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
				sce->ibuf = NULL;
				return EIO;
			}
			/* Queue the full ring of read requests up front. */
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				err = usbd_create_xfer(sce->pipeh,
				    isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
				    &xfer);
				if (err)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
				for (j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
				    sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
				    ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			/* Unwind the partially-built isoc request ring. */
			while (--i >= 0) /* implicit buffer free */
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
			sce->ibuf = NULL;
			return ENOMEM;
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			/* Non-endpoint-0 control pipes are not supported. */
			return EINVAL;
		}
	}
	sc->sc_is_open[endpt] = 1;
	return 0;
}
541
/*
 * close(2) entry point.  Aborts and closes the pipes opened by
 * ugenopen() for each direction the open flags covered, and frees the
 * per-mode buffering (clist, isoc ring, RA/WB buffer).
 */
int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return EINVAL;
	}
#endif

	/* Control endpoint has no pipes of its own to tear down. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return 0;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		usbd_abort_pipe(sce->pipeh);

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		/* msize is the size ibuf was allocated with (0 = none). */
		int msize = 0;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			/* RA/WB mode allocated an xfer + circular buffer. */
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return 0;
}
616
/*
 * Worker for read(2).  Behavior depends on the endpoint's transfer type:
 *
 *  - interrupt: sleep until the completion handler has queued data into
 *    the clist, then drain it through sc_buffer to userspace;
 *  - bulk in read-ahead mode: consume from the circular buffer filled by
 *    ugen_bulkra_intr(), restarting the background xfer if it stopped
 *    because the buffer was full;
 *  - bulk (plain): synchronous bounce-buffered transfers of up to
 *    UGEN_BBSIZE each;
 *  - isochronous: consume from the circular buffer filled by
 *    ugen_isoc_rintr().
 *
 * IO_NDELAY yields EWOULDBLOCK when no data is available.  Returns an
 * errno; sleeping paths wake with EIO if the device is detaching.
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	uint32_t n, tn;
	struct usbd_xfer *xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for the read-ahead buffer to fill. */
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					/* Clamp to end of circular buffer. */
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain bulk: synchronous bounce-buffered transfers. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(xfer, sce->pipeh,
			    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
			    sce->timeout, sc->sc_buffer, &tn);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* Buffer is empty while cur has caught up with fill. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			sce->state |= UGEN_ASLP;
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			/* Copy at most up to fill or up to the wrap point. */
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return ENXIO;
	}
	return error;
}
827
828 int
829 ugenread(dev_t dev, struct uio *uio, int flag)
830 {
831 int endpt = UGENENDPOINT(dev);
832 struct ugen_softc *sc;
833 int error;
834
835 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
836 if (sc == NULL || sc->sc_dying)
837 return ENXIO;
838
839 mutex_enter(&sc->sc_lock);
840 sc->sc_refcnt++;
841 mutex_exit(&sc->sc_lock);
842
843 error = ugen_do_read(sc, endpt, uio, flag);
844
845 mutex_enter(&sc->sc_lock);
846 if (--sc->sc_refcnt < 0)
847 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
848 mutex_exit(&sc->sc_lock);
849
850 return error;
851 }
852
/*
 * Worker for write(2).  Behavior depends on the endpoint's transfer type:
 *
 *  - bulk in write-behind mode: copy user data into the circular buffer
 *    and (re)start the background xfer drained by ugen_bulkwb_intr();
 *  - bulk (plain): synchronous bounce-buffered transfers of up to
 *    UGEN_BBSIZE each;
 *  - interrupt: synchronous transfers of at most wMaxPacketSize.
 *
 * IO_NDELAY yields EWOULDBLOCK when the write-behind buffer is full.
 * Returns an errno; sleeping paths wake with EIO on detach.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	uint32_t n;
	int error = 0;
	uint32_t tn;
	char *dbuf;
	struct usbd_xfer *xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			/* Buffer full and caller wants non-blocking. */
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for room in the circular buffer. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					/* Clamp to end of circular buffer. */
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					/*
					 * Copy out of the circular buffer in
					 * up to two pieces (wrap-around).
					 */
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer, sce, NULL, n,
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain bulk: synchronous bounce-buffered transfers. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0, sce->timeout,
			    sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_INTERRUPT:
		/* One packet-sized synchronous transfer at a time. */
		error = usbd_create_xfer(sce->pipeh,
		    UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
		if (error)
			return error;
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	default:
		return ENXIO;
	}
	return error;
}
1017
1018 int
1019 ugenwrite(dev_t dev, struct uio *uio, int flag)
1020 {
1021 int endpt = UGENENDPOINT(dev);
1022 struct ugen_softc *sc;
1023 int error;
1024
1025 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1026 if (sc == NULL || sc->sc_dying)
1027 return ENXIO;
1028
1029 mutex_enter(&sc->sc_lock);
1030 sc->sc_refcnt++;
1031 mutex_exit(&sc->sc_lock);
1032
1033 error = ugen_do_write(sc, endpt, uio, flag);
1034
1035 mutex_enter(&sc->sc_lock);
1036 if (--sc->sc_refcnt < 0)
1037 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1038 mutex_exit(&sc->sc_lock);
1039
1040 return error;
1041 }
1042
1043 int
1044 ugen_activate(device_t self, enum devact act)
1045 {
1046 struct ugen_softc *sc = device_private(self);
1047
1048 switch (act) {
1049 case DVACT_DEACTIVATE:
1050 sc->sc_dying = 1;
1051 return 0;
1052 default:
1053 return EOPNOTSUPP;
1054 }
1055 }
1056
1057 int
1058 ugen_detach(device_t self, int flags)
1059 {
1060 struct ugen_softc *sc = device_private(self);
1061 struct ugen_endpoint *sce;
1062 int i, dir;
1063 int maj, mn;
1064
1065 DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
1066
1067 sc->sc_dying = 1;
1068 pmf_device_deregister(self);
1069 /* Abort all pipes. Causes processes waiting for transfer to wake. */
1070 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1071 for (dir = OUT; dir <= IN; dir++) {
1072 sce = &sc->sc_endpoints[i][dir];
1073 if (sce->pipeh)
1074 usbd_abort_pipe(sce->pipeh);
1075 }
1076 }
1077
1078 mutex_enter(&sc->sc_lock);
1079 if (--sc->sc_refcnt >= 0) {
1080 /* Wake everyone */
1081 for (i = 0; i < USB_MAX_ENDPOINTS; i++)
1082 cv_signal(&sc->sc_endpoints[i][IN].cv);
1083 /* Wait for processes to go away. */
1084 usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
1085 }
1086 mutex_exit(&sc->sc_lock);
1087
1088 /* locate the major number */
1089 maj = cdevsw_lookup_major(&ugen_cdevsw);
1090
1091 /* Nuke the vnodes for any open instances (calls close). */
1092 mn = device_unit(self) * USB_MAX_ENDPOINTS;
1093 vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
1094
1095 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);
1096
1097 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1098 for (dir = OUT; dir <= IN; dir++) {
1099 sce = &sc->sc_endpoints[i][dir];
1100 seldestroy(&sce->rsel);
1101 cv_destroy(&sce->cv);
1102 }
1103 }
1104
1105 cv_destroy(&sc->sc_detach_cv);
1106 mutex_destroy(&sc->sc_lock);
1107
1108 return 0;
1109 }
1110
/*
 * Completion callback for interrupt-in pipes (opened with
 * usbd_open_pipe_intr in ugenopen).  Moves the received bytes from
 * sce->ibuf into the clist and wakes any sleeping reader/poller.
 */
Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	/* Pipe is being aborted/closed; nothing to do. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	/* Queue the data; overflow is silently dropped by b_to_q. */
	(void)b_to_q(ibuf, count, &sce->q);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1148
1149 Static void
1150 ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
1151 usbd_status status)
1152 {
1153 struct isoreq *req = addr;
1154 struct ugen_endpoint *sce = req->sce;
1155 struct ugen_softc *sc = sce->sc;
1156 uint32_t count, n;
1157 int i, isize;
1158
1159 /* Return if we are aborting. */
1160 if (status == USBD_CANCELLED)
1161 return;
1162
1163 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1164 DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1165 (long)(req - sce->isoreqs), count));
1166
1167 /* throw away oldest input if the buffer is full */
1168 if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1169 sce->cur += count;
1170 if(sce->cur >= sce->limit)
1171 sce->cur = sce->ibuf + (sce->limit - sce->cur);
1172 DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1173 count));
1174 }
1175
1176 isize = UGETW(sce->edesc->wMaxPacketSize);
1177 for (i = 0; i < UGEN_NISORFRMS; i++) {
1178 uint32_t actlen = req->sizes[i];
1179 char const *tbuf = (char const *)req->dmabuf + isize * i;
1180
1181 /* copy data to buffer */
1182 while (actlen > 0) {
1183 n = min(actlen, sce->limit - sce->fill);
1184 memcpy(sce->fill, tbuf, n);
1185
1186 tbuf += n;
1187 actlen -= n;
1188 sce->fill += n;
1189 if(sce->fill == sce->limit)
1190 sce->fill = sce->ibuf;
1191 }
1192
1193 /* setup size for next transfer */
1194 req->sizes[i] = isize;
1195 }
1196
1197 usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0,
1198 ugen_isoc_rintr);
1199 (void)usbd_transfer(xfer);
1200
1201 mutex_enter(&sc->sc_lock);
1202 if (sce->state & UGEN_ASLP) {
1203 sce->state &= ~UGEN_ASLP;
1204 DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1205 cv_signal(&sce->cv);
1206 }
1207 mutex_exit(&sc->sc_lock);
1208 selnotify(&sce->rsel, 0, 0);
1209 }
1210
/*
 * Completion callback for bulk read-ahead transfers.
 * Copies the newly arrived data into the endpoint's ring buffer
 * (ibuf..limit, fill = producer), queues the next transfer while room
 * remains, and wakes sleeping readers and poll/kqueue waiters.
 * Runs in USB completion (softint) context; must not sleep.
 *
 * NOTE(review): sce->state (UGEN_RA_WB_STOP) and the ring pointers are
 * updated here without holding sc->sc_lock; presumably serialized
 * against the read path by pipe abort on teardown — confirm.
 */
Static void
ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		/* Stop read-ahead; the next read() restarts it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer, wrapping at the end of the ring. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		/* Remainder wraps to the start of the ring. */
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary (n = free ring space). */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce, NULL, min(n, sce->ra_wb_xferlen), 0,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Ring is full: pause until a read drains it. */
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a sleeping reader, then notify select/poll/kqueue. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1279
/*
 * Completion callback for bulk write-behind transfers.
 * Consumes the bytes just written from the endpoint's ring buffer
 * (cur = consumer), queues the next transfer while buffered data
 * remains, and wakes sleeping writers and poll/kqueue waiters.
 * Runs in USB completion (softint) context; must not sleep.
 *
 * NOTE(review): as in ugen_bulkra_intr, sce->state and the ring
 * pointers are updated without sc->sc_lock — confirm serialization.
 */
Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		/* Stop write-behind; the next write() restarts it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers (consumer side), wrapping at the end. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer (possibly in two wrapped pieces) */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
		    ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Ring is empty: pause until a write refills it. */
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a sleeping writer, then notify select/poll/kqueue. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1347
1348 Static usbd_status
1349 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1350 {
1351 struct usbd_interface *iface;
1352 usb_endpoint_descriptor_t *ed;
1353 usbd_status err;
1354 struct ugen_endpoint *sce;
1355 uint8_t niface, nendpt, endptno, endpt;
1356 int dir;
1357
1358 DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1359
1360 err = usbd_interface_count(sc->sc_udev, &niface);
1361 if (err)
1362 return err;
1363 if (ifaceidx < 0 || ifaceidx >= niface)
1364 return USBD_INVAL;
1365
1366 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1367 if (err)
1368 return err;
1369 err = usbd_endpoint_count(iface, &nendpt);
1370 if (err)
1371 return err;
1372
1373 /* change setting */
1374 err = usbd_set_interface(iface, altno);
1375 if (err)
1376 return err;
1377
1378 err = usbd_endpoint_count(iface, &nendpt);
1379 if (err)
1380 return err;
1381
1382 ugen_clear_endpoints(sc);
1383
1384 for (endptno = 0; endptno < nendpt; endptno++) {
1385 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1386 KASSERT(ed != NULL);
1387 endpt = ed->bEndpointAddress;
1388 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1389 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1390 sce->sc = sc;
1391 sce->edesc = ed;
1392 sce->iface = iface;
1393 }
1394 return 0;
1395 }
1396
1397 /* Retrieve a complete descriptor for a certain device and index. */
1398 Static usb_config_descriptor_t *
1399 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1400 {
1401 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1402 int len;
1403 usbd_status err;
1404
1405 if (index == USB_CURRENT_CONFIG_INDEX) {
1406 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1407 len = UGETW(tdesc->wTotalLength);
1408 if (lenp)
1409 *lenp = len;
1410 cdesc = kmem_alloc(len, KM_SLEEP);
1411 memcpy(cdesc, tdesc, len);
1412 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1413 } else {
1414 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1415 if (err)
1416 return 0;
1417 len = UGETW(cdescr.wTotalLength);
1418 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1419 if (lenp)
1420 *lenp = len;
1421 cdesc = kmem_alloc(len, KM_SLEEP);
1422 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1423 if (err) {
1424 kmem_free(cdesc, len);
1425 return 0;
1426 }
1427 }
1428 return cdesc;
1429 }
1430
1431 Static int
1432 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1433 {
1434 struct usbd_interface *iface;
1435 usbd_status err;
1436
1437 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1438 if (err)
1439 return -1;
1440 return usbd_get_interface_altindex(iface);
1441 }
1442
1443 Static int
1444 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1445 void *addr, int flag, struct lwp *l)
1446 {
1447 struct ugen_endpoint *sce;
1448 usbd_status err;
1449 struct usbd_interface *iface;
1450 struct usb_config_desc *cd;
1451 usb_config_descriptor_t *cdesc;
1452 struct usb_interface_desc *id;
1453 usb_interface_descriptor_t *idesc;
1454 struct usb_endpoint_desc *ed;
1455 usb_endpoint_descriptor_t *edesc;
1456 struct usb_alt_interface *ai;
1457 struct usb_string_desc *si;
1458 uint8_t conf, alt;
1459 int cdesclen;
1460 int error;
1461
1462 DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
1463 if (sc->sc_dying)
1464 return EIO;
1465
1466 switch (cmd) {
1467 case FIONBIO:
1468 /* All handled in the upper FS layer. */
1469 return 0;
1470 case USB_SET_SHORT_XFER:
1471 if (endpt == USB_CONTROL_ENDPOINT)
1472 return EINVAL;
1473 /* This flag only affects read */
1474 sce = &sc->sc_endpoints[endpt][IN];
1475 if (sce == NULL || sce->pipeh == NULL)
1476 return EINVAL;
1477 if (*(int *)addr)
1478 sce->state |= UGEN_SHORT_OK;
1479 else
1480 sce->state &= ~UGEN_SHORT_OK;
1481 return 0;
1482 case USB_SET_TIMEOUT:
1483 sce = &sc->sc_endpoints[endpt][IN];
1484 if (sce == NULL
1485 /* XXX this shouldn't happen, but the distinction between
1486 input and output pipes isn't clear enough.
1487 || sce->pipeh == NULL */
1488 )
1489 return EINVAL;
1490 sce->timeout = *(int *)addr;
1491 return 0;
1492 case USB_SET_BULK_RA:
1493 if (endpt == USB_CONTROL_ENDPOINT)
1494 return EINVAL;
1495 sce = &sc->sc_endpoints[endpt][IN];
1496 if (sce == NULL || sce->pipeh == NULL)
1497 return EINVAL;
1498 edesc = sce->edesc;
1499 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1500 return EINVAL;
1501
1502 if (*(int *)addr) {
1503 /* Only turn RA on if it's currently off. */
1504 if (sce->state & UGEN_BULK_RA)
1505 return 0;
1506
1507 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1508 /* shouldn't happen */
1509 return EINVAL;
1510 error = usbd_create_xfer(sce->pipeh,
1511 sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
1512 if (error)
1513 return error;
1514 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1515 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1516 sce->fill = sce->cur = sce->ibuf;
1517 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1518 sce->ra_wb_used = 0;
1519 sce->state |= UGEN_BULK_RA;
1520 sce->state &= ~UGEN_RA_WB_STOP;
1521 /* Now start reading. */
1522 usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL,
1523 min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1524 0, USBD_NO_TIMEOUT, ugen_bulkra_intr);
1525 err = usbd_transfer(sce->ra_wb_xfer);
1526 if (err != USBD_IN_PROGRESS) {
1527 sce->state &= ~UGEN_BULK_RA;
1528 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1529 sce->ibuf = NULL;
1530 usbd_destroy_xfer(sce->ra_wb_xfer);
1531 return EIO;
1532 }
1533 } else {
1534 /* Only turn RA off if it's currently on. */
1535 if (!(sce->state & UGEN_BULK_RA))
1536 return 0;
1537
1538 sce->state &= ~UGEN_BULK_RA;
1539 usbd_abort_pipe(sce->pipeh);
1540 usbd_destroy_xfer(sce->ra_wb_xfer);
1541 /*
1542 * XXX Discard whatever's in the buffer, but we
1543 * should keep it around and drain the buffer
1544 * instead.
1545 */
1546 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1547 sce->ibuf = NULL;
1548 }
1549 return 0;
1550 case USB_SET_BULK_WB:
1551 if (endpt == USB_CONTROL_ENDPOINT)
1552 return EINVAL;
1553 sce = &sc->sc_endpoints[endpt][OUT];
1554 if (sce == NULL || sce->pipeh == NULL)
1555 return EINVAL;
1556 edesc = sce->edesc;
1557 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1558 return EINVAL;
1559
1560 if (*(int *)addr) {
1561 /* Only turn WB on if it's currently off. */
1562 if (sce->state & UGEN_BULK_WB)
1563 return 0;
1564
1565 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1566 /* shouldn't happen */
1567 return EINVAL;
1568 error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize,
1569 0, 0, &sce->ra_wb_xfer);
1570 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1571 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1572 sce->fill = sce->cur = sce->ibuf;
1573 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1574 sce->ra_wb_used = 0;
1575 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1576 } else {
1577 /* Only turn WB off if it's currently on. */
1578 if (!(sce->state & UGEN_BULK_WB))
1579 return 0;
1580
1581 sce->state &= ~UGEN_BULK_WB;
1582 /*
1583 * XXX Discard whatever's in the buffer, but we
1584 * should keep it around and keep writing to
1585 * drain the buffer instead.
1586 */
1587 usbd_abort_pipe(sce->pipeh);
1588 usbd_destroy_xfer(sce->ra_wb_xfer);
1589 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1590 sce->ibuf = NULL;
1591 }
1592 return 0;
1593 case USB_SET_BULK_RA_OPT:
1594 case USB_SET_BULK_WB_OPT:
1595 {
1596 struct usb_bulk_ra_wb_opt *opt;
1597
1598 if (endpt == USB_CONTROL_ENDPOINT)
1599 return EINVAL;
1600 opt = (struct usb_bulk_ra_wb_opt *)addr;
1601 if (cmd == USB_SET_BULK_RA_OPT)
1602 sce = &sc->sc_endpoints[endpt][IN];
1603 else
1604 sce = &sc->sc_endpoints[endpt][OUT];
1605 if (sce == NULL || sce->pipeh == NULL)
1606 return EINVAL;
1607 if (opt->ra_wb_buffer_size < 1 ||
1608 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1609 opt->ra_wb_request_size < 1 ||
1610 opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1611 return EINVAL;
1612 /*
1613 * XXX These changes do not take effect until the
1614 * next time RA/WB mode is enabled but they ought to
1615 * take effect immediately.
1616 */
1617 sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1618 sce->ra_wb_reqsize = opt->ra_wb_request_size;
1619 return 0;
1620 }
1621 default:
1622 break;
1623 }
1624
1625 if (endpt != USB_CONTROL_ENDPOINT)
1626 return EINVAL;
1627
1628 switch (cmd) {
1629 #ifdef UGEN_DEBUG
1630 case USB_SETDEBUG:
1631 ugendebug = *(int *)addr;
1632 break;
1633 #endif
1634 case USB_GET_CONFIG:
1635 err = usbd_get_config(sc->sc_udev, &conf);
1636 if (err)
1637 return EIO;
1638 *(int *)addr = conf;
1639 break;
1640 case USB_SET_CONFIG:
1641 if (!(flag & FWRITE))
1642 return EPERM;
1643 err = ugen_set_config(sc, *(int *)addr, 1);
1644 switch (err) {
1645 case USBD_NORMAL_COMPLETION:
1646 break;
1647 case USBD_IN_USE:
1648 return EBUSY;
1649 default:
1650 return EIO;
1651 }
1652 break;
1653 case USB_GET_ALTINTERFACE:
1654 ai = (struct usb_alt_interface *)addr;
1655 err = usbd_device2interface_handle(sc->sc_udev,
1656 ai->uai_interface_index, &iface);
1657 if (err)
1658 return EINVAL;
1659 idesc = usbd_get_interface_descriptor(iface);
1660 if (idesc == NULL)
1661 return EIO;
1662 ai->uai_alt_no = idesc->bAlternateSetting;
1663 break;
1664 case USB_SET_ALTINTERFACE:
1665 if (!(flag & FWRITE))
1666 return EPERM;
1667 ai = (struct usb_alt_interface *)addr;
1668 err = usbd_device2interface_handle(sc->sc_udev,
1669 ai->uai_interface_index, &iface);
1670 if (err)
1671 return EINVAL;
1672 err = ugen_set_interface(sc, ai->uai_interface_index,
1673 ai->uai_alt_no);
1674 if (err)
1675 return EINVAL;
1676 break;
1677 case USB_GET_NO_ALT:
1678 ai = (struct usb_alt_interface *)addr;
1679 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
1680 if (cdesc == NULL)
1681 return EINVAL;
1682 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1683 if (idesc == NULL) {
1684 kmem_free(cdesc, cdesclen);
1685 return EINVAL;
1686 }
1687 ai->uai_alt_no = usbd_get_no_alts(cdesc,
1688 idesc->bInterfaceNumber);
1689 kmem_free(cdesc, cdesclen);
1690 break;
1691 case USB_GET_DEVICE_DESC:
1692 *(usb_device_descriptor_t *)addr =
1693 *usbd_get_device_descriptor(sc->sc_udev);
1694 break;
1695 case USB_GET_CONFIG_DESC:
1696 cd = (struct usb_config_desc *)addr;
1697 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
1698 if (cdesc == NULL)
1699 return EINVAL;
1700 cd->ucd_desc = *cdesc;
1701 kmem_free(cdesc, cdesclen);
1702 break;
1703 case USB_GET_INTERFACE_DESC:
1704 id = (struct usb_interface_desc *)addr;
1705 cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
1706 if (cdesc == NULL)
1707 return EINVAL;
1708 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1709 id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1710 alt = ugen_get_alt_index(sc, id->uid_interface_index);
1711 else
1712 alt = id->uid_alt_index;
1713 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1714 if (idesc == NULL) {
1715 kmem_free(cdesc, cdesclen);
1716 return EINVAL;
1717 }
1718 id->uid_desc = *idesc;
1719 kmem_free(cdesc, cdesclen);
1720 break;
1721 case USB_GET_ENDPOINT_DESC:
1722 ed = (struct usb_endpoint_desc *)addr;
1723 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
1724 if (cdesc == NULL)
1725 return EINVAL;
1726 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1727 ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1728 alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1729 else
1730 alt = ed->ued_alt_index;
1731 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1732 alt, ed->ued_endpoint_index);
1733 if (edesc == NULL) {
1734 kmem_free(cdesc, cdesclen);
1735 return EINVAL;
1736 }
1737 ed->ued_desc = *edesc;
1738 kmem_free(cdesc, cdesclen);
1739 break;
1740 case USB_GET_FULL_DESC:
1741 {
1742 int len;
1743 struct iovec iov;
1744 struct uio uio;
1745 struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1746
1747 cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
1748 if (cdesc == NULL)
1749 return EINVAL;
1750 len = cdesclen;
1751 if (len > fd->ufd_size)
1752 len = fd->ufd_size;
1753 iov.iov_base = (void *)fd->ufd_data;
1754 iov.iov_len = len;
1755 uio.uio_iov = &iov;
1756 uio.uio_iovcnt = 1;
1757 uio.uio_resid = len;
1758 uio.uio_offset = 0;
1759 uio.uio_rw = UIO_READ;
1760 uio.uio_vmspace = l->l_proc->p_vmspace;
1761 error = uiomove((void *)cdesc, len, &uio);
1762 kmem_free(cdesc, cdesclen);
1763 return error;
1764 }
1765 case USB_GET_STRING_DESC: {
1766 int len;
1767 si = (struct usb_string_desc *)addr;
1768 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1769 si->usd_language_id, &si->usd_desc, &len);
1770 if (err)
1771 return EINVAL;
1772 break;
1773 }
1774 case USB_DO_REQUEST:
1775 {
1776 struct usb_ctl_request *ur = (void *)addr;
1777 int len = UGETW(ur->ucr_request.wLength);
1778 struct iovec iov;
1779 struct uio uio;
1780 void *ptr = 0;
1781 usbd_status xerr;
1782
1783 error = 0;
1784
1785 if (!(flag & FWRITE))
1786 return EPERM;
1787 /* Avoid requests that would damage the bus integrity. */
1788 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1789 ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
1790 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1791 ur->ucr_request.bRequest == UR_SET_CONFIG) ||
1792 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
1793 ur->ucr_request.bRequest == UR_SET_INTERFACE))
1794 return EINVAL;
1795
1796 if (len < 0 || len > 32767)
1797 return EINVAL;
1798 if (len != 0) {
1799 iov.iov_base = (void *)ur->ucr_data;
1800 iov.iov_len = len;
1801 uio.uio_iov = &iov;
1802 uio.uio_iovcnt = 1;
1803 uio.uio_resid = len;
1804 uio.uio_offset = 0;
1805 uio.uio_rw =
1806 ur->ucr_request.bmRequestType & UT_READ ?
1807 UIO_READ : UIO_WRITE;
1808 uio.uio_vmspace = l->l_proc->p_vmspace;
1809 ptr = kmem_alloc(len, KM_SLEEP);
1810 if (uio.uio_rw == UIO_WRITE) {
1811 error = uiomove(ptr, len, &uio);
1812 if (error)
1813 goto ret;
1814 }
1815 }
1816 sce = &sc->sc_endpoints[endpt][IN];
1817 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
1818 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
1819 if (xerr) {
1820 error = EIO;
1821 goto ret;
1822 }
1823 if (len != 0) {
1824 if (uio.uio_rw == UIO_READ) {
1825 size_t alen = min(len, ur->ucr_actlen);
1826 error = uiomove(ptr, alen, &uio);
1827 if (error)
1828 goto ret;
1829 }
1830 }
1831 ret:
1832 if (ptr)
1833 kmem_free(ptr, len);
1834 return error;
1835 }
1836 case USB_GET_DEVICEINFO:
1837 usbd_fill_deviceinfo(sc->sc_udev,
1838 (struct usb_device_info *)addr, 0);
1839 break;
1840 #ifdef COMPAT_30
1841 case USB_GET_DEVICEINFO_OLD:
1842 usbd_fill_deviceinfo_old(sc->sc_udev,
1843 (struct usb_device_info_old *)addr, 0);
1844
1845 break;
1846 #endif
1847 default:
1848 return EINVAL;
1849 }
1850 return 0;
1851 }
1852
1853 int
1854 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1855 {
1856 int endpt = UGENENDPOINT(dev);
1857 struct ugen_softc *sc;
1858 int error;
1859
1860 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1861 if (sc == NULL || sc->sc_dying)
1862 return ENXIO;
1863
1864 sc->sc_refcnt++;
1865 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1866 if (--sc->sc_refcnt < 0)
1867 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1868 return error;
1869 }
1870
/*
 * poll(2) entry point.  Readiness depends on the endpoint transfer
 * type: interrupt = clist non-empty, isochronous = ring non-empty,
 * bulk = ring state when RA/WB is enabled, otherwise always ready.
 * Not supported on the control endpoint.
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return POLLHUP;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	/*
	 * NOTE(review): these are addresses of array elements and can
	 * never be NULL; the checks below are vestigial.
	 */
	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return POLLERR;
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return POLLERR;
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return POLLERR;
	}
#endif

	mutex_enter(&sc->sc_lock);
	/* Read side: readiness depends on the endpoint transfer type. */
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Data queued on the clist? */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Ring buffer non-empty (producer != consumer)? */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Read-ahead: data buffered in the ring? */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	/* Write side: only bulk write-behind has real state to report. */
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Write-behind: room left in the ring? */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return revents;
}
1969
1970 static void
1971 filt_ugenrdetach(struct knote *kn)
1972 {
1973 struct ugen_endpoint *sce = kn->kn_hook;
1974 struct ugen_softc *sc = sce->sc;
1975
1976 mutex_enter(&sc->sc_lock);
1977 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1978 mutex_exit(&sc->sc_lock);
1979 }
1980
1981 static int
1982 filt_ugenread_intr(struct knote *kn, long hint)
1983 {
1984 struct ugen_endpoint *sce = kn->kn_hook;
1985 struct ugen_softc *sc = sce->sc;
1986
1987 if (sc->sc_dying)
1988 return 0;
1989
1990 kn->kn_data = sce->q.c_cc;
1991 return kn->kn_data > 0;
1992 }
1993
1994 static int
1995 filt_ugenread_isoc(struct knote *kn, long hint)
1996 {
1997 struct ugen_endpoint *sce = kn->kn_hook;
1998 struct ugen_softc *sc = sce->sc;
1999
2000 if (sc->sc_dying)
2001 return 0;
2002
2003 if (sce->cur == sce->fill)
2004 return 0;
2005
2006 if (sce->cur < sce->fill)
2007 kn->kn_data = sce->fill - sce->cur;
2008 else
2009 kn->kn_data = (sce->limit - sce->cur) +
2010 (sce->fill - sce->ibuf);
2011
2012 return 1;
2013 }
2014
2015 static int
2016 filt_ugenread_bulk(struct knote *kn, long hint)
2017 {
2018 struct ugen_endpoint *sce = kn->kn_hook;
2019 struct ugen_softc *sc = sce->sc;
2020
2021 if (sc->sc_dying)
2022 return 0;
2023
2024 if (!(sce->state & UGEN_BULK_RA))
2025 /*
2026 * We have no easy way of determining if a read will
2027 * yield any data or a write will happen.
2028 * So, emulate "seltrue".
2029 */
2030 return filt_seltrue(kn, hint);
2031
2032 if (sce->ra_wb_used == 0)
2033 return 0;
2034
2035 kn->kn_data = sce->ra_wb_used;
2036
2037 return 1;
2038 }
2039
2040 static int
2041 filt_ugenwrite_bulk(struct knote *kn, long hint)
2042 {
2043 struct ugen_endpoint *sce = kn->kn_hook;
2044 struct ugen_softc *sc = sce->sc;
2045
2046 if (sc->sc_dying)
2047 return 0;
2048
2049 if (!(sce->state & UGEN_BULK_WB))
2050 /*
2051 * We have no easy way of determining if a read will
2052 * yield any data or a write will happen.
2053 * So, emulate "seltrue".
2054 */
2055 return filt_seltrue(kn, hint);
2056
2057 if (sce->ra_wb_used == sce->limit - sce->ibuf)
2058 return 0;
2059
2060 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2061
2062 return 1;
2063 }
2064
/*
 * kqueue filter operation tables, one per supported event source.
 * All four share filt_ugenrdetach: it removes the knote from the
 * endpoint's rsel klist regardless of direction, and ugenkqfilter()
 * hooks read and write knotes onto their own endpoints' klists.
 */
static const struct filterops ugenread_intr_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_intr,
};

static const struct filterops ugenread_isoc_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_isoc,
};

static const struct filterops ugenread_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_bulk,
};

static const struct filterops ugenwrite_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenwrite_bulk,
};
2092
/*
 * kqueue(2) attach entry point: pick the filterops matching the
 * endpoint direction and transfer type, then hook the knote onto the
 * endpoint's klist under the softc lock.  Not supported on the
 * control endpoint.
 */
int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		/*
		 * NOTE(review): sce is an array-element address and can
		 * never be NULL; also sce->edesc is dereferenced below
		 * without a check — presumably the endpoint is always
		 * configured when this is reachable, confirm.
		 */
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return EINVAL;

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	default:
		return EINVAL;
	}

	/* The endpoint pointer lets the filter find its state later. */
	kn->kn_hook = sce;

	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return 0;
}
2161