/* $NetBSD: ugen.c,v 1.152 2020/08/16 02:33:17 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.152 2020/08/16 02:33:17 riastradh Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #include "opt_usb.h"
45 #endif
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/kmem.h>
51 #include <sys/device.h>
52 #include <sys/ioctl.h>
53 #include <sys/conf.h>
54 #include <sys/tty.h>
55 #include <sys/file.h>
56 #include <sys/select.h>
57 #include <sys/proc.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60 #include <sys/compat_stub.h>
61
62 #include <dev/usb/usb.h>
63 #include <dev/usb/usbdi.h>
64 #include <dev/usb/usbdi_util.h>
65
66 #include "ioconf.h"
67
/* Debug printf helpers; compiled in only with options UGEN_DEBUG. */
#ifdef UGEN_DEBUG
#define DPRINTF(x)	if (ugendebug) printf x
#define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
int	ugendebug = 0;		/* verbosity level, settable from ddb */
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define	UGEN_CHUNK	128	/* chunk size for read */
#define	UGEN_IBSIZE	1020	/* buffer size */
#define	UGEN_BBSIZE	1024	/* bounce-buffer size for synchronous bulk I/O */

#define UGEN_NISOREQS	4	/* number of outstanding xfer requests */
#define UGEN_NISORFRMS	8	/* number of transactions per req */
#define UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)

#define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
#define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
87
/*
 * One outstanding isochronous-in request: the xfer, its DMA buffer,
 * and the per-frame length array handed to usbd_setup_isoc_xfer().
 */
struct isoreq {
	struct ugen_endpoint *sce;	/* back pointer to owning endpoint */
	struct usbd_xfer *xfer;
	void *dmabuf;			/* usbd_get_buffer(xfer) */
	uint16_t sizes[UGEN_NISORFRMS];	/* frame sizes for next transfer */
};
94
/*
 * Per-endpoint, per-direction state.  Everything before "rsel" is
 * wiped by ugen_clear_endpoints() on a configuration change; rsel and
 * cv stay initialised for the lifetime of the softc.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back pointer to device softc */
	usb_endpoint_descriptor_t *edesc;
	struct usbd_interface *iface;
	int state;
#define	UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;
	struct clist q;			/* packet queue for interrupt-in */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	uint32_t timeout;	/* I/O timeout in ms (passed to mstohz()) */
	uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	uint32_t ra_wb_used;	 /* how much is in buffer */
	uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;
	struct isoreq isoreqs[UGEN_NISOREQS];
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;		/* poll/select/kqueue waiters */
	kcondvar_t cv;			/* sleep point for blocked I/O */
};
122
/* Per-unit instance data for an attached ugen(4)/ugenif(4) device. */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	struct usbd_device *sc_udev;

	kmutex_t sc_lock;		/* guards endpoint state and sc_refcnt */
	kcondvar_t sc_detach_cv;	/* detach waits here for refcnt drain */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* nonzero while node is open */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN 1

	int sc_refcnt;			/* count of in-progress I/O calls */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for synchronous I/O */
	u_char sc_dying;		/* set on deactivate/detach */
};
139
/* Character device entry points (no tty, no mmap, no stop). */
static dev_type_open(ugenopen);
static dev_type_close(ugenclose);
static dev_type_read(ugenread);
static dev_type_write(ugenwrite);
static dev_type_ioctl(ugenioctl);
static dev_type_poll(ugenpoll);
static dev_type_kqfilter(ugenkqfilter);

const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
162
/* Transfer completion callbacks and internal helpers. */
Static void ugenintr(struct usbd_xfer *, void *,
		     usbd_status);
Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
			    usbd_status);
Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
			     usbd_status);
Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
			     usbd_status);
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
			 void *, int, struct lwp *);
Static int ugen_set_config(struct ugen_softc *, int, int);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
					       int, int *);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int ugen_get_alt_index(struct ugen_softc *, int);
Static void ugen_clear_endpoints(struct ugen_softc *);

/*
 * The minor number encodes the unit in bits 4-7 and the endpoint
 * address in bits 0-3, so each unit exposes up to 16 endpoint nodes.
 */
#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))

/* Autoconfiguration glue for the ugen and ugenif attachments. */
static int	ugenif_match(device_t, cfdata_t, void *);
static void	ugenif_attach(device_t, device_t, void *);
static int	ugen_match(device_t, cfdata_t, void *);
static void	ugen_attach(device_t, device_t, void *);
static int	ugen_detach(device_t, int);
static int	ugen_activate(device_t, enum devact);

CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match,
    ugen_attach, ugen_detach, ugen_activate);
CFATTACH_DECL_NEW(ugenif, sizeof(struct ugen_softc), ugenif_match,
    ugenif_attach, ugen_detach, ugen_activate);

/* toggle to control attach priority. -1 means "let autoconf decide" */
int ugen_override = -1;
200
201 static int
202 ugen_match(device_t parent, cfdata_t match, void *aux)
203 {
204 struct usb_attach_arg *uaa = aux;
205 int override;
206
207 if (ugen_override != -1)
208 override = ugen_override;
209 else
210 override = match->cf_flags & 1;
211
212 if (override)
213 return UMATCH_HIGHEST;
214 else if (uaa->uaa_usegeneric)
215 return UMATCH_GENERIC;
216 else
217 return UMATCH_NONE;
218 }
219
220 static int
221 ugenif_match(device_t parent, cfdata_t match, void *aux)
222 {
223 /* Assume that they knew what they configured! (see ugenif(4)) */
224 return UMATCH_HIGHEST;
225 }
226
227 static void
228 ugen_attach(device_t parent, device_t self, void *aux)
229 {
230 struct usb_attach_arg *uaa = aux;
231 struct usbif_attach_arg uiaa;
232
233 memset(&uiaa, 0, sizeof(uiaa));
234 uiaa.uiaa_port = uaa->uaa_port;
235 uiaa.uiaa_vendor = uaa->uaa_vendor;
236 uiaa.uiaa_product = uaa->uaa_product;
237 uiaa.uiaa_release = uaa->uaa_release;
238 uiaa.uiaa_device = uaa->uaa_device;
239 uiaa.uiaa_configno = -1;
240 uiaa.uiaa_ifaceno = -1;
241
242 ugenif_attach(parent, self, &uiaa);
243 }
244
/*
 * Common attach path for both ugen(4) (whole device) and ugenif(4)
 * (single interface).  Initialises locking and per-endpoint wait
 * objects, optionally selects the default configuration, and builds
 * the endpoint tables for the current configuration.
 */
static void
ugenif_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uiaa->uiaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uiaa->uiaa_device;

	/* selinfo/cv live for the whole softc lifetime (see detach). */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	if (uiaa->uiaa_ifaceno < 0) {
		/*
		 * If we attach the whole device,
		 * set configuration index 0, the default one.
		 */
		err = usbd_set_config_index(udev, 0, 0);
		if (err) {
			aprint_error_dev(self,
			    "setting configuration index 0 failed\n");
			sc->sc_dying = 1;
			return;
		}
	}

	/* Get current configuration */
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf, uiaa->uiaa_ifaceno < 0);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

}
310
311 Static void
312 ugen_clear_endpoints(struct ugen_softc *sc)
313 {
314
315 /* Clear out the old info, but leave the selinfo and cv initialised. */
316 for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
317 for (int dir = OUT; dir <= IN; dir++) {
318 struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
319 memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
320 }
321 }
322 }
323
/*
 * Switch the device to configuration "configno" (unless it is already
 * current) and rebuild the per-endpoint descriptor/interface tables.
 * With chkopen set, refuse with USBD_IN_USE if any non-control
 * endpoint is open, since a configuration change invalidates pipes.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno, int chkopen)
{
	struct usbd_device *dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    device_xname(sc->sc_dev), configno, sc));

	if (chkopen) {
		/*
		 * We start at 1, not 0, because we don't care whether the
		 * control endpoint is open or not. It is always present.
		 */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
			if (sc->sc_is_open[endptno]) {
				DPRINTFN(1,
				    ("ugen_set_config: %s - endpoint %d is open\n",
				     device_xname(sc->sc_dev), endptno));
				return USBD_IN_USE;
			}
	}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return err;
	}

	ugen_clear_endpoints(sc);

	err = usbd_interface_count(dev, &niface);
	if (err)
		return err;

	/* Record each endpoint's descriptor and interface handle. */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return err;
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return err;
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return USBD_NORMAL_COMPLETION;
}
393
/*
 * Open one endpoint device node.  For the control endpoint this is
 * only bookkeeping; for other endpoints, open a pipe per requested
 * direction and set up transfer-type-specific buffering: a clist for
 * interrupt-in, a circular buffer plus a primed ring of xfers for
 * isochronous-in.
 */
static int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	struct usbd_xfer *xfer;
	int i, j;

	sc = device_lookup_private(&ugen_cd, unit);
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return 0;
	}

	if (sc->sc_is_open[endpt])
		return EBUSY;

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce->edesc == NULL)
				return ENXIO;
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly (close already-opened pipes) if a
	 * later direction fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				/* Interrupt-out uses plain synchronous I/O. */
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return EIO;
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			sce->ibuf = kmem_alloc(isize, KM_SLEEP);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			/* Incoming packets are queued into this clist. */
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return ENOMEM;
			}
			err = usbd_open_pipe_intr(sce->iface,
			    edesc->bEndpointAddress,
			    USBD_SHORT_XFER_OK, &sce->pipeh, sce,
			    sce->ibuf, isize, ugenintr,
			    USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return EIO;
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return EIO;
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return EINVAL;
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			/* Circular buffer holding all outstanding frames. */
			sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
				KM_SLEEP);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
				sce->ibuf = NULL;
				return EIO;
			}
			/* Prime a ring of xfers; completions requeue them. */
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				err = usbd_create_xfer(sce->pipeh,
				    isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
				    &xfer);
				if (err)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
				for (j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
				    sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
				    ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			while (--i >= 0) /* implicit buffer free */
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
			sce->ibuf = NULL;
			return ENOMEM;
		case UE_CONTROL:
			/* Only the endpoint-0 node does control I/O. */
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return EINVAL;
		}
	}
	sc->sc_is_open[endpt] = 1;
	return 0;
}
541
/*
 * Close an endpoint device node: abort and close the pipe for each
 * direction that was opened, and release the transfer-type-specific
 * buffering set up in ugenopen().
 */
static int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return EINVAL;
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return 0;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		usbd_abort_pipe(sce->pipeh);

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		/* Size of ibuf to free; depends on the transfer type. */
		int msize = 0;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			/* Only RA/WB mode has a buffer and a live xfer. */
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return 0;
}
616
/*
 * Read from an endpoint.  Interrupt-in data is drained from the clist
 * filled by ugenintr(); bulk endpoints either consume the read-ahead
 * circular buffer (UGEN_BULK_RA) or run synchronous transfers staged
 * through sc_buffer; isochronous-in data is drained from the circular
 * buffer filled by ugen_isoc_rintr().
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	uint32_t n, tn;
	struct usbd_xfer *xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error)
				break;
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = uimin(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			/* Non-blocking read with an empty buffer fails. */
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for read-ahead data to arrive. */
				while (sce->ra_wb_used == 0) {
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error)
						break;
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = uimin(uio->uio_resid,
					    sce->ra_wb_used);
					/* Stop at the wrap point. */
					n = uimin(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer, sce, NULL,
					    uimin(n, sce->ra_wb_xferlen),
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		/* Plain bulk: synchronous transfers staged via sc_buffer. */
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(xfer, sce->pipeh,
			    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
			    sce->timeout, sc->sc_buffer, &tn);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			if (error || tn < n)	/* error or short transfer */
				break;
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* Block until the fill pointer has moved past cur. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error)
				break;
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			/* Copy at most up to the wrap point in one pass. */
			if(sce->fill > sce->cur)
				n = uimin(sce->fill - sce->cur, uio->uio_resid);
			else
				n = uimin(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return ENXIO;
	}
	return error;
}
818
819 static int
820 ugenread(dev_t dev, struct uio *uio, int flag)
821 {
822 int endpt = UGENENDPOINT(dev);
823 struct ugen_softc *sc;
824 int error;
825
826 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
827 if (sc == NULL || sc->sc_dying)
828 return ENXIO;
829
830 mutex_enter(&sc->sc_lock);
831 sc->sc_refcnt++;
832 mutex_exit(&sc->sc_lock);
833
834 error = ugen_do_read(sc, endpt, uio, flag);
835
836 mutex_enter(&sc->sc_lock);
837 if (--sc->sc_refcnt < 0)
838 cv_broadcast(&sc->sc_detach_cv);
839 mutex_exit(&sc->sc_lock);
840
841 return error;
842 }
843
/*
 * Write to an endpoint (bulk or interrupt-out).  For bulk endpoints in
 * write-behind mode (UGEN_BULK_WB), copy into the circular buffer and
 * (re)start the background transfer; otherwise stage user data through
 * sc_buffer in chunks and run synchronous transfers.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	uint32_t n;
	int error = 0;
	uint32_t tn;
	char *dbuf;
	struct usbd_xfer *xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			/* Non-blocking write with a full buffer fails. */
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for space in the circular buffer. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error)
						break;
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = uimin(uio->uio_resid,
					    (sce->limit - sce->ibuf)
					     - sce->ra_wb_used);
					/* Stop at the wrap point. */
					n = uimin(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					/*
					 * Linearize up to one xfer's worth
					 * out of the circular buffer into
					 * the xfer's DMA buffer.
					 */
					dbuf = (char *)usbd_get_buffer(xfer);
					n = uimin(sce->ra_wb_used,
					    sce->ra_wb_xferlen);
					tn = uimin(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						    n - tn);
					usbd_setup_xfer(xfer, sce, NULL, n,
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		/* Plain bulk: synchronous transfers staged via sc_buffer. */
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0, sce->timeout,
			    sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_INTERRUPT:
		error = usbd_create_xfer(sce->pipeh,
		    UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
		if (error)
			return error;
		/* One max-packet-sized synchronous transfer at a time. */
		while ((n = uimin(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	default:
		return ENXIO;
	}
	return error;
}
1005
1006 static int
1007 ugenwrite(dev_t dev, struct uio *uio, int flag)
1008 {
1009 int endpt = UGENENDPOINT(dev);
1010 struct ugen_softc *sc;
1011 int error;
1012
1013 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1014 if (sc == NULL || sc->sc_dying)
1015 return ENXIO;
1016
1017 mutex_enter(&sc->sc_lock);
1018 sc->sc_refcnt++;
1019 mutex_exit(&sc->sc_lock);
1020
1021 error = ugen_do_write(sc, endpt, uio, flag);
1022
1023 mutex_enter(&sc->sc_lock);
1024 if (--sc->sc_refcnt < 0)
1025 cv_broadcast(&sc->sc_detach_cv);
1026 mutex_exit(&sc->sc_lock);
1027
1028 return error;
1029 }
1030
1031 static int
1032 ugen_activate(device_t self, enum devact act)
1033 {
1034 struct ugen_softc *sc = device_private(self);
1035
1036 switch (act) {
1037 case DVACT_DEACTIVATE:
1038 sc->sc_dying = 1;
1039 return 0;
1040 default:
1041 return EOPNOTSUPP;
1042 }
1043 }
1044
1045 static int
1046 ugen_detach(device_t self, int flags)
1047 {
1048 struct ugen_softc *sc = device_private(self);
1049 struct ugen_endpoint *sce;
1050 int i, dir;
1051 int maj, mn;
1052
1053 DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
1054
1055 sc->sc_dying = 1;
1056 pmf_device_deregister(self);
1057 /* Abort all pipes. Causes processes waiting for transfer to wake. */
1058 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1059 for (dir = OUT; dir <= IN; dir++) {
1060 sce = &sc->sc_endpoints[i][dir];
1061 if (sce->pipeh)
1062 usbd_abort_pipe(sce->pipeh);
1063 }
1064 }
1065
1066 mutex_enter(&sc->sc_lock);
1067 if (--sc->sc_refcnt >= 0) {
1068 /* Wake everyone */
1069 for (i = 0; i < USB_MAX_ENDPOINTS; i++)
1070 cv_signal(&sc->sc_endpoints[i][IN].cv);
1071 /* Wait for processes to go away. */
1072 if (cv_timedwait(&sc->sc_detach_cv, &sc->sc_lock, hz * 60))
1073 aprint_error_dev(self, ": didn't detach\n");
1074 }
1075 mutex_exit(&sc->sc_lock);
1076
1077 /* locate the major number */
1078 maj = cdevsw_lookup_major(&ugen_cdevsw);
1079
1080 /* Nuke the vnodes for any open instances (calls close). */
1081 mn = device_unit(self) * USB_MAX_ENDPOINTS;
1082 vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
1083
1084 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);
1085
1086 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1087 for (dir = OUT; dir <= IN; dir++) {
1088 sce = &sc->sc_endpoints[i][dir];
1089 seldestroy(&sce->rsel);
1090 cv_destroy(&sce->cv);
1091 }
1092 }
1093
1094 cv_destroy(&sc->sc_detach_cv);
1095 mutex_destroy(&sc->sc_lock);
1096
1097 return 0;
1098 }
1099
/*
 * Interrupt-in pipe completion: append the received bytes to the
 * endpoint's clist and wake the reader and any poll/kqueue waiters.
 */
Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	/* Pipe is being aborted or closed; leave everything alone. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, (" data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	/* b_to_q return deliberately ignored: excess data is dropped
	 * if the queue is full. */
	mutex_enter(&sc->sc_lock);
	(void)b_to_q(ibuf, count, &sce->q);
	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1132
1133 Static void
1134 ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
1135 usbd_status status)
1136 {
1137 struct isoreq *req = addr;
1138 struct ugen_endpoint *sce = req->sce;
1139 struct ugen_softc *sc = sce->sc;
1140 uint32_t count, n;
1141 int i, isize;
1142
1143 /* Return if we are aborting. */
1144 if (status == USBD_CANCELLED)
1145 return;
1146
1147 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1148 DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1149 (long)(req - sce->isoreqs), count));
1150
1151 mutex_enter(&sc->sc_lock);
1152
1153 /* throw away oldest input if the buffer is full */
1154 if (sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1155 sce->cur += count;
1156 if (sce->cur >= sce->limit)
1157 sce->cur = sce->ibuf + (sce->limit - sce->cur);
1158 DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1159 count));
1160 }
1161
1162 isize = UGETW(sce->edesc->wMaxPacketSize);
1163 for (i = 0; i < UGEN_NISORFRMS; i++) {
1164 uint32_t actlen = req->sizes[i];
1165 char const *tbuf = (char const *)req->dmabuf + isize * i;
1166
1167 /* copy data to buffer */
1168 while (actlen > 0) {
1169 n = uimin(actlen, sce->limit - sce->fill);
1170 memcpy(sce->fill, tbuf, n);
1171
1172 tbuf += n;
1173 actlen -= n;
1174 sce->fill += n;
1175 if (sce->fill == sce->limit)
1176 sce->fill = sce->ibuf;
1177 }
1178
1179 /* setup size for next transfer */
1180 req->sizes[i] = isize;
1181 }
1182
1183 usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0,
1184 ugen_isoc_rintr);
1185 (void)usbd_transfer(xfer);
1186
1187 cv_signal(&sce->cv);
1188 mutex_exit(&sc->sc_lock);
1189 selnotify(&sce->rsel, 0, 0);
1190 }
1191
/*
 * Bulk read-ahead completion callback.  Runs each time a background
 * read finishes: the received data is appended to the endpoint's ring
 * buffer, another read is scheduled while buffer space remains, and
 * any sleeping or selecting reader is woken.
 */
Static void
ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr,
    usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		/* Stop read-ahead; the next read() will restart it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	/* First segment: as much as fits before the end of the ring. */
	n = uimin(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	/* Second segment: the remainder wraps to the buffer start. */
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary (n = free space left). */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce, NULL, uimin(n, sce->ra_wb_xferlen), 0,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Buffer full: the next read() restarts the transfer. */
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1257
/*
 * Bulk write-behind completion callback.  Runs each time a background
 * write finishes: the consumed bytes are released from the endpoint's
 * ring buffer, the next chunk (if any) is copied out and submitted,
 * and any writer sleeping for buffer space is woken.
 */
Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
    usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		/* Stop write-behind; the next write() will restart it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers; cur is the drain pointer here. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = uimin(sce->ra_wb_used, sce->ra_wb_xferlen);
		/* First segment: up to the end of the ring. */
		n = uimin(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		/* Second segment: the wrapped part at the buffer start. */
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
		    ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Buffer drained: the next write() restarts the transfer. */
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1322
/*
 * Select alternate setting `altno' on interface `ifaceidx' and
 * re-register the endpoints of the new setting in sc_endpoints.
 * Returns USBD_NORMAL_COMPLETION (0) on success or a usbd_status
 * error.
 */
Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt, endptno, endpt;
	int dir;

	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return err;
	/* Reject out-of-range interface indices. */
	if (ifaceidx < 0 || ifaceidx >= niface)
		return USBD_INVAL;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return err;
	/*
	 * This first count is only used as a validity check before
	 * switching; nendpt is re-read after the switch below.
	 */
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return err;

	/* The endpoint count can differ between alternate settings. */
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	/* Drop the endpoint table of the previous setting... */
	ugen_clear_endpoints(sc);

	/* ...and record the endpoints of the new setting. */
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return 0;
}
1371
1372 /* Retrieve a complete descriptor for a certain device and index. */
1373 Static usb_config_descriptor_t *
1374 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1375 {
1376 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1377 int len;
1378 usbd_status err;
1379
1380 if (index == USB_CURRENT_CONFIG_INDEX) {
1381 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1382 if (tdesc == NULL)
1383 return NULL;
1384 len = UGETW(tdesc->wTotalLength);
1385 if (lenp)
1386 *lenp = len;
1387 cdesc = kmem_alloc(len, KM_SLEEP);
1388 memcpy(cdesc, tdesc, len);
1389 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1390 } else {
1391 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1392 if (err)
1393 return 0;
1394 len = UGETW(cdescr.wTotalLength);
1395 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1396 if (lenp)
1397 *lenp = len;
1398 cdesc = kmem_alloc(len, KM_SLEEP);
1399 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1400 if (err) {
1401 kmem_free(cdesc, len);
1402 return 0;
1403 }
1404 }
1405 return cdesc;
1406 }
1407
1408 Static int
1409 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1410 {
1411 struct usbd_interface *iface;
1412 usbd_status err;
1413
1414 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1415 if (err)
1416 return -1;
1417 return usbd_get_interface_altindex(iface);
1418 }
1419
1420 Static int
1421 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1422 void *addr, int flag, struct lwp *l)
1423 {
1424 struct ugen_endpoint *sce;
1425 usbd_status err;
1426 struct usbd_interface *iface;
1427 struct usb_config_desc *cd;
1428 usb_config_descriptor_t *cdesc;
1429 struct usb_interface_desc *id;
1430 usb_interface_descriptor_t *idesc;
1431 struct usb_endpoint_desc *ed;
1432 usb_endpoint_descriptor_t *edesc;
1433 struct usb_alt_interface *ai;
1434 struct usb_string_desc *si;
1435 uint8_t conf, alt;
1436 int cdesclen;
1437 int error;
1438 int dir;
1439
1440 DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
1441 if (sc->sc_dying)
1442 return EIO;
1443
1444 switch (cmd) {
1445 case FIONBIO:
1446 /* All handled in the upper FS layer. */
1447 return 0;
1448 case USB_SET_SHORT_XFER:
1449 if (endpt == USB_CONTROL_ENDPOINT)
1450 return EINVAL;
1451 /* This flag only affects read */
1452 sce = &sc->sc_endpoints[endpt][IN];
1453 if (sce == NULL || sce->pipeh == NULL)
1454 return EINVAL;
1455 if (*(int *)addr)
1456 sce->state |= UGEN_SHORT_OK;
1457 else
1458 sce->state &= ~UGEN_SHORT_OK;
1459 return 0;
1460 case USB_SET_TIMEOUT:
1461 for (dir = OUT; dir <= IN; dir++) {
1462 sce = &sc->sc_endpoints[endpt][dir];
1463 if (sce == NULL)
1464 return EINVAL;
1465
1466 sce->timeout = *(int *)addr;
1467 }
1468 return 0;
1469 case USB_SET_BULK_RA:
1470 if (endpt == USB_CONTROL_ENDPOINT)
1471 return EINVAL;
1472 sce = &sc->sc_endpoints[endpt][IN];
1473 if (sce == NULL || sce->pipeh == NULL)
1474 return EINVAL;
1475 edesc = sce->edesc;
1476 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1477 return EINVAL;
1478
1479 if (*(int *)addr) {
1480 /* Only turn RA on if it's currently off. */
1481 if (sce->state & UGEN_BULK_RA)
1482 return 0;
1483
1484 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1485 /* shouldn't happen */
1486 return EINVAL;
1487 error = usbd_create_xfer(sce->pipeh,
1488 sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
1489 if (error)
1490 return error;
1491 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1492 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1493 sce->fill = sce->cur = sce->ibuf;
1494 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1495 sce->ra_wb_used = 0;
1496 sce->state |= UGEN_BULK_RA;
1497 sce->state &= ~UGEN_RA_WB_STOP;
1498 /* Now start reading. */
1499 usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL,
1500 uimin(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1501 0, USBD_NO_TIMEOUT, ugen_bulkra_intr);
1502 err = usbd_transfer(sce->ra_wb_xfer);
1503 if (err != USBD_IN_PROGRESS) {
1504 sce->state &= ~UGEN_BULK_RA;
1505 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1506 sce->ibuf = NULL;
1507 usbd_destroy_xfer(sce->ra_wb_xfer);
1508 return EIO;
1509 }
1510 } else {
1511 /* Only turn RA off if it's currently on. */
1512 if (!(sce->state & UGEN_BULK_RA))
1513 return 0;
1514
1515 sce->state &= ~UGEN_BULK_RA;
1516 usbd_abort_pipe(sce->pipeh);
1517 usbd_destroy_xfer(sce->ra_wb_xfer);
1518 /*
1519 * XXX Discard whatever's in the buffer, but we
1520 * should keep it around and drain the buffer
1521 * instead.
1522 */
1523 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1524 sce->ibuf = NULL;
1525 }
1526 return 0;
1527 case USB_SET_BULK_WB:
1528 if (endpt == USB_CONTROL_ENDPOINT)
1529 return EINVAL;
1530 sce = &sc->sc_endpoints[endpt][OUT];
1531 if (sce == NULL || sce->pipeh == NULL)
1532 return EINVAL;
1533 edesc = sce->edesc;
1534 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1535 return EINVAL;
1536
1537 if (*(int *)addr) {
1538 /* Only turn WB on if it's currently off. */
1539 if (sce->state & UGEN_BULK_WB)
1540 return 0;
1541
1542 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1543 /* shouldn't happen */
1544 return EINVAL;
1545 error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize,
1546 0, 0, &sce->ra_wb_xfer);
1547 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1548 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1549 sce->fill = sce->cur = sce->ibuf;
1550 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1551 sce->ra_wb_used = 0;
1552 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1553 } else {
1554 /* Only turn WB off if it's currently on. */
1555 if (!(sce->state & UGEN_BULK_WB))
1556 return 0;
1557
1558 sce->state &= ~UGEN_BULK_WB;
1559 /*
1560 * XXX Discard whatever's in the buffer, but we
1561 * should keep it around and keep writing to
1562 * drain the buffer instead.
1563 */
1564 usbd_abort_pipe(sce->pipeh);
1565 usbd_destroy_xfer(sce->ra_wb_xfer);
1566 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1567 sce->ibuf = NULL;
1568 }
1569 return 0;
1570 case USB_SET_BULK_RA_OPT:
1571 case USB_SET_BULK_WB_OPT:
1572 {
1573 struct usb_bulk_ra_wb_opt *opt;
1574
1575 if (endpt == USB_CONTROL_ENDPOINT)
1576 return EINVAL;
1577 opt = (struct usb_bulk_ra_wb_opt *)addr;
1578 if (cmd == USB_SET_BULK_RA_OPT)
1579 sce = &sc->sc_endpoints[endpt][IN];
1580 else
1581 sce = &sc->sc_endpoints[endpt][OUT];
1582 if (sce == NULL || sce->pipeh == NULL)
1583 return EINVAL;
1584 if (opt->ra_wb_buffer_size < 1 ||
1585 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1586 opt->ra_wb_request_size < 1 ||
1587 opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1588 return EINVAL;
1589 /*
1590 * XXX These changes do not take effect until the
1591 * next time RA/WB mode is enabled but they ought to
1592 * take effect immediately.
1593 */
1594 sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1595 sce->ra_wb_reqsize = opt->ra_wb_request_size;
1596 return 0;
1597 }
1598 default:
1599 break;
1600 }
1601
1602 if (endpt != USB_CONTROL_ENDPOINT)
1603 return EINVAL;
1604
1605 switch (cmd) {
1606 #ifdef UGEN_DEBUG
1607 case USB_SETDEBUG:
1608 ugendebug = *(int *)addr;
1609 break;
1610 #endif
1611 case USB_GET_CONFIG:
1612 err = usbd_get_config(sc->sc_udev, &conf);
1613 if (err)
1614 return EIO;
1615 *(int *)addr = conf;
1616 break;
1617 case USB_SET_CONFIG:
1618 if (!(flag & FWRITE))
1619 return EPERM;
1620 err = ugen_set_config(sc, *(int *)addr, 1);
1621 switch (err) {
1622 case USBD_NORMAL_COMPLETION:
1623 break;
1624 case USBD_IN_USE:
1625 return EBUSY;
1626 default:
1627 return EIO;
1628 }
1629 break;
1630 case USB_GET_ALTINTERFACE:
1631 ai = (struct usb_alt_interface *)addr;
1632 err = usbd_device2interface_handle(sc->sc_udev,
1633 ai->uai_interface_index, &iface);
1634 if (err)
1635 return EINVAL;
1636 idesc = usbd_get_interface_descriptor(iface);
1637 if (idesc == NULL)
1638 return EIO;
1639 ai->uai_alt_no = idesc->bAlternateSetting;
1640 break;
1641 case USB_SET_ALTINTERFACE:
1642 if (!(flag & FWRITE))
1643 return EPERM;
1644 ai = (struct usb_alt_interface *)addr;
1645 err = usbd_device2interface_handle(sc->sc_udev,
1646 ai->uai_interface_index, &iface);
1647 if (err)
1648 return EINVAL;
1649 err = ugen_set_interface(sc, ai->uai_interface_index,
1650 ai->uai_alt_no);
1651 if (err)
1652 return EINVAL;
1653 break;
1654 case USB_GET_NO_ALT:
1655 ai = (struct usb_alt_interface *)addr;
1656 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
1657 if (cdesc == NULL)
1658 return EINVAL;
1659 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1660 if (idesc == NULL) {
1661 kmem_free(cdesc, cdesclen);
1662 return EINVAL;
1663 }
1664 ai->uai_alt_no = usbd_get_no_alts(cdesc,
1665 idesc->bInterfaceNumber);
1666 kmem_free(cdesc, cdesclen);
1667 break;
1668 case USB_GET_DEVICE_DESC:
1669 *(usb_device_descriptor_t *)addr =
1670 *usbd_get_device_descriptor(sc->sc_udev);
1671 break;
1672 case USB_GET_CONFIG_DESC:
1673 cd = (struct usb_config_desc *)addr;
1674 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
1675 if (cdesc == NULL)
1676 return EINVAL;
1677 cd->ucd_desc = *cdesc;
1678 kmem_free(cdesc, cdesclen);
1679 break;
1680 case USB_GET_INTERFACE_DESC:
1681 id = (struct usb_interface_desc *)addr;
1682 cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
1683 if (cdesc == NULL)
1684 return EINVAL;
1685 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1686 id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1687 alt = ugen_get_alt_index(sc, id->uid_interface_index);
1688 else
1689 alt = id->uid_alt_index;
1690 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1691 if (idesc == NULL) {
1692 kmem_free(cdesc, cdesclen);
1693 return EINVAL;
1694 }
1695 id->uid_desc = *idesc;
1696 kmem_free(cdesc, cdesclen);
1697 break;
1698 case USB_GET_ENDPOINT_DESC:
1699 ed = (struct usb_endpoint_desc *)addr;
1700 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
1701 if (cdesc == NULL)
1702 return EINVAL;
1703 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1704 ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1705 alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1706 else
1707 alt = ed->ued_alt_index;
1708 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1709 alt, ed->ued_endpoint_index);
1710 if (edesc == NULL) {
1711 kmem_free(cdesc, cdesclen);
1712 return EINVAL;
1713 }
1714 ed->ued_desc = *edesc;
1715 kmem_free(cdesc, cdesclen);
1716 break;
1717 case USB_GET_FULL_DESC:
1718 {
1719 int len;
1720 struct iovec iov;
1721 struct uio uio;
1722 struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1723
1724 cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
1725 if (cdesc == NULL)
1726 return EINVAL;
1727 len = cdesclen;
1728 if (len > fd->ufd_size)
1729 len = fd->ufd_size;
1730 iov.iov_base = (void *)fd->ufd_data;
1731 iov.iov_len = len;
1732 uio.uio_iov = &iov;
1733 uio.uio_iovcnt = 1;
1734 uio.uio_resid = len;
1735 uio.uio_offset = 0;
1736 uio.uio_rw = UIO_READ;
1737 uio.uio_vmspace = l->l_proc->p_vmspace;
1738 error = uiomove((void *)cdesc, len, &uio);
1739 kmem_free(cdesc, cdesclen);
1740 return error;
1741 }
1742 case USB_GET_STRING_DESC: {
1743 int len;
1744 si = (struct usb_string_desc *)addr;
1745 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1746 si->usd_language_id, &si->usd_desc, &len);
1747 if (err)
1748 return EINVAL;
1749 break;
1750 }
1751 case USB_DO_REQUEST:
1752 {
1753 struct usb_ctl_request *ur = (void *)addr;
1754 int len = UGETW(ur->ucr_request.wLength);
1755 struct iovec iov;
1756 struct uio uio;
1757 void *ptr = 0;
1758 usbd_status xerr;
1759
1760 error = 0;
1761
1762 if (!(flag & FWRITE))
1763 return EPERM;
1764 /* Avoid requests that would damage the bus integrity. */
1765 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1766 ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
1767 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1768 ur->ucr_request.bRequest == UR_SET_CONFIG) ||
1769 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
1770 ur->ucr_request.bRequest == UR_SET_INTERFACE))
1771 return EINVAL;
1772
1773 if (len < 0 || len > 32767)
1774 return EINVAL;
1775 if (len != 0) {
1776 iov.iov_base = (void *)ur->ucr_data;
1777 iov.iov_len = len;
1778 uio.uio_iov = &iov;
1779 uio.uio_iovcnt = 1;
1780 uio.uio_resid = len;
1781 uio.uio_offset = 0;
1782 uio.uio_rw =
1783 ur->ucr_request.bmRequestType & UT_READ ?
1784 UIO_READ : UIO_WRITE;
1785 uio.uio_vmspace = l->l_proc->p_vmspace;
1786 ptr = kmem_alloc(len, KM_SLEEP);
1787 if (uio.uio_rw == UIO_WRITE) {
1788 error = uiomove(ptr, len, &uio);
1789 if (error)
1790 goto ret;
1791 }
1792 }
1793 sce = &sc->sc_endpoints[endpt][IN];
1794 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
1795 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
1796 if (xerr) {
1797 error = EIO;
1798 goto ret;
1799 }
1800 if (len != 0) {
1801 if (uio.uio_rw == UIO_READ) {
1802 size_t alen = uimin(len, ur->ucr_actlen);
1803 error = uiomove(ptr, alen, &uio);
1804 if (error)
1805 goto ret;
1806 }
1807 }
1808 ret:
1809 if (ptr)
1810 kmem_free(ptr, len);
1811 return error;
1812 }
1813 case USB_GET_DEVICEINFO:
1814 usbd_fill_deviceinfo(sc->sc_udev,
1815 (struct usb_device_info *)addr, 0);
1816 break;
1817 case USB_GET_DEVICEINFO_OLD:
1818 {
1819 int ret;
1820 MODULE_HOOK_CALL(usb_subr_fill_30_hook,
1821 (sc->sc_udev, (struct usb_device_info_old *)addr, 0,
1822 usbd_devinfo_vp, usbd_printBCD),
1823 enosys(), ret);
1824 if (ret == 0)
1825 return 0;
1826 return EINVAL;
1827 }
1828 default:
1829 return EINVAL;
1830 }
1831 return 0;
1832 }
1833
1834 static int
1835 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1836 {
1837 int endpt = UGENENDPOINT(dev);
1838 struct ugen_softc *sc;
1839 int error;
1840
1841 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1842 if (sc == NULL || sc->sc_dying)
1843 return ENXIO;
1844
1845 sc->sc_refcnt++;
1846 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1847 if (--sc->sc_refcnt < 0)
1848 cv_broadcast(&sc->sc_detach_cv);
1849 return error;
1850 }
1851
/*
 * poll(2) entry point.  Reports readiness of the IN and OUT pipes of
 * the endpoint selected by the device minor.  The control endpoint is
 * not pollable.
 */
static int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return POLLHUP;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return POLLERR;
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return POLLERR;
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return POLLERR;
	}
#endif

	mutex_enter(&sc->sc_lock);
	/* Read side: readiness depends on the endpoint's transfer type. */
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Readable when the clist has queued bytes. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Readable when the ring buffer is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Readable when read-ahead data is buffered. */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	/* Write side: only bulk write-behind has real readiness state. */
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Writable while the ring has free space. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return revents;
}
1950
1951 static void
1952 filt_ugenrdetach(struct knote *kn)
1953 {
1954 struct ugen_endpoint *sce = kn->kn_hook;
1955 struct ugen_softc *sc = sce->sc;
1956
1957 mutex_enter(&sc->sc_lock);
1958 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1959 mutex_exit(&sc->sc_lock);
1960 }
1961
1962 static int
1963 filt_ugenread_intr(struct knote *kn, long hint)
1964 {
1965 struct ugen_endpoint *sce = kn->kn_hook;
1966 struct ugen_softc *sc = sce->sc;
1967
1968 if (sc->sc_dying)
1969 return 0;
1970
1971 kn->kn_data = sce->q.c_cc;
1972 return kn->kn_data > 0;
1973 }
1974
1975 static int
1976 filt_ugenread_isoc(struct knote *kn, long hint)
1977 {
1978 struct ugen_endpoint *sce = kn->kn_hook;
1979 struct ugen_softc *sc = sce->sc;
1980
1981 if (sc->sc_dying)
1982 return 0;
1983
1984 if (sce->cur == sce->fill)
1985 return 0;
1986
1987 if (sce->cur < sce->fill)
1988 kn->kn_data = sce->fill - sce->cur;
1989 else
1990 kn->kn_data = (sce->limit - sce->cur) +
1991 (sce->fill - sce->ibuf);
1992
1993 return 1;
1994 }
1995
1996 static int
1997 filt_ugenread_bulk(struct knote *kn, long hint)
1998 {
1999 struct ugen_endpoint *sce = kn->kn_hook;
2000 struct ugen_softc *sc = sce->sc;
2001
2002 if (sc->sc_dying)
2003 return 0;
2004
2005 if (!(sce->state & UGEN_BULK_RA))
2006 /*
2007 * We have no easy way of determining if a read will
2008 * yield any data or a write will happen.
2009 * So, emulate "seltrue".
2010 */
2011 return filt_seltrue(kn, hint);
2012
2013 if (sce->ra_wb_used == 0)
2014 return 0;
2015
2016 kn->kn_data = sce->ra_wb_used;
2017
2018 return 1;
2019 }
2020
2021 static int
2022 filt_ugenwrite_bulk(struct knote *kn, long hint)
2023 {
2024 struct ugen_endpoint *sce = kn->kn_hook;
2025 struct ugen_softc *sc = sce->sc;
2026
2027 if (sc->sc_dying)
2028 return 0;
2029
2030 if (!(sce->state & UGEN_BULK_WB))
2031 /*
2032 * We have no easy way of determining if a read will
2033 * yield any data or a write will happen.
2034 * So, emulate "seltrue".
2035 */
2036 return filt_seltrue(kn, hint);
2037
2038 if (sce->ra_wb_used == sce->limit - sce->ibuf)
2039 return 0;
2040
2041 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2042
2043 return 1;
2044 }
2045
/*
 * kqueue filter operation tables.  All four share filt_ugenrdetach for
 * detach (the knote hook is always a struct ugen_endpoint); they differ
 * only in the event routine matching the endpoint's transfer type.
 */
static const struct filterops ugenread_intr_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_intr,
};

static const struct filterops ugenread_isoc_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_isoc,
};

static const struct filterops ugenread_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_bulk,
};

static const struct filterops ugenwrite_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenwrite_bulk,
};
2073
/*
 * kqueue(2) attach entry point: pick the filterops matching the
 * endpoint direction and transfer type, then register the knote on
 * the endpoint's klist under sc_lock.
 */
static int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	/* The control endpoint does not support kqueue events. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		/* Select event routine by transfer type. */
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return EINVAL;

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	default:
		return EINVAL;
	}

	/* The event routines find the endpoint through kn_hook. */
	kn->kn_hook = sce;

	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return 0;
}
2142