ugen.c revision 1.150 1 /* $NetBSD: ugen.c,v 1.150 2020/03/14 02:35:33 christos Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.150 2020/03/14 02:35:33 christos Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #include "opt_usb.h"
45 #endif
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/kmem.h>
51 #include <sys/device.h>
52 #include <sys/ioctl.h>
53 #include <sys/conf.h>
54 #include <sys/tty.h>
55 #include <sys/file.h>
56 #include <sys/select.h>
57 #include <sys/proc.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60 #include <sys/compat_stub.h>
61
62 #include <dev/usb/usb.h>
63 #include <dev/usb/usbdi.h>
64 #include <dev/usb/usbdi_util.h>
65
66 #include "ioconf.h"
67
68 #ifdef UGEN_DEBUG
69 #define DPRINTF(x) if (ugendebug) printf x
70 #define DPRINTFN(n,x) if (ugendebug>(n)) printf x
71 int ugendebug = 0;
72 #else
73 #define DPRINTF(x)
74 #define DPRINTFN(n,x)
75 #endif
76
77 #define UGEN_CHUNK 128 /* chunk size for read */
78 #define UGEN_IBSIZE 1020 /* buffer size */
79 #define UGEN_BBSIZE 1024
80
81 #define UGEN_NISOREQS 4 /* number of outstanding xfer requests */
82 #define UGEN_NISORFRMS 8 /* number of transactions per req */
83 #define UGEN_NISOFRAMES (UGEN_NISORFRMS * UGEN_NISOREQS)
84
85 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
86 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
87
/*
 * One outstanding isochronous-in request: the xfer, its DMA buffer,
 * the per-frame size array handed to usbd_setup_isoc_xfer(), and a
 * back pointer to the endpoint it belongs to.
 */
struct isoreq {
	struct ugen_endpoint *sce;	/* owning endpoint */
	struct usbd_xfer *xfer;		/* the transfer itself */
	void *dmabuf;			/* usbd_get_buffer(xfer) */
	uint16_t sizes[UGEN_NISORFRMS];	/* requested length of each frame */
};
94
/*
 * Per-endpoint, per-direction software state.  Everything in front of
 * UGEN_ENDPOINT_NONZERO_CRUFT is zeroed by ugen_clear_endpoints() on a
 * configuration change; rsel and cv at the tail stay initialised for
 * the lifetime of the softc.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back pointer to the device */
	usb_endpoint_descriptor_t *edesc; /* descriptor; NULL if unused */
	struct usbd_interface *iface;	/* interface the endpoint lives on */
	int state;			/* UGEN_* flag bits below */
#define UGEN_ASLP	0x02	/* waiting for data */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;	/* open pipe; NULL when closed */
	struct clist q;			/* interrupt-in character queue */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	uint32_t timeout;	/* sleep/xfer timeout in ms (passed to mstohz) */
	uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	uint32_t ra_wb_used;	 /* how much is in buffer */
	uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;	/* the single background RA/WB xfer */
	struct isoreq isoreqs[UGEN_NISOREQS]; /* outstanding isoc-in reads */
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;		/* select/poll/kqueue waiters */
	kcondvar_t cv;			/* read/write sleepers */
};
123
/*
 * Per-device softc; one instance per attached ugen/ugenif device.
 */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	struct usbd_device *sc_udev;	/* the underlying USB device */

	kmutex_t sc_lock;		/* serialises refcnt and sleep/wake */
	kcondvar_t sc_detach_cv;	/* detach waits here for sc_refcnt */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* per-endpoint open flag */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN 1

	int sc_refcnt;			/* active read/write threads */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for bulk/intr I/O */
	u_char sc_dying;		/* set on deactivate/detach */
};
140
141 static dev_type_open(ugenopen);
142 static dev_type_close(ugenclose);
143 static dev_type_read(ugenread);
144 static dev_type_write(ugenwrite);
145 static dev_type_ioctl(ugenioctl);
146 static dev_type_poll(ugenpoll);
147 static dev_type_kqfilter(ugenkqfilter);
148
/*
 * Character device switch.  The minor number encodes a (unit, endpoint)
 * pair; see UGENUNIT()/UGENENDPOINT().  mmap and stop are not
 * supported.
 */
const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
163
164 Static void ugenintr(struct usbd_xfer *, void *,
165 usbd_status);
166 Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
167 usbd_status);
168 Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
169 usbd_status);
170 Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
171 usbd_status);
172 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
173 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
174 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
175 void *, int, struct lwp *);
176 Static int ugen_set_config(struct ugen_softc *, int, int);
177 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
178 int, int *);
179 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
180 Static int ugen_get_alt_index(struct ugen_softc *, int);
181 Static void ugen_clear_endpoints(struct ugen_softc *);
182
183 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
184 #define UGENENDPOINT(n) (minor(n) & 0xf)
185 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
186
187 static int ugenif_match(device_t, cfdata_t, void *);
188 static void ugenif_attach(device_t, device_t, void *);
189 static int ugen_match(device_t, cfdata_t, void *);
190 static void ugen_attach(device_t, device_t, void *);
191 static int ugen_detach(device_t, int);
192 static int ugen_activate(device_t, enum devact);
193
194 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match,
195 ugen_attach, ugen_detach, ugen_activate);
196 CFATTACH_DECL_NEW(ugenif, sizeof(struct ugen_softc), ugenif_match,
197 ugenif_attach, ugen_detach, ugen_activate);
198
199 /* toggle to control attach priority. -1 means "let autoconf decide" */
200 int ugen_override = -1;
201
202 static int
203 ugen_match(device_t parent, cfdata_t match, void *aux)
204 {
205 struct usb_attach_arg *uaa = aux;
206 int override;
207
208 if (ugen_override != -1)
209 override = ugen_override;
210 else
211 override = match->cf_flags & 1;
212
213 if (override)
214 return UMATCH_HIGHEST;
215 else if (uaa->uaa_usegeneric)
216 return UMATCH_GENERIC;
217 else
218 return UMATCH_NONE;
219 }
220
221 static int
222 ugenif_match(device_t parent, cfdata_t match, void *aux)
223 {
224 /* Assume that they knew what they configured! (see ugenif(4)) */
225 return UMATCH_HIGHEST;
226 }
227
228 static void
229 ugen_attach(device_t parent, device_t self, void *aux)
230 {
231 struct usb_attach_arg *uaa = aux;
232 struct usbif_attach_arg uiaa;
233
234 memset(&uiaa, 0, sizeof uiaa);
235 uiaa.uiaa_port = uaa->uaa_port;
236 uiaa.uiaa_vendor = uaa->uaa_vendor;
237 uiaa.uiaa_product = uaa->uaa_product;
238 uiaa.uiaa_release = uaa->uaa_release;
239 uiaa.uiaa_device = uaa->uaa_device;
240 uiaa.uiaa_configno = -1;
241 uiaa.uiaa_ifaceno = -1;
242
243 ugenif_attach(parent, self, &uiaa);
244 }
245
/*
 * Common attach path for ugen (whole device) and ugenif (single
 * interface).  Initialises the softc lock, the per-endpoint selinfo
 * and condvars, optionally selects configuration index 0 (whole-device
 * case only), and builds the endpoint table for the current
 * configuration.  On failure sc_dying is set rather than detaching,
 * since autoconf has already attached us.
 */
static void
ugenif_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uiaa->uiaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uiaa->uiaa_device;

	/* These live for the whole softc; ugen_set_config() skips them. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	if (uiaa->uiaa_ifaceno < 0) {
		/*
		 * If we attach the whole device,
		 * set configuration index 0, the default one.
		 */
		err = usbd_set_config_index(udev, 0, 0);
		if (err) {
			aprint_error_dev(self,
			    "setting configuration index 0 failed\n");
			sc->sc_dying = 1;
			return;
		}
	}

	/* Get current configuration */
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf, uiaa->uiaa_ifaceno < 0);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

}
311
312 Static void
313 ugen_clear_endpoints(struct ugen_softc *sc)
314 {
315
316 /* Clear out the old info, but leave the selinfo and cv initialised. */
317 for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
318 for (int dir = OUT; dir <= IN; dir++) {
319 struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
320 memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
321 }
322 }
323 }
324
/*
 * Switch the device to configuration "configno" (unless it is already
 * current) and rebuild sc_endpoints[][] from the interface and
 * endpoint descriptors of that configuration.  With chkopen set,
 * refuse with USBD_IN_USE while any non-control endpoint is open.
 * Returns a usbd_status, USBD_NORMAL_COMPLETION on success.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno, int chkopen)
{
	struct usbd_device *dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
	    device_xname(sc->sc_dev), configno, sc));

	if (chkopen) {
		/*
		 * We start at 1, not 0, because we don't care whether the
		 * control endpoint is open or not. It is always present.
		 */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
			if (sc->sc_is_open[endptno]) {
				DPRINTFN(1,
				    ("ugen_set_config: %s - endpoint %d is open\n",
				    device_xname(sc->sc_dev), endptno));
				return USBD_IN_USE;
			}
	}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return err;
	}

	/* Drop the old endpoint table; rsel/cv survive. */
	ugen_clear_endpoints(sc);

	err = usbd_interface_count(dev, &niface);
	if (err)
		return err;

	/* Record every endpoint of every interface by (address, dir). */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return err;
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return err;
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
			    "(%d,%d), sce=%p\n",
			    endptno, endpt, UE_GET_ADDR(endpt),
			    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return USBD_NORMAL_COMPLETION;
}
394
/*
 * Open a ugen minor device.  The minor encodes (unit, endpoint); the
 * control endpoint allows multiple concurrent opens, all others are
 * exclusive.  For each direction requested by "flag" the pipe is
 * opened and per-xfertype resources are set up:
 *  - interrupt-in: clist queue plus an always-running interrupt pipe
 *    feeding ugenintr(),
 *  - bulk: plain pipe (read-ahead/write-behind is enabled later via
 *    ioctl),
 *  - isochronous-in: circular buffer plus UGEN_NISOREQS pre-queued
 *    transfers completing into ugen_isoc_rintr().
 */
static int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	struct usbd_xfer *xfer;
	int i, j;

	sc = device_lookup_private(&ugen_cd, unit);
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
	    flag, mode, unit, endpt));

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return 0;
	}

	if (sc->sc_is_open[endpt])
		return EBUSY;

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce->edesc == NULL)
				return ENXIO;
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
		    sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				/*
				 * Interrupt-out is done synchronously at
				 * write time; just open the pipe.
				 */
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return EIO;
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			sce->ibuf = kmem_alloc(isize, KM_SLEEP);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
			    endpt, isize));
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return ENOMEM;
			}
			/* Pipe runs continuously; data lands in sce->q. */
			err = usbd_open_pipe_intr(sce->iface,
			    edesc->bEndpointAddress,
			    USBD_SHORT_XFER_OK, &sce->pipeh, sce,
			    sce->ibuf, isize, ugenintr,
			    USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return EIO;
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return EIO;
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return EINVAL;
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
			    KM_SLEEP);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
			    endpt, isize));
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
				sce->ibuf = NULL;
				return EIO;
			}
			/* Pre-queue the full set of isoc read requests. */
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				err = usbd_create_xfer(sce->pipeh,
				    isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
				    &xfer);
				if (err)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
				for (j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
				    sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
				    ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			/* Unwind the isoc requests queued so far. */
			while (--i >= 0) /* implicit buffer free */
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
			sce->ibuf = NULL;
			return ENOMEM;
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return EINVAL;
		}
	}
	sc->sc_is_open[endpt] = 1;
	return 0;
}
542
/*
 * Close a ugen minor device: for each direction that was open, abort
 * and close the pipe, tear down the per-xfertype resources, free the
 * input buffer, and finally mark the endpoint closed.
 */
static int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
	    flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return EINVAL;
	}
#endif

	/* The control endpoint has nothing but the open flag to clear. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return 0;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
		    endpt, dir, sce));

		usbd_abort_pipe(sce->pipeh);

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		/* msize records how large sce->ibuf is for kmem_free(). */
		int msize = 0;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			/* ibuf exists only when RA/WB was enabled. */
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return 0;
}
617
/*
 * Read from an IN endpoint on behalf of ugenread().
 *
 * interrupt: sleep until ugenintr() queued data on sce->q (honouring
 *     IO_NDELAY and sce->timeout), then copy it out via the softc
 *     bounce buffer.
 * bulk with read-ahead: consume data from the circular RA buffer and
 *     restart the background transfer if it stopped on a full buffer.
 * bulk, plain: synchronous usbd_bulk_transfer() in UGEN_BBSIZE chunks.
 * isochronous: drain whatever ugen_isoc_rintr() accumulated in the
 *     circular buffer, sleeping while it is empty.
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	uint32_t n, tn;
	struct usbd_xfer *xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		mutex_exit(&sc->sc_lock);

		/*
		 * Transfer as many chunks as possible.
		 * NOTE(review): sce->q is consumed here without holding
		 * sc_lock — presumably serialised elsewhere; confirm.
		 */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = uimin(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
			    uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for the RA callback to supply data. */
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
					    ("ugenread: sleep on %p\n",
					    sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
					    ("ugenread: woke, error=%d\n",
					    error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = uimin(uio->uio_resid,
					    sce->ra_wb_used);
					n = uimin(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer, sce, NULL,
					    uimin(n, sce->ra_wb_xferlen),
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain bulk read through the softc bounce buffer. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(xfer, sce->pipeh,
			    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
			    sce->timeout, sc->sc_buffer, &tn);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* Empty when the read and fill pointers coincide. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			sce->state |= UGEN_ASLP;
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			/* Copy at most up to the wrap point per pass. */
			if(sce->fill > sce->cur)
				n = uimin(sce->fill - sce->cur, uio->uio_resid);
			else
				n = uimin(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return ENXIO;
	}
	return error;
}
828
829 static int
830 ugenread(dev_t dev, struct uio *uio, int flag)
831 {
832 int endpt = UGENENDPOINT(dev);
833 struct ugen_softc *sc;
834 int error;
835
836 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
837 if (sc == NULL || sc->sc_dying)
838 return ENXIO;
839
840 mutex_enter(&sc->sc_lock);
841 sc->sc_refcnt++;
842 mutex_exit(&sc->sc_lock);
843
844 error = ugen_do_read(sc, endpt, uio, flag);
845
846 mutex_enter(&sc->sc_lock);
847 if (--sc->sc_refcnt < 0)
848 cv_broadcast(&sc->sc_detach_cv);
849 mutex_exit(&sc->sc_lock);
850
851 return error;
852 }
853
/*
 * Write to an OUT endpoint on behalf of ugenwrite().
 *
 * bulk with write-behind: copy user data into the circular WB buffer
 *     and restart the background transfer if it stopped on an empty
 *     buffer.
 * bulk, plain: synchronous usbd_bulk_transfer() in UGEN_BBSIZE chunks
 *     through the softc bounce buffer.
 * interrupt: synchronous usbd_intr_transfer() in wMaxPacketSize
 *     chunks.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	uint32_t n;
	int error = 0;
	uint32_t tn;
	char *dbuf;
	struct usbd_xfer *xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
			    uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			/* Buffer full and caller doesn't want to wait. */
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for room in the circular buffer. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
					    ("ugenwrite: sleep on %p\n",
					    sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
					    ("ugenwrite: woke, error=%d\n",
					    error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = uimin(uio->uio_resid,
					    (sce->limit - sce->ibuf)
					    - sce->ra_wb_used);
					n = uimin(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					/*
					 * Copy out the next chunk, possibly
					 * split across the wrap point, into
					 * the xfer's DMA buffer.
					 */
					dbuf = (char *)usbd_get_buffer(xfer);
					n = uimin(sce->ra_wb_used,
					    sce->ra_wb_xferlen);
					tn = uimin(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						    n - tn);
					usbd_setup_xfer(xfer, sce, NULL, n,
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain bulk write through the softc bounce buffer. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0, sce->timeout,
			    sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_INTERRUPT:
		error = usbd_create_xfer(sce->pipeh,
		    UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGETW(sce->edesc->wMaxPacketSize),
			    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	default:
		return ENXIO;
	}
	return error;
}
1018
1019 static int
1020 ugenwrite(dev_t dev, struct uio *uio, int flag)
1021 {
1022 int endpt = UGENENDPOINT(dev);
1023 struct ugen_softc *sc;
1024 int error;
1025
1026 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1027 if (sc == NULL || sc->sc_dying)
1028 return ENXIO;
1029
1030 mutex_enter(&sc->sc_lock);
1031 sc->sc_refcnt++;
1032 mutex_exit(&sc->sc_lock);
1033
1034 error = ugen_do_write(sc, endpt, uio, flag);
1035
1036 mutex_enter(&sc->sc_lock);
1037 if (--sc->sc_refcnt < 0)
1038 cv_broadcast(&sc->sc_detach_cv);
1039 mutex_exit(&sc->sc_lock);
1040
1041 return error;
1042 }
1043
1044 static int
1045 ugen_activate(device_t self, enum devact act)
1046 {
1047 struct ugen_softc *sc = device_private(self);
1048
1049 switch (act) {
1050 case DVACT_DEACTIVATE:
1051 sc->sc_dying = 1;
1052 return 0;
1053 default:
1054 return EOPNOTSUPP;
1055 }
1056 }
1057
1058 static int
1059 ugen_detach(device_t self, int flags)
1060 {
1061 struct ugen_softc *sc = device_private(self);
1062 struct ugen_endpoint *sce;
1063 int i, dir;
1064 int maj, mn;
1065
1066 DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
1067
1068 sc->sc_dying = 1;
1069 pmf_device_deregister(self);
1070 /* Abort all pipes. Causes processes waiting for transfer to wake. */
1071 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1072 for (dir = OUT; dir <= IN; dir++) {
1073 sce = &sc->sc_endpoints[i][dir];
1074 if (sce->pipeh)
1075 usbd_abort_pipe(sce->pipeh);
1076 }
1077 }
1078
1079 mutex_enter(&sc->sc_lock);
1080 if (--sc->sc_refcnt >= 0) {
1081 /* Wake everyone */
1082 for (i = 0; i < USB_MAX_ENDPOINTS; i++)
1083 cv_signal(&sc->sc_endpoints[i][IN].cv);
1084 /* Wait for processes to go away. */
1085 if (cv_timedwait(&sc->sc_detach_cv, &sc->sc_lock, hz * 60))
1086 aprint_error_dev(self, ": didn't detach\n");
1087 }
1088 mutex_exit(&sc->sc_lock);
1089
1090 /* locate the major number */
1091 maj = cdevsw_lookup_major(&ugen_cdevsw);
1092
1093 /* Nuke the vnodes for any open instances (calls close). */
1094 mn = device_unit(self) * USB_MAX_ENDPOINTS;
1095 vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
1096
1097 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);
1098
1099 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1100 for (dir = OUT; dir <= IN; dir++) {
1101 sce = &sc->sc_endpoints[i][dir];
1102 seldestroy(&sce->rsel);
1103 cv_destroy(&sce->cv);
1104 }
1105 }
1106
1107 cv_destroy(&sc->sc_detach_cv);
1108 mutex_destroy(&sc->sc_lock);
1109
1110 return 0;
1111 }
1112
/*
 * Interrupt-in pipe callback: append the received bytes to the
 * endpoint's clist queue, then wake any sleeping reader and any
 * select/poll/kqueue waiters.  Stalls are cleared asynchronously so
 * the pipe keeps running; a cancelled status means the pipe is being
 * aborted/closed and there is nothing to do.
 */
Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
	    xfer, status, count));
	DPRINTFN(5, (" data = %02x %02x %02x\n",
	    ibuf[0], ibuf[1], ibuf[2]));

	/*
	 * Queue the data; the return value (chars NOT queued) is
	 * deliberately ignored — on overflow the excess appears to be
	 * dropped.  NOTE(review): confirm against clist(9) semantics.
	 */
	(void)b_to_q(ibuf, count, &sce->q);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1150
1151 Static void
1152 ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
1153 usbd_status status)
1154 {
1155 struct isoreq *req = addr;
1156 struct ugen_endpoint *sce = req->sce;
1157 struct ugen_softc *sc = sce->sc;
1158 uint32_t count, n;
1159 int i, isize;
1160
1161 /* Return if we are aborting. */
1162 if (status == USBD_CANCELLED)
1163 return;
1164
1165 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1166 DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1167 (long)(req - sce->isoreqs), count));
1168
1169 /* throw away oldest input if the buffer is full */
1170 if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1171 sce->cur += count;
1172 if(sce->cur >= sce->limit)
1173 sce->cur = sce->ibuf + (sce->limit - sce->cur);
1174 DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1175 count));
1176 }
1177
1178 isize = UGETW(sce->edesc->wMaxPacketSize);
1179 for (i = 0; i < UGEN_NISORFRMS; i++) {
1180 uint32_t actlen = req->sizes[i];
1181 char const *tbuf = (char const *)req->dmabuf + isize * i;
1182
1183 /* copy data to buffer */
1184 while (actlen > 0) {
1185 n = uimin(actlen, sce->limit - sce->fill);
1186 memcpy(sce->fill, tbuf, n);
1187
1188 tbuf += n;
1189 actlen -= n;
1190 sce->fill += n;
1191 if(sce->fill == sce->limit)
1192 sce->fill = sce->ibuf;
1193 }
1194
1195 /* setup size for next transfer */
1196 req->sizes[i] = isize;
1197 }
1198
1199 usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0,
1200 ugen_isoc_rintr);
1201 (void)usbd_transfer(xfer);
1202
1203 mutex_enter(&sc->sc_lock);
1204 if (sce->state & UGEN_ASLP) {
1205 sce->state &= ~UGEN_ASLP;
1206 DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1207 cv_signal(&sce->cv);
1208 }
1209 mutex_exit(&sc->sc_lock);
1210 selnotify(&sce->rsel, 0, 0);
1211 }
1212
/*
 * Completion handler for bulk read-ahead (RA) mode.  Runs in usbd
 * completion context.  Appends the received data to the endpoint's
 * ring buffer, queues the next read while there is still free space,
 * and wakes readers.  On error or a full buffer, UGEN_RA_WB_STOP is
 * set so the next read from userland restarts the pipeline.
 */
Static void
ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		/* Stop read-ahead; a later read will retry. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer, possibly in two pieces across the wrap. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = uimin(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		/* Second piece after wrapping to the buffer start. */
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary (n = free space left). */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce, NULL, uimin(n, sce->ra_wb_xferlen), 0,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued. Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1281
/*
 * Completion handler for bulk write-behind (WB) mode.  Runs in usbd
 * completion context.  Consumes the bytes just written from the ring
 * buffer, queues the next write while buffered data remains, and
 * wakes writers waiting for space.  On error or an empty buffer,
 * UGEN_RA_WB_STOP is set so the next write restarts the pipeline.
 */
Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		/* Stop write-behind; a later write will retry. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers (cur is the consume pointer here). */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer, possibly wrapping past the end */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = uimin(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = uimin(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
		    ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued. Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1349
1350 Static usbd_status
1351 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1352 {
1353 struct usbd_interface *iface;
1354 usb_endpoint_descriptor_t *ed;
1355 usbd_status err;
1356 struct ugen_endpoint *sce;
1357 uint8_t niface, nendpt, endptno, endpt;
1358 int dir;
1359
1360 DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1361
1362 err = usbd_interface_count(sc->sc_udev, &niface);
1363 if (err)
1364 return err;
1365 if (ifaceidx < 0 || ifaceidx >= niface)
1366 return USBD_INVAL;
1367
1368 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1369 if (err)
1370 return err;
1371 err = usbd_endpoint_count(iface, &nendpt);
1372 if (err)
1373 return err;
1374
1375 /* change setting */
1376 err = usbd_set_interface(iface, altno);
1377 if (err)
1378 return err;
1379
1380 err = usbd_endpoint_count(iface, &nendpt);
1381 if (err)
1382 return err;
1383
1384 ugen_clear_endpoints(sc);
1385
1386 for (endptno = 0; endptno < nendpt; endptno++) {
1387 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1388 KASSERT(ed != NULL);
1389 endpt = ed->bEndpointAddress;
1390 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1391 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1392 sce->sc = sc;
1393 sce->edesc = ed;
1394 sce->iface = iface;
1395 }
1396 return 0;
1397 }
1398
1399 /* Retrieve a complete descriptor for a certain device and index. */
1400 Static usb_config_descriptor_t *
1401 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1402 {
1403 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1404 int len;
1405 usbd_status err;
1406
1407 if (index == USB_CURRENT_CONFIG_INDEX) {
1408 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1409 if (tdesc == NULL)
1410 return NULL;
1411 len = UGETW(tdesc->wTotalLength);
1412 if (lenp)
1413 *lenp = len;
1414 cdesc = kmem_alloc(len, KM_SLEEP);
1415 memcpy(cdesc, tdesc, len);
1416 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1417 } else {
1418 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1419 if (err)
1420 return 0;
1421 len = UGETW(cdescr.wTotalLength);
1422 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1423 if (lenp)
1424 *lenp = len;
1425 cdesc = kmem_alloc(len, KM_SLEEP);
1426 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1427 if (err) {
1428 kmem_free(cdesc, len);
1429 return 0;
1430 }
1431 }
1432 return cdesc;
1433 }
1434
1435 Static int
1436 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1437 {
1438 struct usbd_interface *iface;
1439 usbd_status err;
1440
1441 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1442 if (err)
1443 return -1;
1444 return usbd_get_interface_altindex(iface);
1445 }
1446
1447 Static int
1448 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1449 void *addr, int flag, struct lwp *l)
1450 {
1451 struct ugen_endpoint *sce;
1452 usbd_status err;
1453 struct usbd_interface *iface;
1454 struct usb_config_desc *cd;
1455 usb_config_descriptor_t *cdesc;
1456 struct usb_interface_desc *id;
1457 usb_interface_descriptor_t *idesc;
1458 struct usb_endpoint_desc *ed;
1459 usb_endpoint_descriptor_t *edesc;
1460 struct usb_alt_interface *ai;
1461 struct usb_string_desc *si;
1462 uint8_t conf, alt;
1463 int cdesclen;
1464 int error;
1465 int dir;
1466
1467 DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
1468 if (sc->sc_dying)
1469 return EIO;
1470
1471 switch (cmd) {
1472 case FIONBIO:
1473 /* All handled in the upper FS layer. */
1474 return 0;
1475 case USB_SET_SHORT_XFER:
1476 if (endpt == USB_CONTROL_ENDPOINT)
1477 return EINVAL;
1478 /* This flag only affects read */
1479 sce = &sc->sc_endpoints[endpt][IN];
1480 if (sce == NULL || sce->pipeh == NULL)
1481 return EINVAL;
1482 if (*(int *)addr)
1483 sce->state |= UGEN_SHORT_OK;
1484 else
1485 sce->state &= ~UGEN_SHORT_OK;
1486 return 0;
1487 case USB_SET_TIMEOUT:
1488 for (dir = OUT; dir <= IN; dir++) {
1489 sce = &sc->sc_endpoints[endpt][dir];
1490 if (sce == NULL)
1491 return EINVAL;
1492
1493 sce->timeout = *(int *)addr;
1494 }
1495 return 0;
1496 case USB_SET_BULK_RA:
1497 if (endpt == USB_CONTROL_ENDPOINT)
1498 return EINVAL;
1499 sce = &sc->sc_endpoints[endpt][IN];
1500 if (sce == NULL || sce->pipeh == NULL)
1501 return EINVAL;
1502 edesc = sce->edesc;
1503 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1504 return EINVAL;
1505
1506 if (*(int *)addr) {
1507 /* Only turn RA on if it's currently off. */
1508 if (sce->state & UGEN_BULK_RA)
1509 return 0;
1510
1511 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1512 /* shouldn't happen */
1513 return EINVAL;
1514 error = usbd_create_xfer(sce->pipeh,
1515 sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
1516 if (error)
1517 return error;
1518 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1519 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1520 sce->fill = sce->cur = sce->ibuf;
1521 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1522 sce->ra_wb_used = 0;
1523 sce->state |= UGEN_BULK_RA;
1524 sce->state &= ~UGEN_RA_WB_STOP;
1525 /* Now start reading. */
1526 usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL,
1527 uimin(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1528 0, USBD_NO_TIMEOUT, ugen_bulkra_intr);
1529 err = usbd_transfer(sce->ra_wb_xfer);
1530 if (err != USBD_IN_PROGRESS) {
1531 sce->state &= ~UGEN_BULK_RA;
1532 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1533 sce->ibuf = NULL;
1534 usbd_destroy_xfer(sce->ra_wb_xfer);
1535 return EIO;
1536 }
1537 } else {
1538 /* Only turn RA off if it's currently on. */
1539 if (!(sce->state & UGEN_BULK_RA))
1540 return 0;
1541
1542 sce->state &= ~UGEN_BULK_RA;
1543 usbd_abort_pipe(sce->pipeh);
1544 usbd_destroy_xfer(sce->ra_wb_xfer);
1545 /*
1546 * XXX Discard whatever's in the buffer, but we
1547 * should keep it around and drain the buffer
1548 * instead.
1549 */
1550 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1551 sce->ibuf = NULL;
1552 }
1553 return 0;
1554 case USB_SET_BULK_WB:
1555 if (endpt == USB_CONTROL_ENDPOINT)
1556 return EINVAL;
1557 sce = &sc->sc_endpoints[endpt][OUT];
1558 if (sce == NULL || sce->pipeh == NULL)
1559 return EINVAL;
1560 edesc = sce->edesc;
1561 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1562 return EINVAL;
1563
1564 if (*(int *)addr) {
1565 /* Only turn WB on if it's currently off. */
1566 if (sce->state & UGEN_BULK_WB)
1567 return 0;
1568
1569 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1570 /* shouldn't happen */
1571 return EINVAL;
1572 error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize,
1573 0, 0, &sce->ra_wb_xfer);
1574 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1575 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1576 sce->fill = sce->cur = sce->ibuf;
1577 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1578 sce->ra_wb_used = 0;
1579 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1580 } else {
1581 /* Only turn WB off if it's currently on. */
1582 if (!(sce->state & UGEN_BULK_WB))
1583 return 0;
1584
1585 sce->state &= ~UGEN_BULK_WB;
1586 /*
1587 * XXX Discard whatever's in the buffer, but we
1588 * should keep it around and keep writing to
1589 * drain the buffer instead.
1590 */
1591 usbd_abort_pipe(sce->pipeh);
1592 usbd_destroy_xfer(sce->ra_wb_xfer);
1593 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1594 sce->ibuf = NULL;
1595 }
1596 return 0;
1597 case USB_SET_BULK_RA_OPT:
1598 case USB_SET_BULK_WB_OPT:
1599 {
1600 struct usb_bulk_ra_wb_opt *opt;
1601
1602 if (endpt == USB_CONTROL_ENDPOINT)
1603 return EINVAL;
1604 opt = (struct usb_bulk_ra_wb_opt *)addr;
1605 if (cmd == USB_SET_BULK_RA_OPT)
1606 sce = &sc->sc_endpoints[endpt][IN];
1607 else
1608 sce = &sc->sc_endpoints[endpt][OUT];
1609 if (sce == NULL || sce->pipeh == NULL)
1610 return EINVAL;
1611 if (opt->ra_wb_buffer_size < 1 ||
1612 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1613 opt->ra_wb_request_size < 1 ||
1614 opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1615 return EINVAL;
1616 /*
1617 * XXX These changes do not take effect until the
1618 * next time RA/WB mode is enabled but they ought to
1619 * take effect immediately.
1620 */
1621 sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1622 sce->ra_wb_reqsize = opt->ra_wb_request_size;
1623 return 0;
1624 }
1625 default:
1626 break;
1627 }
1628
1629 if (endpt != USB_CONTROL_ENDPOINT)
1630 return EINVAL;
1631
1632 switch (cmd) {
1633 #ifdef UGEN_DEBUG
1634 case USB_SETDEBUG:
1635 ugendebug = *(int *)addr;
1636 break;
1637 #endif
1638 case USB_GET_CONFIG:
1639 err = usbd_get_config(sc->sc_udev, &conf);
1640 if (err)
1641 return EIO;
1642 *(int *)addr = conf;
1643 break;
1644 case USB_SET_CONFIG:
1645 if (!(flag & FWRITE))
1646 return EPERM;
1647 err = ugen_set_config(sc, *(int *)addr, 1);
1648 switch (err) {
1649 case USBD_NORMAL_COMPLETION:
1650 break;
1651 case USBD_IN_USE:
1652 return EBUSY;
1653 default:
1654 return EIO;
1655 }
1656 break;
1657 case USB_GET_ALTINTERFACE:
1658 ai = (struct usb_alt_interface *)addr;
1659 err = usbd_device2interface_handle(sc->sc_udev,
1660 ai->uai_interface_index, &iface);
1661 if (err)
1662 return EINVAL;
1663 idesc = usbd_get_interface_descriptor(iface);
1664 if (idesc == NULL)
1665 return EIO;
1666 ai->uai_alt_no = idesc->bAlternateSetting;
1667 break;
1668 case USB_SET_ALTINTERFACE:
1669 if (!(flag & FWRITE))
1670 return EPERM;
1671 ai = (struct usb_alt_interface *)addr;
1672 err = usbd_device2interface_handle(sc->sc_udev,
1673 ai->uai_interface_index, &iface);
1674 if (err)
1675 return EINVAL;
1676 err = ugen_set_interface(sc, ai->uai_interface_index,
1677 ai->uai_alt_no);
1678 if (err)
1679 return EINVAL;
1680 break;
1681 case USB_GET_NO_ALT:
1682 ai = (struct usb_alt_interface *)addr;
1683 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
1684 if (cdesc == NULL)
1685 return EINVAL;
1686 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1687 if (idesc == NULL) {
1688 kmem_free(cdesc, cdesclen);
1689 return EINVAL;
1690 }
1691 ai->uai_alt_no = usbd_get_no_alts(cdesc,
1692 idesc->bInterfaceNumber);
1693 kmem_free(cdesc, cdesclen);
1694 break;
1695 case USB_GET_DEVICE_DESC:
1696 *(usb_device_descriptor_t *)addr =
1697 *usbd_get_device_descriptor(sc->sc_udev);
1698 break;
1699 case USB_GET_CONFIG_DESC:
1700 cd = (struct usb_config_desc *)addr;
1701 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
1702 if (cdesc == NULL)
1703 return EINVAL;
1704 cd->ucd_desc = *cdesc;
1705 kmem_free(cdesc, cdesclen);
1706 break;
1707 case USB_GET_INTERFACE_DESC:
1708 id = (struct usb_interface_desc *)addr;
1709 cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
1710 if (cdesc == NULL)
1711 return EINVAL;
1712 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1713 id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1714 alt = ugen_get_alt_index(sc, id->uid_interface_index);
1715 else
1716 alt = id->uid_alt_index;
1717 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1718 if (idesc == NULL) {
1719 kmem_free(cdesc, cdesclen);
1720 return EINVAL;
1721 }
1722 id->uid_desc = *idesc;
1723 kmem_free(cdesc, cdesclen);
1724 break;
1725 case USB_GET_ENDPOINT_DESC:
1726 ed = (struct usb_endpoint_desc *)addr;
1727 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
1728 if (cdesc == NULL)
1729 return EINVAL;
1730 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1731 ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1732 alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1733 else
1734 alt = ed->ued_alt_index;
1735 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1736 alt, ed->ued_endpoint_index);
1737 if (edesc == NULL) {
1738 kmem_free(cdesc, cdesclen);
1739 return EINVAL;
1740 }
1741 ed->ued_desc = *edesc;
1742 kmem_free(cdesc, cdesclen);
1743 break;
1744 case USB_GET_FULL_DESC:
1745 {
1746 int len;
1747 struct iovec iov;
1748 struct uio uio;
1749 struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1750
1751 cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
1752 if (cdesc == NULL)
1753 return EINVAL;
1754 len = cdesclen;
1755 if (len > fd->ufd_size)
1756 len = fd->ufd_size;
1757 iov.iov_base = (void *)fd->ufd_data;
1758 iov.iov_len = len;
1759 uio.uio_iov = &iov;
1760 uio.uio_iovcnt = 1;
1761 uio.uio_resid = len;
1762 uio.uio_offset = 0;
1763 uio.uio_rw = UIO_READ;
1764 uio.uio_vmspace = l->l_proc->p_vmspace;
1765 error = uiomove((void *)cdesc, len, &uio);
1766 kmem_free(cdesc, cdesclen);
1767 return error;
1768 }
1769 case USB_GET_STRING_DESC: {
1770 int len;
1771 si = (struct usb_string_desc *)addr;
1772 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1773 si->usd_language_id, &si->usd_desc, &len);
1774 if (err)
1775 return EINVAL;
1776 break;
1777 }
1778 case USB_DO_REQUEST:
1779 {
1780 struct usb_ctl_request *ur = (void *)addr;
1781 int len = UGETW(ur->ucr_request.wLength);
1782 struct iovec iov;
1783 struct uio uio;
1784 void *ptr = 0;
1785 usbd_status xerr;
1786
1787 error = 0;
1788
1789 if (!(flag & FWRITE))
1790 return EPERM;
1791 /* Avoid requests that would damage the bus integrity. */
1792 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1793 ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
1794 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1795 ur->ucr_request.bRequest == UR_SET_CONFIG) ||
1796 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
1797 ur->ucr_request.bRequest == UR_SET_INTERFACE))
1798 return EINVAL;
1799
1800 if (len < 0 || len > 32767)
1801 return EINVAL;
1802 if (len != 0) {
1803 iov.iov_base = (void *)ur->ucr_data;
1804 iov.iov_len = len;
1805 uio.uio_iov = &iov;
1806 uio.uio_iovcnt = 1;
1807 uio.uio_resid = len;
1808 uio.uio_offset = 0;
1809 uio.uio_rw =
1810 ur->ucr_request.bmRequestType & UT_READ ?
1811 UIO_READ : UIO_WRITE;
1812 uio.uio_vmspace = l->l_proc->p_vmspace;
1813 ptr = kmem_alloc(len, KM_SLEEP);
1814 if (uio.uio_rw == UIO_WRITE) {
1815 error = uiomove(ptr, len, &uio);
1816 if (error)
1817 goto ret;
1818 }
1819 }
1820 sce = &sc->sc_endpoints[endpt][IN];
1821 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
1822 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
1823 if (xerr) {
1824 error = EIO;
1825 goto ret;
1826 }
1827 if (len != 0) {
1828 if (uio.uio_rw == UIO_READ) {
1829 size_t alen = uimin(len, ur->ucr_actlen);
1830 error = uiomove(ptr, alen, &uio);
1831 if (error)
1832 goto ret;
1833 }
1834 }
1835 ret:
1836 if (ptr)
1837 kmem_free(ptr, len);
1838 return error;
1839 }
1840 case USB_GET_DEVICEINFO:
1841 usbd_fill_deviceinfo(sc->sc_udev,
1842 (struct usb_device_info *)addr, 0);
1843 break;
1844 case USB_GET_DEVICEINFO_OLD:
1845 {
1846 int ret;
1847 MODULE_HOOK_CALL(usb_subr_fill_30_hook,
1848 (sc->sc_udev, (struct usb_device_info_old *)addr, 0,
1849 usbd_devinfo_vp, usbd_printBCD),
1850 enosys(), ret);
1851 if (ret == 0)
1852 return 0;
1853 return EINVAL;
1854 }
1855 default:
1856 return EINVAL;
1857 }
1858 return 0;
1859 }
1860
1861 static int
1862 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1863 {
1864 int endpt = UGENENDPOINT(dev);
1865 struct ugen_softc *sc;
1866 int error;
1867
1868 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1869 if (sc == NULL || sc->sc_dying)
1870 return ENXIO;
1871
1872 sc->sc_refcnt++;
1873 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1874 if (--sc->sc_refcnt < 0)
1875 cv_broadcast(&sc->sc_detach_cv);
1876 return error;
1877 }
1878
/*
 * poll(2) entry point.  Reports readability/writability per the
 * transfer type of the IN and OUT endpoints behind this minor device.
 * The control endpoint does not support poll (ENODEV).
 */
static int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return POLLHUP;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	/*
	 * NOTE(review): these are addresses of array elements, so the
	 * NULL checks below can never fire; they look vestigial.
	 */
	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return POLLERR;
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return POLLERR;
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return POLLERR;
	}
#endif

	mutex_enter(&sc->sc_lock);
	/* Read side: readiness depends on the endpoint's transfer type. */
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Ready when the clist has queued bytes. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Ready when the ring buffer is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* RA mode: ready when data is buffered. */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	/* Write side: only bulk write-behind has real backpressure info. */
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* WB mode: ready when buffer space remains. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return revents;
}
1977
1978 static void
1979 filt_ugenrdetach(struct knote *kn)
1980 {
1981 struct ugen_endpoint *sce = kn->kn_hook;
1982 struct ugen_softc *sc = sce->sc;
1983
1984 mutex_enter(&sc->sc_lock);
1985 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1986 mutex_exit(&sc->sc_lock);
1987 }
1988
1989 static int
1990 filt_ugenread_intr(struct knote *kn, long hint)
1991 {
1992 struct ugen_endpoint *sce = kn->kn_hook;
1993 struct ugen_softc *sc = sce->sc;
1994
1995 if (sc->sc_dying)
1996 return 0;
1997
1998 kn->kn_data = sce->q.c_cc;
1999 return kn->kn_data > 0;
2000 }
2001
2002 static int
2003 filt_ugenread_isoc(struct knote *kn, long hint)
2004 {
2005 struct ugen_endpoint *sce = kn->kn_hook;
2006 struct ugen_softc *sc = sce->sc;
2007
2008 if (sc->sc_dying)
2009 return 0;
2010
2011 if (sce->cur == sce->fill)
2012 return 0;
2013
2014 if (sce->cur < sce->fill)
2015 kn->kn_data = sce->fill - sce->cur;
2016 else
2017 kn->kn_data = (sce->limit - sce->cur) +
2018 (sce->fill - sce->ibuf);
2019
2020 return 1;
2021 }
2022
2023 static int
2024 filt_ugenread_bulk(struct knote *kn, long hint)
2025 {
2026 struct ugen_endpoint *sce = kn->kn_hook;
2027 struct ugen_softc *sc = sce->sc;
2028
2029 if (sc->sc_dying)
2030 return 0;
2031
2032 if (!(sce->state & UGEN_BULK_RA))
2033 /*
2034 * We have no easy way of determining if a read will
2035 * yield any data or a write will happen.
2036 * So, emulate "seltrue".
2037 */
2038 return filt_seltrue(kn, hint);
2039
2040 if (sce->ra_wb_used == 0)
2041 return 0;
2042
2043 kn->kn_data = sce->ra_wb_used;
2044
2045 return 1;
2046 }
2047
2048 static int
2049 filt_ugenwrite_bulk(struct knote *kn, long hint)
2050 {
2051 struct ugen_endpoint *sce = kn->kn_hook;
2052 struct ugen_softc *sc = sce->sc;
2053
2054 if (sc->sc_dying)
2055 return 0;
2056
2057 if (!(sce->state & UGEN_BULK_WB))
2058 /*
2059 * We have no easy way of determining if a read will
2060 * yield any data or a write will happen.
2061 * So, emulate "seltrue".
2062 */
2063 return filt_seltrue(kn, hint);
2064
2065 if (sce->ra_wb_used == sce->limit - sce->ibuf)
2066 return 0;
2067
2068 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2069
2070 return 1;
2071 }
2072
/*
 * kqueue filter operation tables.  All four share filt_ugenrdetach()
 * for detach; attach is done directly in ugenkqfilter() (f_attach is
 * NULL), and only the event callback differs per transfer type.
 */
static const struct filterops ugenread_intr_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_intr,
};

static const struct filterops ugenread_isoc_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_isoc,
};

static const struct filterops ugenread_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_bulk,
};

static const struct filterops ugenwrite_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenwrite_bulk,
};
2100
/*
 * kqueue(2) attach entry point.  Selects the filterops table matching
 * the filter direction and the endpoint's transfer type, then links
 * the knote onto the endpoint's klist under the softc lock.  The
 * control endpoint does not support kqueue (ENODEV).
 */
static int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return EINVAL;

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	default:
		return EINVAL;
	}

	/* The knote remembers which endpoint it watches. */
	kn->kn_hook = sce;

	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return 0;
}
2169