ugen.c revision 1.124.4.1 1 /* $NetBSD: ugen.c,v 1.124.4.1 2016/03/06 18:10:20 martin Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.124.4.1 2016/03/06 18:10:20 martin Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/malloc.h>
50 #include <sys/device.h>
51 #include <sys/ioctl.h>
52 #include <sys/conf.h>
53 #include <sys/tty.h>
54 #include <sys/file.h>
55 #include <sys/select.h>
56 #include <sys/proc.h>
57 #include <sys/vnode.h>
58 #include <sys/poll.h>
59
60 #include <dev/usb/usb.h>
61 #include <dev/usb/usbdi.h>
62 #include <dev/usb/usbdi_util.h>
63
#ifdef UGEN_DEBUG
#define DPRINTF(x)	if (ugendebug) printf x
#define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
int	ugendebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define	UGEN_CHUNK	128	/* chunk size for read */
#define	UGEN_IBSIZE	1020	/* buffer size */
#define	UGEN_BBSIZE	1024

#define	UGEN_NISOREQS	4	/* number of outstanding xfer requests */
#define	UGEN_NISORFRMS	8	/* number of transactions per req */
#define	UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)

#define	UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
#define	UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
83
84 struct ugen_endpoint {
85 struct ugen_softc *sc;
86 usb_endpoint_descriptor_t *edesc;
87 usbd_interface_handle iface;
88 int state;
89 #define UGEN_ASLP 0x02 /* waiting for data */
90 #define UGEN_SHORT_OK 0x04 /* short xfers are OK */
91 #define UGEN_BULK_RA 0x08 /* in bulk read-ahead mode */
92 #define UGEN_BULK_WB 0x10 /* in bulk write-behind mode */
93 #define UGEN_RA_WB_STOP 0x20 /* RA/WB xfer is stopped (buffer full/empty) */
94 usbd_pipe_handle pipeh;
95 struct clist q;
96 u_char *ibuf; /* start of buffer (circular for isoc) */
97 u_char *fill; /* location for input (isoc) */
98 u_char *limit; /* end of circular buffer (isoc) */
99 u_char *cur; /* current read location (isoc) */
100 u_int32_t timeout;
101 u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
102 u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
103 u_int32_t ra_wb_used; /* how much is in buffer */
104 u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
105 usbd_xfer_handle ra_wb_xfer;
106 struct isoreq {
107 struct ugen_endpoint *sce;
108 usbd_xfer_handle xfer;
109 void *dmabuf;
110 u_int16_t sizes[UGEN_NISORFRMS];
111 } isoreqs[UGEN_NISOREQS];
112 /* Keep these last; we don't overwrite them in ugen_set_config() */
113 #define UGEN_ENDPOINT_NONZERO_CRUFT offsetof(struct ugen_endpoint, rsel)
114 struct selinfo rsel;
115 kcondvar_t cv;
116 };
117
118 struct ugen_softc {
119 device_t sc_dev; /* base device */
120 usbd_device_handle sc_udev;
121
122 kmutex_t sc_lock;
123 kcondvar_t sc_detach_cv;
124
125 char sc_is_open[USB_MAX_ENDPOINTS];
126 struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
127 #define OUT 0
128 #define IN 1
129
130 int sc_refcnt;
131 char sc_buffer[UGEN_BBSIZE];
132 u_char sc_dying;
133 };
134
135 dev_type_open(ugenopen);
136 dev_type_close(ugenclose);
137 dev_type_read(ugenread);
138 dev_type_write(ugenwrite);
139 dev_type_ioctl(ugenioctl);
140 dev_type_poll(ugenpoll);
141 dev_type_kqfilter(ugenkqfilter);
142
143 const struct cdevsw ugen_cdevsw = {
144 .d_open = ugenopen,
145 .d_close = ugenclose,
146 .d_read = ugenread,
147 .d_write = ugenwrite,
148 .d_ioctl = ugenioctl,
149 .d_stop = nostop,
150 .d_tty = notty,
151 .d_poll = ugenpoll,
152 .d_mmap = nommap,
153 .d_kqfilter = ugenkqfilter,
154 .d_discard = nodiscard,
155 .d_flag = D_OTHER,
156 };
157
158 Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
159 usbd_status status);
160 Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
161 usbd_status status);
162 Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
163 usbd_status status);
164 Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
165 usbd_status status);
166 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
167 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
168 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
169 void *, int, struct lwp *);
170 Static int ugen_set_config(struct ugen_softc *sc, int configno);
171 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
172 int index, int *lenp);
173 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
174 Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);
175
176 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
177 #define UGENENDPOINT(n) (minor(n) & 0xf)
178 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
179
180 int ugen_match(device_t, cfdata_t, void *);
181 void ugen_attach(device_t, device_t, void *);
182 int ugen_detach(device_t, int);
183 int ugen_activate(device_t, enum devact);
184 extern struct cfdriver ugen_cd;
185 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach, ugen_detach, ugen_activate);
186
187 /* toggle to control attach priority. -1 means "let autoconf decide" */
188 int ugen_override = -1;
189
190 int
191 ugen_match(device_t parent, cfdata_t match, void *aux)
192 {
193 struct usb_attach_arg *uaa = aux;
194 int override;
195
196 if (ugen_override != -1)
197 override = ugen_override;
198 else
199 override = match->cf_flags & 1;
200
201 if (override)
202 return (UMATCH_HIGHEST);
203 else if (uaa->usegeneric)
204 return (UMATCH_GENERIC);
205 else
206 return (UMATCH_NONE);
207 }
208
209 void
210 ugen_attach(device_t parent, device_t self, void *aux)
211 {
212 struct ugen_softc *sc = device_private(self);
213 struct usb_attach_arg *uaa = aux;
214 usbd_device_handle udev;
215 char *devinfop;
216 usbd_status err;
217 int i, dir, conf;
218
219 aprint_naive("\n");
220 aprint_normal("\n");
221
222 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_USB);
223 cv_init(&sc->sc_detach_cv, "ugendet");
224
225 devinfop = usbd_devinfo_alloc(uaa->device, 0);
226 aprint_normal_dev(self, "%s\n", devinfop);
227 usbd_devinfo_free(devinfop);
228
229 sc->sc_dev = self;
230 sc->sc_udev = udev = uaa->device;
231
232 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
233 for (dir = OUT; dir <= IN; dir++) {
234 struct ugen_endpoint *sce;
235
236 sce = &sc->sc_endpoints[i][dir];
237 selinit(&sce->rsel);
238 cv_init(&sce->cv, "ugensce");
239 }
240 }
241
242 /* First set configuration index 0, the default one for ugen. */
243 err = usbd_set_config_index(udev, 0, 0);
244 if (err) {
245 aprint_error_dev(self,
246 "setting configuration index 0 failed\n");
247 sc->sc_dying = 1;
248 return;
249 }
250 conf = usbd_get_config_descriptor(udev)->bConfigurationValue;
251
252 /* Set up all the local state for this configuration. */
253 err = ugen_set_config(sc, conf);
254 if (err) {
255 aprint_error_dev(self, "setting configuration %d failed\n",
256 conf);
257 sc->sc_dying = 1;
258 return;
259 }
260
261 usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
262 sc->sc_dev);
263
264 if (!pmf_device_register(self, NULL, NULL))
265 aprint_error_dev(self, "couldn't establish power handler\n");
266
267 return;
268 }
269
270 Static void
271 ugen_clear_endpoints(struct ugen_softc *sc)
272 {
273
274 /* Clear out the old info, but leave the selinfo and cv initialised. */
275 for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
276 for (int dir = OUT; dir <= IN; dir++) {
277 struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
278 memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
279 }
280 }
281 }
282
283 Static int
284 ugen_set_config(struct ugen_softc *sc, int configno)
285 {
286 usbd_device_handle dev = sc->sc_udev;
287 usb_config_descriptor_t *cdesc;
288 usbd_interface_handle iface;
289 usb_endpoint_descriptor_t *ed;
290 struct ugen_endpoint *sce;
291 u_int8_t niface, nendpt;
292 int ifaceno, endptno, endpt;
293 usbd_status err;
294 int dir;
295
296 DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
297 device_xname(sc->sc_dev), configno, sc));
298
299 /*
300 * We start at 1, not 0, because we don't care whether the
301 * control endpoint is open or not. It is always present.
302 */
303 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
304 if (sc->sc_is_open[endptno]) {
305 DPRINTFN(1,
306 ("ugen_set_config: %s - endpoint %d is open\n",
307 device_xname(sc->sc_dev), endptno));
308 return (USBD_IN_USE);
309 }
310
311 /* Avoid setting the current value. */
312 cdesc = usbd_get_config_descriptor(dev);
313 if (!cdesc || cdesc->bConfigurationValue != configno) {
314 err = usbd_set_config_no(dev, configno, 1);
315 if (err)
316 return (err);
317 }
318
319 err = usbd_interface_count(dev, &niface);
320 if (err)
321 return (err);
322
323 ugen_clear_endpoints(sc);
324
325 for (ifaceno = 0; ifaceno < niface; ifaceno++) {
326 DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
327 err = usbd_device2interface_handle(dev, ifaceno, &iface);
328 if (err)
329 return (err);
330 err = usbd_endpoint_count(iface, &nendpt);
331 if (err)
332 return (err);
333 for (endptno = 0; endptno < nendpt; endptno++) {
334 ed = usbd_interface2endpoint_descriptor(iface,endptno);
335 KASSERT(ed != NULL);
336 endpt = ed->bEndpointAddress;
337 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
338 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
339 DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
340 "(%d,%d), sce=%p\n",
341 endptno, endpt, UE_GET_ADDR(endpt),
342 UE_GET_DIR(endpt), sce));
343 sce->sc = sc;
344 sce->edesc = ed;
345 sce->iface = iface;
346 }
347 }
348 return (USBD_NORMAL_COMPLETION);
349 }
350
351 int
352 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
353 {
354 struct ugen_softc *sc;
355 int unit = UGENUNIT(dev);
356 int endpt = UGENENDPOINT(dev);
357 usb_endpoint_descriptor_t *edesc;
358 struct ugen_endpoint *sce;
359 int dir, isize;
360 usbd_status err;
361 usbd_xfer_handle xfer;
362 void *tbuf;
363 int i, j;
364
365 sc = device_lookup_private(&ugen_cd, unit);
366 if (sc == NULL)
367 return ENXIO;
368
369 DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
370 flag, mode, unit, endpt));
371
372 if (sc == NULL || sc->sc_dying)
373 return (ENXIO);
374
375 /* The control endpoint allows multiple opens. */
376 if (endpt == USB_CONTROL_ENDPOINT) {
377 sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
378 return (0);
379 }
380
381 if (sc->sc_is_open[endpt])
382 return (EBUSY);
383
384 /* Make sure there are pipes for all directions. */
385 for (dir = OUT; dir <= IN; dir++) {
386 if (flag & (dir == OUT ? FWRITE : FREAD)) {
387 sce = &sc->sc_endpoints[endpt][dir];
388 if (sce->edesc == NULL)
389 return (ENXIO);
390 }
391 }
392
393 /* Actually open the pipes. */
394 /* XXX Should back out properly if it fails. */
395 for (dir = OUT; dir <= IN; dir++) {
396 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
397 continue;
398 sce = &sc->sc_endpoints[endpt][dir];
399 sce->state = 0;
400 sce->timeout = USBD_NO_TIMEOUT;
401 DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
402 sc, endpt, dir, sce));
403 edesc = sce->edesc;
404 switch (edesc->bmAttributes & UE_XFERTYPE) {
405 case UE_INTERRUPT:
406 if (dir == OUT) {
407 err = usbd_open_pipe(sce->iface,
408 edesc->bEndpointAddress, 0, &sce->pipeh);
409 if (err)
410 return (EIO);
411 break;
412 }
413 isize = UGETW(edesc->wMaxPacketSize);
414 if (isize == 0) /* shouldn't happen */
415 return (EINVAL);
416 sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
417 DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
418 endpt, isize));
419 if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
420 free(sce->ibuf, M_USBDEV);
421 sce->ibuf = NULL;
422 return (ENOMEM);
423 }
424 err = usbd_open_pipe_intr(sce->iface,
425 edesc->bEndpointAddress,
426 USBD_SHORT_XFER_OK, &sce->pipeh, sce,
427 sce->ibuf, isize, ugenintr,
428 USBD_DEFAULT_INTERVAL);
429 if (err) {
430 clfree(&sce->q);
431 free(sce->ibuf, M_USBDEV);
432 sce->ibuf = NULL;
433 return (EIO);
434 }
435 DPRINTFN(5, ("ugenopen: interrupt open done\n"));
436 break;
437 case UE_BULK:
438 err = usbd_open_pipe(sce->iface,
439 edesc->bEndpointAddress, 0, &sce->pipeh);
440 if (err)
441 return (EIO);
442 sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
443 /*
444 * Use request size for non-RA/WB transfers
445 * as the default.
446 */
447 sce->ra_wb_reqsize = UGEN_BBSIZE;
448 break;
449 case UE_ISOCHRONOUS:
450 if (dir == OUT)
451 return (EINVAL);
452 isize = UGETW(edesc->wMaxPacketSize);
453 if (isize == 0) /* shouldn't happen */
454 return (EINVAL);
455 sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
456 M_USBDEV, M_WAITOK);
457 sce->cur = sce->fill = sce->ibuf;
458 sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
459 DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
460 endpt, isize));
461 err = usbd_open_pipe(sce->iface,
462 edesc->bEndpointAddress, 0, &sce->pipeh);
463 if (err) {
464 free(sce->ibuf, M_USBDEV);
465 sce->ibuf = NULL;
466 return (EIO);
467 }
468 for(i = 0; i < UGEN_NISOREQS; ++i) {
469 sce->isoreqs[i].sce = sce;
470 xfer = usbd_alloc_xfer(sc->sc_udev);
471 if (xfer == 0)
472 goto bad;
473 sce->isoreqs[i].xfer = xfer;
474 tbuf = usbd_alloc_buffer
475 (xfer, isize * UGEN_NISORFRMS);
476 if (tbuf == 0) {
477 i++;
478 goto bad;
479 }
480 sce->isoreqs[i].dmabuf = tbuf;
481 for(j = 0; j < UGEN_NISORFRMS; ++j)
482 sce->isoreqs[i].sizes[j] = isize;
483 usbd_setup_isoc_xfer
484 (xfer, sce->pipeh, &sce->isoreqs[i],
485 sce->isoreqs[i].sizes,
486 UGEN_NISORFRMS, USBD_NO_COPY,
487 ugen_isoc_rintr);
488 (void)usbd_transfer(xfer);
489 }
490 DPRINTFN(5, ("ugenopen: isoc open done\n"));
491 break;
492 bad:
493 while (--i >= 0) /* implicit buffer free */
494 usbd_free_xfer(sce->isoreqs[i].xfer);
495 usbd_close_pipe(sce->pipeh);
496 sce->pipeh = NULL;
497 free(sce->ibuf, M_USBDEV);
498 sce->ibuf = NULL;
499 return (ENOMEM);
500 case UE_CONTROL:
501 sce->timeout = USBD_DEFAULT_TIMEOUT;
502 return (EINVAL);
503 }
504 }
505 sc->sc_is_open[endpt] = 1;
506 return (0);
507 }
508
509 int
510 ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
511 {
512 int endpt = UGENENDPOINT(dev);
513 struct ugen_softc *sc;
514 struct ugen_endpoint *sce;
515 int dir;
516 int i;
517
518 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
519 if (sc == NULL)
520 return ENXIO;
521
522 DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
523 flag, mode, UGENUNIT(dev), endpt));
524
525 #ifdef DIAGNOSTIC
526 if (!sc->sc_is_open[endpt]) {
527 printf("ugenclose: not open\n");
528 return (EINVAL);
529 }
530 #endif
531
532 if (endpt == USB_CONTROL_ENDPOINT) {
533 DPRINTFN(5, ("ugenclose: close control\n"));
534 sc->sc_is_open[endpt] = 0;
535 return (0);
536 }
537
538 for (dir = OUT; dir <= IN; dir++) {
539 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
540 continue;
541 sce = &sc->sc_endpoints[endpt][dir];
542 if (sce->pipeh == NULL)
543 continue;
544 DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
545 endpt, dir, sce));
546
547 usbd_abort_pipe(sce->pipeh);
548 usbd_close_pipe(sce->pipeh);
549 sce->pipeh = NULL;
550
551 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
552 case UE_INTERRUPT:
553 ndflush(&sce->q, sce->q.c_cc);
554 clfree(&sce->q);
555 break;
556 case UE_ISOCHRONOUS:
557 for (i = 0; i < UGEN_NISOREQS; ++i)
558 usbd_free_xfer(sce->isoreqs[i].xfer);
559 break;
560 case UE_BULK:
561 if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
562 /* ibuf freed below */
563 usbd_free_xfer(sce->ra_wb_xfer);
564 break;
565 default:
566 break;
567 }
568
569 if (sce->ibuf != NULL) {
570 free(sce->ibuf, M_USBDEV);
571 sce->ibuf = NULL;
572 }
573 }
574 sc->sc_is_open[endpt] = 0;
575
576 return (0);
577 }
578
579 Static int
580 ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
581 {
582 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
583 u_int32_t n, tn;
584 usbd_xfer_handle xfer;
585 usbd_status err;
586 int error = 0;
587
588 DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));
589
590 if (sc->sc_dying)
591 return (EIO);
592
593 if (endpt == USB_CONTROL_ENDPOINT)
594 return (ENODEV);
595
596 #ifdef DIAGNOSTIC
597 if (sce->edesc == NULL) {
598 printf("ugenread: no edesc\n");
599 return (EIO);
600 }
601 if (sce->pipeh == NULL) {
602 printf("ugenread: no pipe\n");
603 return (EIO);
604 }
605 #endif
606
607 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
608 case UE_INTERRUPT:
609 /* Block until activity occurred. */
610 mutex_enter(&sc->sc_lock);
611 while (sce->q.c_cc == 0) {
612 if (flag & IO_NDELAY) {
613 mutex_exit(&sc->sc_lock);
614 return (EWOULDBLOCK);
615 }
616 sce->state |= UGEN_ASLP;
617 DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
618 /* "ugenri" */
619 error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
620 mstohz(sce->timeout));
621 DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
622 if (sc->sc_dying)
623 error = EIO;
624 if (error) {
625 sce->state &= ~UGEN_ASLP;
626 break;
627 }
628 }
629 mutex_exit(&sc->sc_lock);
630
631 /* Transfer as many chunks as possible. */
632 while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
633 n = min(sce->q.c_cc, uio->uio_resid);
634 if (n > sizeof(sc->sc_buffer))
635 n = sizeof(sc->sc_buffer);
636
637 /* Remove a small chunk from the input queue. */
638 q_to_b(&sce->q, sc->sc_buffer, n);
639 DPRINTFN(5, ("ugenread: got %d chars\n", n));
640
641 /* Copy the data to the user process. */
642 error = uiomove(sc->sc_buffer, n, uio);
643 if (error)
644 break;
645 }
646 break;
647 case UE_BULK:
648 if (sce->state & UGEN_BULK_RA) {
649 DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
650 uio->uio_resid, sce->ra_wb_used));
651 xfer = sce->ra_wb_xfer;
652
653 mutex_enter(&sc->sc_lock);
654 if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
655 mutex_exit(&sc->sc_lock);
656 return (EWOULDBLOCK);
657 }
658 while (uio->uio_resid > 0 && !error) {
659 while (sce->ra_wb_used == 0) {
660 sce->state |= UGEN_ASLP;
661 DPRINTFN(5,
662 ("ugenread: sleep on %p\n",
663 sce));
664 /* "ugenrb" */
665 error = cv_timedwait_sig(&sce->cv,
666 &sc->sc_lock, mstohz(sce->timeout));
667 DPRINTFN(5,
668 ("ugenread: woke, error=%d\n",
669 error));
670 if (sc->sc_dying)
671 error = EIO;
672 if (error) {
673 sce->state &= ~UGEN_ASLP;
674 break;
675 }
676 }
677
678 /* Copy data to the process. */
679 while (uio->uio_resid > 0
680 && sce->ra_wb_used > 0) {
681 n = min(uio->uio_resid,
682 sce->ra_wb_used);
683 n = min(n, sce->limit - sce->cur);
684 error = uiomove(sce->cur, n, uio);
685 if (error)
686 break;
687 sce->cur += n;
688 sce->ra_wb_used -= n;
689 if (sce->cur == sce->limit)
690 sce->cur = sce->ibuf;
691 }
692
693 /*
694 * If the transfers stopped because the
695 * buffer was full, restart them.
696 */
697 if (sce->state & UGEN_RA_WB_STOP &&
698 sce->ra_wb_used < sce->limit - sce->ibuf) {
699 n = (sce->limit - sce->ibuf)
700 - sce->ra_wb_used;
701 usbd_setup_xfer(xfer,
702 sce->pipeh, sce, NULL,
703 min(n, sce->ra_wb_xferlen),
704 USBD_NO_COPY, USBD_NO_TIMEOUT,
705 ugen_bulkra_intr);
706 sce->state &= ~UGEN_RA_WB_STOP;
707 err = usbd_transfer(xfer);
708 if (err != USBD_IN_PROGRESS)
709 /*
710 * The transfer has not been
711 * queued. Setting STOP
712 * will make us try
713 * again at the next read.
714 */
715 sce->state |= UGEN_RA_WB_STOP;
716 }
717 }
718 mutex_exit(&sc->sc_lock);
719 break;
720 }
721 xfer = usbd_alloc_xfer(sc->sc_udev);
722 if (xfer == 0)
723 return (ENOMEM);
724 while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
725 DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
726 tn = n;
727 err = usbd_bulk_transfer(
728 xfer, sce->pipeh,
729 sce->state & UGEN_SHORT_OK ?
730 USBD_SHORT_XFER_OK : 0,
731 sce->timeout, sc->sc_buffer, &tn, "ugenrb");
732 if (err) {
733 if (err == USBD_INTERRUPTED)
734 error = EINTR;
735 else if (err == USBD_TIMEOUT)
736 error = ETIMEDOUT;
737 else
738 error = EIO;
739 break;
740 }
741 DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
742 error = uiomove(sc->sc_buffer, tn, uio);
743 if (error || tn < n)
744 break;
745 }
746 usbd_free_xfer(xfer);
747 break;
748 case UE_ISOCHRONOUS:
749 mutex_enter(&sc->sc_lock);
750 while (sce->cur == sce->fill) {
751 if (flag & IO_NDELAY) {
752 mutex_exit(&sc->sc_lock);
753 return (EWOULDBLOCK);
754 }
755 sce->state |= UGEN_ASLP;
756 /* "ugenri" */
757 DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
758 error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
759 mstohz(sce->timeout));
760 DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
761 if (sc->sc_dying)
762 error = EIO;
763 if (error) {
764 sce->state &= ~UGEN_ASLP;
765 break;
766 }
767 }
768
769 while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
770 if(sce->fill > sce->cur)
771 n = min(sce->fill - sce->cur, uio->uio_resid);
772 else
773 n = min(sce->limit - sce->cur, uio->uio_resid);
774
775 DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));
776
777 /* Copy the data to the user process. */
778 error = uiomove(sce->cur, n, uio);
779 if (error)
780 break;
781 sce->cur += n;
782 if (sce->cur >= sce->limit)
783 sce->cur = sce->ibuf;
784 }
785 mutex_exit(&sc->sc_lock);
786 break;
787
788
789 default:
790 return (ENXIO);
791 }
792 return (error);
793 }
794
795 int
796 ugenread(dev_t dev, struct uio *uio, int flag)
797 {
798 int endpt = UGENENDPOINT(dev);
799 struct ugen_softc *sc;
800 int error;
801
802 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
803 if (sc == NULL)
804 return ENXIO;
805
806 mutex_enter(&sc->sc_lock);
807 sc->sc_refcnt++;
808 mutex_exit(&sc->sc_lock);
809
810 error = ugen_do_read(sc, endpt, uio, flag);
811
812 mutex_enter(&sc->sc_lock);
813 if (--sc->sc_refcnt < 0)
814 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
815 mutex_exit(&sc->sc_lock);
816
817 return (error);
818 }
819
820 Static int
821 ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
822 int flag)
823 {
824 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
825 u_int32_t n;
826 int error = 0;
827 u_int32_t tn;
828 char *dbuf;
829 usbd_xfer_handle xfer;
830 usbd_status err;
831
832 DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));
833
834 if (sc->sc_dying)
835 return (EIO);
836
837 if (endpt == USB_CONTROL_ENDPOINT)
838 return (ENODEV);
839
840 #ifdef DIAGNOSTIC
841 if (sce->edesc == NULL) {
842 printf("ugenwrite: no edesc\n");
843 return (EIO);
844 }
845 if (sce->pipeh == NULL) {
846 printf("ugenwrite: no pipe\n");
847 return (EIO);
848 }
849 #endif
850
851 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
852 case UE_BULK:
853 if (sce->state & UGEN_BULK_WB) {
854 DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
855 uio->uio_resid, sce->ra_wb_used));
856 xfer = sce->ra_wb_xfer;
857
858 mutex_enter(&sc->sc_lock);
859 if (sce->ra_wb_used == sce->limit - sce->ibuf &&
860 flag & IO_NDELAY) {
861 mutex_exit(&sc->sc_lock);
862 return (EWOULDBLOCK);
863 }
864 while (uio->uio_resid > 0 && !error) {
865 while (sce->ra_wb_used ==
866 sce->limit - sce->ibuf) {
867 sce->state |= UGEN_ASLP;
868 DPRINTFN(5,
869 ("ugenwrite: sleep on %p\n",
870 sce));
871 /* "ugenwb" */
872 error = cv_timedwait_sig(&sce->cv,
873 &sc->sc_lock, mstohz(sce->timeout));
874 DPRINTFN(5,
875 ("ugenwrite: woke, error=%d\n",
876 error));
877 if (sc->sc_dying)
878 error = EIO;
879 if (error) {
880 sce->state &= ~UGEN_ASLP;
881 break;
882 }
883 }
884
885 /* Copy data from the process. */
886 while (uio->uio_resid > 0 &&
887 sce->ra_wb_used < sce->limit - sce->ibuf) {
888 n = min(uio->uio_resid,
889 (sce->limit - sce->ibuf)
890 - sce->ra_wb_used);
891 n = min(n, sce->limit - sce->fill);
892 error = uiomove(sce->fill, n, uio);
893 if (error)
894 break;
895 sce->fill += n;
896 sce->ra_wb_used += n;
897 if (sce->fill == sce->limit)
898 sce->fill = sce->ibuf;
899 }
900
901 /*
902 * If the transfers stopped because the
903 * buffer was empty, restart them.
904 */
905 if (sce->state & UGEN_RA_WB_STOP &&
906 sce->ra_wb_used > 0) {
907 dbuf = (char *)usbd_get_buffer(xfer);
908 n = min(sce->ra_wb_used,
909 sce->ra_wb_xferlen);
910 tn = min(n, sce->limit - sce->cur);
911 memcpy(dbuf, sce->cur, tn);
912 dbuf += tn;
913 if (n - tn > 0)
914 memcpy(dbuf, sce->ibuf,
915 n - tn);
916 usbd_setup_xfer(xfer,
917 sce->pipeh, sce, NULL, n,
918 USBD_NO_COPY, USBD_NO_TIMEOUT,
919 ugen_bulkwb_intr);
920 sce->state &= ~UGEN_RA_WB_STOP;
921 err = usbd_transfer(xfer);
922 if (err != USBD_IN_PROGRESS)
923 /*
924 * The transfer has not been
925 * queued. Setting STOP
926 * will make us try again
927 * at the next read.
928 */
929 sce->state |= UGEN_RA_WB_STOP;
930 }
931 }
932 mutex_exit(&sc->sc_lock);
933 break;
934 }
935 xfer = usbd_alloc_xfer(sc->sc_udev);
936 if (xfer == 0)
937 return (EIO);
938 while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
939 error = uiomove(sc->sc_buffer, n, uio);
940 if (error)
941 break;
942 DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
943 err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
944 sce->timeout, sc->sc_buffer, &n,"ugenwb");
945 if (err) {
946 if (err == USBD_INTERRUPTED)
947 error = EINTR;
948 else if (err == USBD_TIMEOUT)
949 error = ETIMEDOUT;
950 else
951 error = EIO;
952 break;
953 }
954 }
955 usbd_free_xfer(xfer);
956 break;
957 case UE_INTERRUPT:
958 xfer = usbd_alloc_xfer(sc->sc_udev);
959 if (xfer == 0)
960 return (EIO);
961 while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
962 uio->uio_resid)) != 0) {
963 error = uiomove(sc->sc_buffer, n, uio);
964 if (error)
965 break;
966 DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
967 err = usbd_intr_transfer(xfer, sce->pipeh, 0,
968 sce->timeout, sc->sc_buffer, &n, "ugenwi");
969 if (err) {
970 if (err == USBD_INTERRUPTED)
971 error = EINTR;
972 else if (err == USBD_TIMEOUT)
973 error = ETIMEDOUT;
974 else
975 error = EIO;
976 break;
977 }
978 }
979 usbd_free_xfer(xfer);
980 break;
981 default:
982 return (ENXIO);
983 }
984 return (error);
985 }
986
987 int
988 ugenwrite(dev_t dev, struct uio *uio, int flag)
989 {
990 int endpt = UGENENDPOINT(dev);
991 struct ugen_softc *sc;
992 int error;
993
994 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
995 if (sc == NULL)
996 return ENXIO;
997
998 mutex_enter(&sc->sc_lock);
999 sc->sc_refcnt++;
1000 mutex_exit(&sc->sc_lock);
1001
1002 error = ugen_do_write(sc, endpt, uio, flag);
1003
1004 mutex_enter(&sc->sc_lock);
1005 if (--sc->sc_refcnt < 0)
1006 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1007 mutex_exit(&sc->sc_lock);
1008
1009 return (error);
1010 }
1011
1012 int
1013 ugen_activate(device_t self, enum devact act)
1014 {
1015 struct ugen_softc *sc = device_private(self);
1016
1017 switch (act) {
1018 case DVACT_DEACTIVATE:
1019 sc->sc_dying = 1;
1020 return 0;
1021 default:
1022 return EOPNOTSUPP;
1023 }
1024 }
1025
1026 int
1027 ugen_detach(device_t self, int flags)
1028 {
1029 struct ugen_softc *sc = device_private(self);
1030 struct ugen_endpoint *sce;
1031 int i, dir;
1032 int maj, mn;
1033
1034 DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
1035
1036 sc->sc_dying = 1;
1037 pmf_device_deregister(self);
1038 /* Abort all pipes. Causes processes waiting for transfer to wake. */
1039 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1040 for (dir = OUT; dir <= IN; dir++) {
1041 sce = &sc->sc_endpoints[i][dir];
1042 if (sce->pipeh)
1043 usbd_abort_pipe(sce->pipeh);
1044 }
1045 }
1046
1047 mutex_enter(&sc->sc_lock);
1048 if (--sc->sc_refcnt >= 0) {
1049 /* Wake everyone */
1050 for (i = 0; i < USB_MAX_ENDPOINTS; i++)
1051 cv_signal(&sc->sc_endpoints[i][IN].cv);
1052 /* Wait for processes to go away. */
1053 usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
1054 }
1055 mutex_exit(&sc->sc_lock);
1056
1057 /* locate the major number */
1058 maj = cdevsw_lookup_major(&ugen_cdevsw);
1059
1060 /* Nuke the vnodes for any open instances (calls close). */
1061 mn = device_unit(self) * USB_MAX_ENDPOINTS;
1062 vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
1063
1064 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
1065 sc->sc_dev);
1066
1067 for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1068 for (dir = OUT; dir <= IN; dir++) {
1069 sce = &sc->sc_endpoints[i][dir];
1070 seldestroy(&sce->rsel);
1071 cv_destroy(&sce->cv);
1072 }
1073 }
1074
1075 cv_destroy(&sc->sc_detach_cv);
1076 mutex_destroy(&sc->sc_lock);
1077
1078 return (0);
1079 }
1080
1081 Static void
1082 ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
1083 {
1084 struct ugen_endpoint *sce = addr;
1085 struct ugen_softc *sc = sce->sc;
1086 u_int32_t count;
1087 u_char *ibuf;
1088
1089 if (status == USBD_CANCELLED)
1090 return;
1091
1092 if (status != USBD_NORMAL_COMPLETION) {
1093 DPRINTF(("ugenintr: status=%d\n", status));
1094 if (status == USBD_STALLED)
1095 usbd_clear_endpoint_stall_async(sce->pipeh);
1096 return;
1097 }
1098
1099 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1100 ibuf = sce->ibuf;
1101
1102 DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
1103 xfer, status, count));
1104 DPRINTFN(5, (" data = %02x %02x %02x\n",
1105 ibuf[0], ibuf[1], ibuf[2]));
1106
1107 (void)b_to_q(ibuf, count, &sce->q);
1108
1109 mutex_enter(&sc->sc_lock);
1110 if (sce->state & UGEN_ASLP) {
1111 sce->state &= ~UGEN_ASLP;
1112 DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
1113 cv_signal(&sce->cv);
1114 }
1115 mutex_exit(&sc->sc_lock);
1116 selnotify(&sce->rsel, 0, 0);
1117 }
1118
1119 Static void
1120 ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
1121 usbd_status status)
1122 {
1123 struct isoreq *req = addr;
1124 struct ugen_endpoint *sce = req->sce;
1125 struct ugen_softc *sc = sce->sc;
1126 u_int32_t count, n;
1127 int i, isize;
1128
1129 /* Return if we are aborting. */
1130 if (status == USBD_CANCELLED)
1131 return;
1132
1133 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1134 DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1135 (long)(req - sce->isoreqs), count));
1136
1137 /* throw away oldest input if the buffer is full */
1138 if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1139 sce->cur += count;
1140 if(sce->cur >= sce->limit)
1141 sce->cur = sce->ibuf + (sce->limit - sce->cur);
1142 DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1143 count));
1144 }
1145
1146 isize = UGETW(sce->edesc->wMaxPacketSize);
1147 for (i = 0; i < UGEN_NISORFRMS; i++) {
1148 u_int32_t actlen = req->sizes[i];
1149 char const *tbuf = (char const *)req->dmabuf + isize * i;
1150
1151 /* copy data to buffer */
1152 while (actlen > 0) {
1153 n = min(actlen, sce->limit - sce->fill);
1154 memcpy(sce->fill, tbuf, n);
1155
1156 tbuf += n;
1157 actlen -= n;
1158 sce->fill += n;
1159 if(sce->fill == sce->limit)
1160 sce->fill = sce->ibuf;
1161 }
1162
1163 /* setup size for next transfer */
1164 req->sizes[i] = isize;
1165 }
1166
1167 usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
1168 USBD_NO_COPY, ugen_isoc_rintr);
1169 (void)usbd_transfer(xfer);
1170
1171 mutex_enter(&sc->sc_lock);
1172 if (sce->state & UGEN_ASLP) {
1173 sce->state &= ~UGEN_ASLP;
1174 DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1175 cv_signal(&sce->cv);
1176 }
1177 mutex_exit(&sc->sc_lock);
1178 selnotify(&sce->rsel, 0, 0);
1179 }
1180
/*
 * Completion callback for bulk read-ahead (UGEN_BULK_RA) transfers.
 * Copies the transferred bytes into the endpoint's ring buffer,
 * queues the next read while there is still room, and wakes any
 * sleeping reader / poller.
 */
Static void
ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		/* Pause read-ahead; a later read() restarts it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	/* Any remainder wraps around to the start of the ring. */
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued. Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Ring is full: stop until a read drains it. */
		sce->state |= UGEN_RA_WB_STOP;

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1250
/*
 * Completion callback for bulk write-back (UGEN_BULK_WB) transfers.
 * Releases the space just written on the wire from the ring buffer,
 * queues the next chunk if more buffered data remains, and wakes any
 * sleeping writer / poller.
 */
Static void
ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		/* Pause write-back; a later write() restarts it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers. */
	sce->cur += count;
	/* Read pointer wraps: overshoot past 'limit' continues at 'ibuf'. */
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		/* Wrapped portion comes from the start of the ring. */
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued. Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Ring drained: stop until the next write refills it. */
		sce->state |= UGEN_RA_WB_STOP;

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1318
1319 Static usbd_status
1320 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1321 {
1322 usbd_interface_handle iface;
1323 usb_endpoint_descriptor_t *ed;
1324 usbd_status err;
1325 struct ugen_endpoint *sce;
1326 u_int8_t niface, nendpt, endptno, endpt;
1327 int dir;
1328
1329 DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1330
1331 err = usbd_interface_count(sc->sc_udev, &niface);
1332 if (err)
1333 return (err);
1334 if (ifaceidx < 0 || ifaceidx >= niface)
1335 return (USBD_INVAL);
1336
1337 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1338 if (err)
1339 return (err);
1340 err = usbd_endpoint_count(iface, &nendpt);
1341 if (err)
1342 return (err);
1343
1344 /* change setting */
1345 err = usbd_set_interface(iface, altno);
1346 if (err)
1347 return (err);
1348
1349 err = usbd_endpoint_count(iface, &nendpt);
1350 if (err)
1351 return (err);
1352
1353 ugen_clear_endpoints(sc);
1354
1355 for (endptno = 0; endptno < nendpt; endptno++) {
1356 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1357 KASSERT(ed != NULL);
1358 endpt = ed->bEndpointAddress;
1359 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1360 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1361 sce->sc = sc;
1362 sce->edesc = ed;
1363 sce->iface = iface;
1364 }
1365 return (0);
1366 }
1367
1368 /* Retrieve a complete descriptor for a certain device and index. */
1369 Static usb_config_descriptor_t *
1370 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1371 {
1372 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1373 int len;
1374 usbd_status err;
1375
1376 if (index == USB_CURRENT_CONFIG_INDEX) {
1377 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1378 len = UGETW(tdesc->wTotalLength);
1379 if (lenp)
1380 *lenp = len;
1381 cdesc = malloc(len, M_TEMP, M_WAITOK);
1382 memcpy(cdesc, tdesc, len);
1383 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1384 } else {
1385 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1386 if (err)
1387 return (0);
1388 len = UGETW(cdescr.wTotalLength);
1389 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1390 if (lenp)
1391 *lenp = len;
1392 cdesc = malloc(len, M_TEMP, M_WAITOK);
1393 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1394 if (err) {
1395 free(cdesc, M_TEMP);
1396 return (0);
1397 }
1398 }
1399 return (cdesc);
1400 }
1401
1402 Static int
1403 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1404 {
1405 usbd_interface_handle iface;
1406 usbd_status err;
1407
1408 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1409 if (err)
1410 return (-1);
1411 return (usbd_get_interface_altindex(iface));
1412 }
1413
/*
 * Worker for ugenioctl(): dispatches every ugen ioctl for endpoint
 * 'endpt' of the device.  The first switch handles per-endpoint
 * commands; the remaining commands are valid on the control endpoint
 * only.  Returns 0 on success or an errno value.
 */
Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	usbd_interface_handle iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return (EIO);

	/* Commands that may target a non-control endpoint. */
	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		/* NOTE(review): only the IN endpoint's timeout is set;
		 * the OUT side keeps its previous value. */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
			)
			return (EINVAL);
		sce->timeout = *(int *)addr;
		return (0);
	case USB_SET_BULK_RA:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			/* Empty ring: read (cur) and write (fill) coincide. */
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    USBD_NO_COPY, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				/* Failed to start: unwind all state. */
				sce->state &= ~UGEN_BULK_RA;
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return (EIO);
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return (0);

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_WB:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			/* Empty ring: read (cur) and write (fill) coincide. */
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			/* STOP set: first write() kicks off the transfers. */
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return (0);

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		/* RA tunes the IN side, WB the OUT side. */
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return (EINVAL);
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return (0);
	}
	default:
		break;
	}

	/* Everything below is valid on the control endpoint only. */
	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		/* Validate the interface index before switching. */
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		/* NOTE(review): '0' passed where an int pointer is expected. */
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		/* "Current" alt index means: look it up on the device. */
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		/* "Current" alt index means: look it up on the device. */
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		/* Copy out at most what the user's buffer can hold. */
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP);
		return (error);
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			/* Data-phase direction follows bmRequestType. */
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				/* Copy the OUT payload in from userland. */
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				/* Copy the IN payload back to userland. */
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
			(struct usb_device_info_old *)addr, 0);

		break;
#endif
	default:
		return (EINVAL);
	}
	return (0);
}
1841
1842 int
1843 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1844 {
1845 int endpt = UGENENDPOINT(dev);
1846 struct ugen_softc *sc;
1847 int error;
1848
1849 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1850 if (sc == NULL)
1851 return ENXIO;
1852
1853 sc->sc_refcnt++;
1854 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1855 if (--sc->sc_refcnt < 0)
1856 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1857 return (error);
1858 }
1859
/*
 * Poll support.  Reports readability/writability of the IN/OUT
 * endpoint pair selected by the minor number, according to each
 * endpoint's transfer type.  Buffer state is inspected under the
 * softc lock.
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (POLLHUP);

	/* The control endpoint is not pollable. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	/* NOTE(review): these point into a fixed array, so the NULL
	 * test below can never fire. */
	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return (POLLERR);
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return (POLLERR);
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return (POLLERR);
	}
#endif

	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Readable when the clist has queued bytes. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Readable when the isoc ring is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Read-ahead: data available in the ring? */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Write-back: room left in the ring? */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return (revents);
}
1958
1959 static void
1960 filt_ugenrdetach(struct knote *kn)
1961 {
1962 struct ugen_endpoint *sce = kn->kn_hook;
1963 struct ugen_softc *sc = sce->sc;
1964
1965 mutex_enter(&sc->sc_lock);
1966 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1967 mutex_exit(&sc->sc_lock);
1968 }
1969
1970 static int
1971 filt_ugenread_intr(struct knote *kn, long hint)
1972 {
1973 struct ugen_endpoint *sce = kn->kn_hook;
1974
1975 kn->kn_data = sce->q.c_cc;
1976 return (kn->kn_data > 0);
1977 }
1978
1979 static int
1980 filt_ugenread_isoc(struct knote *kn, long hint)
1981 {
1982 struct ugen_endpoint *sce = kn->kn_hook;
1983
1984 if (sce->cur == sce->fill)
1985 return (0);
1986
1987 if (sce->cur < sce->fill)
1988 kn->kn_data = sce->fill - sce->cur;
1989 else
1990 kn->kn_data = (sce->limit - sce->cur) +
1991 (sce->fill - sce->ibuf);
1992
1993 return (1);
1994 }
1995
1996 static int
1997 filt_ugenread_bulk(struct knote *kn, long hint)
1998 {
1999 struct ugen_endpoint *sce = kn->kn_hook;
2000
2001 if (!(sce->state & UGEN_BULK_RA))
2002 /*
2003 * We have no easy way of determining if a read will
2004 * yield any data or a write will happen.
2005 * So, emulate "seltrue".
2006 */
2007 return (filt_seltrue(kn, hint));
2008
2009 if (sce->ra_wb_used == 0)
2010 return (0);
2011
2012 kn->kn_data = sce->ra_wb_used;
2013
2014 return (1);
2015 }
2016
2017 static int
2018 filt_ugenwrite_bulk(struct knote *kn, long hint)
2019 {
2020 struct ugen_endpoint *sce = kn->kn_hook;
2021
2022 if (!(sce->state & UGEN_BULK_WB))
2023 /*
2024 * We have no easy way of determining if a read will
2025 * yield any data or a write will happen.
2026 * So, emulate "seltrue".
2027 */
2028 return (filt_seltrue(kn, hint));
2029
2030 if (sce->ra_wb_used == sce->limit - sce->ibuf)
2031 return (0);
2032
2033 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2034
2035 return (1);
2036 }
2037
/*
 * filterops tables wiring each endpoint transfer type to its event
 * filter; all share filt_ugenrdetach for detach.  NOTE(review): the
 * leading '1' appears to be the "attached to a file descriptor"
 * flag — confirm against struct filterops in <sys/event.h>.
 */
static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2049
/*
 * kqueue attach.  Picks the filterops matching the endpoint's
 * transfer type and direction, then hooks the knote onto the
 * endpoint's selinfo klist under the softc lock.
 */
int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (ENXIO);

	/* The control endpoint cannot be kevent()ed. */
	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		/* Reads are served by the IN endpoint. */
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	case EVFILT_WRITE:
		/* Writes are served by the OUT endpoint. */
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return (EINVAL);

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	default:
		return (EINVAL);
	}

	/* The filters retrieve the endpoint via kn_hook. */
	kn->kn_hook = sce;

	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return (0);
}
2121