/*	$NetBSD: ugen.c,v 1.124.2.2 2016/03/07 14:36:55 martin Exp $	*/
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.124.2.2 2016/03/07 14:36:55 martin Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/malloc.h>
50 #include <sys/device.h>
51 #include <sys/ioctl.h>
52 #include <sys/conf.h>
53 #include <sys/tty.h>
54 #include <sys/file.h>
55 #include <sys/select.h>
56 #include <sys/proc.h>
57 #include <sys/vnode.h>
58 #include <sys/poll.h>
59
60 #include <dev/usb/usb.h>
61 #include <dev/usb/usbdi.h>
62 #include <dev/usb/usbdi_util.h>
63
64 #ifdef UGEN_DEBUG
65 #define DPRINTF(x) if (ugendebug) printf x
66 #define DPRINTFN(n,x) if (ugendebug>(n)) printf x
67 int ugendebug = 0;
68 #else
69 #define DPRINTF(x)
70 #define DPRINTFN(n,x)
71 #endif
72
73 #define UGEN_CHUNK 128 /* chunk size for read */
74 #define UGEN_IBSIZE 1020 /* buffer size */
75 #define UGEN_BBSIZE 1024
76
77 #define UGEN_NISOREQS 4 /* number of outstanding xfer requests */
78 #define UGEN_NISORFRMS 8 /* number of transactions per req */
79 #define UGEN_NISOFRAMES (UGEN_NISORFRMS * UGEN_NISOREQS)
80
81 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
82 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
83
/*
 * Per-endpoint, per-direction state.  One of these exists for each
 * (endpoint address, IN/OUT) pair of the current configuration.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back-pointer to owning softc */
	usb_endpoint_descriptor_t *edesc; /* endpoint descriptor; NULL if absent */
	usbd_interface_handle iface;	/* interface this endpoint belongs to */
	int state;			/* UGEN_* flag bits below */
#define	UGEN_ASLP	0x02	/* waiting for data */
#define	UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	usbd_pipe_handle pipeh;		/* open pipe, NULL when closed */
	struct clist q;			/* interrupt-endpoint input queue */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	u_int32_t timeout;	/* I/O timeout in ms (USBD_NO_TIMEOUT = none) */
	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	u_int32_t ra_wb_used;	 /* how much is in buffer */
	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	usbd_xfer_handle ra_wb_xfer;	/* single outstanding RA/WB transfer */
	/* Outstanding isochronous requests; each carries several frames. */
	struct isoreq {
		struct ugen_endpoint *sce;	/* back-pointer for the callback */
		usbd_xfer_handle xfer;
		void *dmabuf;			/* DMA buffer owned by xfer */
		u_int16_t sizes[UGEN_NISORFRMS]; /* per-frame lengths */
	} isoreqs[UGEN_NISOREQS];
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;	/* select/poll/kqueue state */
	kcondvar_t cv;		/* sleep point for blocking I/O */
};
117
/* Per-device (per ugen unit) state. */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	usbd_device_handle sc_udev;	/* the USB device we expose */

	kmutex_t sc_lock;		/* protects endpoint state & refcount */
	kcondvar_t sc_detach_cv;	/* detach waits here for I/O to drain */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* open flag per endpoint addr */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;			/* in-progress I/O ops; detach waits */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for synchronous I/O */
	u_char sc_dying;		/* set when device is going away */
};
134
135 dev_type_open(ugenopen);
136 dev_type_close(ugenclose);
137 dev_type_read(ugenread);
138 dev_type_write(ugenwrite);
139 dev_type_ioctl(ugenioctl);
140 dev_type_poll(ugenpoll);
141 dev_type_kqfilter(ugenkqfilter);
142
/*
 * Character device switch for /dev/ugenN.EE nodes.  D_OTHER: neither
 * tty nor disk; no stop/tty/mmap support.
 */
const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
157
158 Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
159 usbd_status status);
160 Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
161 usbd_status status);
162 Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
163 usbd_status status);
164 Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
165 usbd_status status);
166 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
167 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
168 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
169 void *, int, struct lwp *);
170 Static int ugen_set_config(struct ugen_softc *sc, int configno);
171 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
172 int index, int *lenp);
173 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
174 Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);
175 Static void ugen_clear_endpoints(struct ugen_softc *);
176
177 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
178 #define UGENENDPOINT(n) (minor(n) & 0xf)
179 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
180
181 int ugen_match(device_t, cfdata_t, void *);
182 void ugen_attach(device_t, device_t, void *);
183 int ugen_detach(device_t, int);
184 int ugen_activate(device_t, enum devact);
185 extern struct cfdriver ugen_cd;
186 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach, ugen_detach, ugen_activate);
187
188 /* toggle to control attach priority. -1 means "let autoconf decide" */
189 int ugen_override = -1;
190
191 int
192 ugen_match(device_t parent, cfdata_t match, void *aux)
193 {
194 struct usb_attach_arg *uaa = aux;
195 int override;
196
197 if (ugen_override != -1)
198 override = ugen_override;
199 else
200 override = match->cf_flags & 1;
201
202 if (override)
203 return (UMATCH_HIGHEST);
204 else if (uaa->usegeneric)
205 return (UMATCH_GENERIC);
206 else
207 return (UMATCH_NONE);
208 }
209
/*
 * Autoconfiguration attach: announce the device, initialise the softc
 * lock/condvar and the per-endpoint selinfo/condvar pairs, select
 * configuration index 0 (ugen's default), and build the endpoint
 * table for it via ugen_set_config().  On any failure sc_dying is set
 * so subsequent opens fail with ENXIO.
 */
void
ugen_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	usbd_device_handle udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_USB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->device;

	/*
	 * Initialise the selinfo/condvar pairs once for the device's
	 * lifetime; ugen_set_config() deliberately leaves them intact
	 * when it wipes the rest of the endpoint state.
	 */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		return;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
	    sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
}
270
271 Static void
272 ugen_clear_endpoints(struct ugen_softc *sc)
273 {
274
275 /* Clear out the old info, but leave the selinfo and cv initialised. */
276 for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
277 for (int dir = OUT; dir <= IN; dir++) {
278 struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
279 memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
280 }
281 }
282 }
283
/*
 * Switch the device to configuration `configno' (if not already
 * current) and rebuild the sc_endpoints table from the configuration's
 * interface/endpoint descriptors.  Fails with USBD_IN_USE if any
 * non-control endpoint is open, since that would invalidate open pipes.
 *
 * Returns USBD_NORMAL_COMPLETION or a usbd_status error.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	usbd_device_handle dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	u_int8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
	    device_xname(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			    ("ugen_set_config: %s - endpoint %d is open\n",
			    device_xname(sc->sc_dev), endptno));
			return (USBD_IN_USE);
		}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return (err);
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return (err);

	/* Wipe the old endpoint table before repopulating it. */
	ugen_clear_endpoints(sc);

	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return (err);
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return (err);
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			/* Index the table by (address, direction). */
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return (USBD_NORMAL_COMPLETION);
}
351
/*
 * Open a /dev/ugenN.EE node.  The control endpoint allows multiple
 * concurrent opens; all other endpoints are exclusive.  For each
 * direction requested by `flag' the endpoint's pipe is opened and
 * per-transfer-type resources are allocated:
 *
 *  - interrupt IN: a clist queue plus an interrupt pipe that delivers
 *    into sce->ibuf via ugenintr().
 *  - bulk: a plain pipe; RA/WB parameters get their defaults.
 *  - isochronous IN: a circular buffer and UGEN_NISOREQS outstanding
 *    transfers feeding ugen_isoc_rintr().
 *
 * Returns 0 or an errno.
 */
int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	usbd_xfer_handle xfer;
	void *tbuf;
	int i, j;

	sc = device_lookup_private(&ugen_cd, unit);
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	if (sc == NULL || sc->sc_dying)
		return (ENXIO);

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return (0);
	}

	if (sc->sc_is_open[endpt])
		return (EBUSY);

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce->edesc == NULL)
				return (ENXIO);
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				/* Interrupt OUT: plain synchronous pipe. */
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return (EIO);
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				return (ENOMEM);
			}
			/* Completions land in ugenintr(), which feeds sce->q. */
			err = usbd_open_pipe_intr(sce->iface,
				  edesc->bEndpointAddress,
				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
				  sce->ibuf, isize, ugenintr,
				  USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				return (EIO);
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return (EIO);
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return (EINVAL);
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			/* Circular buffer drained by ugen_do_read(). */
			sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
				M_USBDEV, M_WAITOK);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				return (EIO);
			}
			/* Queue all outstanding isoc requests up front. */
			for(i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				xfer = usbd_alloc_xfer(sc->sc_udev);
				if (xfer == 0)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				tbuf = usbd_alloc_buffer
					(xfer, isize * UGEN_NISORFRMS);
				if (tbuf == 0) {
					i++;
					goto bad;
				}
				sce->isoreqs[i].dmabuf = tbuf;
				for(j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer
					(xfer, sce->pipeh, &sce->isoreqs[i],
					 sce->isoreqs[i].sizes,
					 UGEN_NISORFRMS, USBD_NO_COPY,
					 ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			/* Unwind partially-allocated isoc resources. */
			while (--i >= 0) /* implicit buffer free */
				usbd_free_xfer(sce->isoreqs[i].xfer);
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
			return (ENOMEM);
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return (EINVAL);
		}
	}
	sc->sc_is_open[endpt] = 1;
	return (0);
}
509
/*
 * Close a /dev/ugenN.EE node.  For each direction that was opened,
 * abort and close the pipe, then release the per-transfer-type
 * resources allocated in ugenopen() (clist queue, isoc transfers,
 * RA/WB transfer, input buffer).
 */
int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return (EINVAL);
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return (0);
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		/* Abort first so completion callbacks stop firing. */
		usbd_abort_pipe(sce->pipeh);
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Drain and release the input clist. */
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_free_xfer(sce->isoreqs[i].xfer);
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
				/* ibuf freed below */
				usbd_free_xfer(sce->ra_wb_xfer);
			break;
		default:
			break;
		}

		if (sce->ibuf != NULL) {
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return (0);
}
579
/*
 * Common read path, operating on the endpoint's IN side.  Behaviour
 * depends on the endpoint's transfer type:
 *
 *  - interrupt: sleep until ugenintr() has queued data into sce->q,
 *    then copy it out through sc_buffer.
 *  - bulk in read-ahead mode (UGEN_BULK_RA): drain the circular ibuf
 *    filled by ugen_bulkra_intr(), restarting the transfer if it had
 *    stopped because the buffer was full.
 *  - plain bulk: synchronous usbd_bulk_transfer()s into sc_buffer.
 *  - isochronous: drain the circular ibuf filled by ugen_isoc_rintr().
 *
 * Returns 0 or an errno; EWOULDBLOCK for a non-blocking read with no
 * data available.
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	usbd_xfer_handle xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	/* Control-endpoint I/O goes through ugenioctl(), not read(2). */
	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for ugen_bulkra_intr() to fill ibuf. */
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					/* Clamp to end of circular buffer. */
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain synchronous bulk read. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(
				  xfer, sce->pipeh,
				  sce->state & UGEN_SHORT_OK ?
				      USBD_SHORT_XFER_OK : 0,
				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* Wait until ugen_isoc_rintr() has advanced the fill pointer. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			/* Copy up to the fill pointer or the wrap point. */
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return (ENXIO);
	}
	return (error);
}
795
796 int
797 ugenread(dev_t dev, struct uio *uio, int flag)
798 {
799 int endpt = UGENENDPOINT(dev);
800 struct ugen_softc *sc;
801 int error;
802
803 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
804 if (sc == NULL)
805 return ENXIO;
806
807 mutex_enter(&sc->sc_lock);
808 sc->sc_refcnt++;
809 mutex_exit(&sc->sc_lock);
810
811 error = ugen_do_read(sc, endpt, uio, flag);
812
813 mutex_enter(&sc->sc_lock);
814 if (--sc->sc_refcnt < 0)
815 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
816 mutex_exit(&sc->sc_lock);
817
818 return (error);
819 }
820
/*
 * Common write path, operating on the endpoint's OUT side.  Behaviour
 * depends on the endpoint's transfer type:
 *
 *  - bulk in write-behind mode (UGEN_BULK_WB): copy user data into the
 *    circular ibuf and (re)start the asynchronous transfer drained by
 *    ugen_bulkwb_intr() whenever it had stopped on an empty buffer.
 *  - plain bulk: synchronous usbd_bulk_transfer()s from sc_buffer.
 *  - interrupt: synchronous usbd_intr_transfer()s, at most one max
 *    packet at a time.
 *
 * Returns 0 or an errno; EWOULDBLOCK for a non-blocking write with a
 * full write-behind buffer.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int error = 0;
	u_int32_t tn;
	char *dbuf;
	usbd_xfer_handle xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (sc->sc_dying)
		return (EIO);

	/* Control-endpoint I/O goes through ugenioctl(), not write(2). */
	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return (EWOULDBLOCK);
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for ugen_bulkwb_intr() to drain ibuf. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					/* Clamp to end of circular buffer. */
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					/*
					 * Stage up to one xfer's worth of
					 * data, handling the wrap in ibuf
					 * with a second memcpy.
					 */
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer,
					    sce->pipeh, sce, NULL, n,
					    USBD_NO_COPY, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain synchronous bulk write. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
				  sce->timeout, sc->sc_buffer, &n,"ugenwb");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		/* One max-packet-sized transfer at a time. */
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n, "ugenwi");
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}
987
988 int
989 ugenwrite(dev_t dev, struct uio *uio, int flag)
990 {
991 int endpt = UGENENDPOINT(dev);
992 struct ugen_softc *sc;
993 int error;
994
995 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
996 if (sc == NULL)
997 return ENXIO;
998
999 mutex_enter(&sc->sc_lock);
1000 sc->sc_refcnt++;
1001 mutex_exit(&sc->sc_lock);
1002
1003 error = ugen_do_write(sc, endpt, uio, flag);
1004
1005 mutex_enter(&sc->sc_lock);
1006 if (--sc->sc_refcnt < 0)
1007 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1008 mutex_exit(&sc->sc_lock);
1009
1010 return (error);
1011 }
1012
1013 int
1014 ugen_activate(device_t self, enum devact act)
1015 {
1016 struct ugen_softc *sc = device_private(self);
1017
1018 switch (act) {
1019 case DVACT_DEACTIVATE:
1020 sc->sc_dying = 1;
1021 return 0;
1022 default:
1023 return EOPNOTSUPP;
1024 }
1025 }
1026
/*
 * Autoconfiguration detach: mark the device dying, abort all open
 * pipes (waking sleepers), wait for in-progress I/O (sc_refcnt) to
 * drain, revoke the device vnodes, and tear down the per-endpoint
 * and per-softc synchronisation objects.
 */
int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			cv_signal(&sc->sc_endpoints[i][IN].cv);
		/* Wait for processes to go away. */
		usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
			   sc->sc_dev);

	/* Destroy the lifetime-long selinfo/condvar pairs from attach. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return (0);
}
1081
/*
 * Completion callback for interrupt-IN pipes opened with
 * usbd_open_pipe_intr() in ugenopen().  Appends the received bytes to
 * the endpoint's clist queue and wakes/notifies any reader.
 */
Static void
ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count;
	u_char *ibuf;

	/* Pipe is being aborted/closed; nothing to deliver. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	/* Queue the data; return value ignored (drops on overflow). */
	(void)b_to_q(ibuf, count, &sce->q);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1119
/*
 * Completion callback for isochronous-IN transfers queued by
 * ugenopen().  Copies the received frames into the endpoint's circular
 * buffer (discarding the oldest data when full), requeues the same
 * transfer, and wakes/notifies any reader.
 */
Static void
ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
		usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	int i, isize;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
	    (long)(req - sce->isoreqs), count));

	/* throw away oldest input if the buffer is full */
	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
		sce->cur += count;
		if(sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->limit - sce->cur);
		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
			     count));
	}

	isize = UGETW(sce->edesc->wMaxPacketSize);
	for (i = 0; i < UGEN_NISORFRMS; i++) {
		u_int32_t actlen = req->sizes[i];
		char const *tbuf = (char const *)req->dmabuf + isize * i;

		/* copy data to buffer */
		while (actlen > 0) {
			/* Copy up to the wrap point, then continue at ibuf. */
			n = min(actlen, sce->limit - sce->fill);
			memcpy(sce->fill, tbuf, n);

			tbuf += n;
			actlen -= n;
			sce->fill += n;
			if(sce->fill == sce->limit)
				sce->fill = sce->ibuf;
		}

		/* setup size for next transfer */
		req->sizes[i] = isize;
	}

	/* Requeue this request so input keeps flowing. */
	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
			     USBD_NO_COPY, ugen_isoc_rintr);
	(void)usbd_transfer(xfer);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1181
/*
 * Completion handler for bulk read-ahead transfers.  Copies the data
 * just received into the endpoint's ring buffer, queues the next read
 * while there is still free space in the buffer, and wakes any
 * sleeping reader.  On error the UGEN_RA_WB_STOP flag is set so the
 * read path can restart read-ahead later.
 */
Static void
ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		/* Clear the halt so the pipe is usable again. */
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/*
	 * Copy data to buffer in at most two chunks: up to the end of
	 * the ring, then (after wrapping) the remainder at the start.
	 */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary (n = free space left). */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued. Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Buffer full: pause read-ahead until a read drains it. */
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a sleeping reader and notify select/poll. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1251
/*
 * Completion handler for bulk write-behind transfers.  Advances the
 * ring-buffer read pointer past the data just sent and, if more
 * buffered data remains, copies it into the xfer and queues the next
 * write; otherwise sets UGEN_RA_WB_STOP so the write path restarts
 * write-behind when it buffers new data.  Finally wakes any writer
 * sleeping for buffer space.
 */
Static void
ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	u_int32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		/* Clear the halt so the pipe is usable again. */
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers (wrap past the end of the ring). */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/*
		 * copy data from buffer -- at most two chunks because
		 * the occupied region may wrap around the ring end
		 */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued. Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		/* Nothing buffered; stop until the write path refills. */
		sce->state |= UGEN_RA_WB_STOP;

	/* Wake a writer sleeping for buffer space; notify select/poll. */
	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1319
1320 Static usbd_status
1321 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1322 {
1323 usbd_interface_handle iface;
1324 usb_endpoint_descriptor_t *ed;
1325 usbd_status err;
1326 struct ugen_endpoint *sce;
1327 u_int8_t niface, nendpt, endptno, endpt;
1328 int dir;
1329
1330 DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1331
1332 err = usbd_interface_count(sc->sc_udev, &niface);
1333 if (err)
1334 return (err);
1335 if (ifaceidx < 0 || ifaceidx >= niface)
1336 return (USBD_INVAL);
1337
1338 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1339 if (err)
1340 return (err);
1341 err = usbd_endpoint_count(iface, &nendpt);
1342 if (err)
1343 return (err);
1344
1345 /* change setting */
1346 err = usbd_set_interface(iface, altno);
1347 if (err)
1348 return (err);
1349
1350 err = usbd_endpoint_count(iface, &nendpt);
1351 if (err)
1352 return (err);
1353
1354 ugen_clear_endpoints(sc);
1355
1356 for (endptno = 0; endptno < nendpt; endptno++) {
1357 ed = usbd_interface2endpoint_descriptor(iface,endptno);
1358 KASSERT(ed != NULL);
1359 endpt = ed->bEndpointAddress;
1360 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1361 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1362 sce->sc = sc;
1363 sce->edesc = ed;
1364 sce->iface = iface;
1365 }
1366 return (0);
1367 }
1368
1369 /* Retrieve a complete descriptor for a certain device and index. */
1370 Static usb_config_descriptor_t *
1371 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1372 {
1373 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1374 int len;
1375 usbd_status err;
1376
1377 if (index == USB_CURRENT_CONFIG_INDEX) {
1378 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1379 len = UGETW(tdesc->wTotalLength);
1380 if (lenp)
1381 *lenp = len;
1382 cdesc = malloc(len, M_TEMP, M_WAITOK);
1383 memcpy(cdesc, tdesc, len);
1384 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1385 } else {
1386 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1387 if (err)
1388 return (0);
1389 len = UGETW(cdescr.wTotalLength);
1390 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1391 if (lenp)
1392 *lenp = len;
1393 cdesc = malloc(len, M_TEMP, M_WAITOK);
1394 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1395 if (err) {
1396 free(cdesc, M_TEMP);
1397 return (0);
1398 }
1399 }
1400 return (cdesc);
1401 }
1402
1403 Static int
1404 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1405 {
1406 usbd_interface_handle iface;
1407 usbd_status err;
1408
1409 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1410 if (err)
1411 return (-1);
1412 return (usbd_get_interface_altindex(iface));
1413 }
1414
/*
 * Implementation of all ugen(4) ioctls.  The first switch handles
 * commands that may target any endpoint; commands not handled there
 * are only valid on the control endpoint and are dispatched by the
 * second switch.  Runs with a device reference held by ugenioctl().
 *
 * Returns 0 on success or an errno value.
 */
Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	usbd_interface_handle iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return (EIO);

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		/*
		 * NOTE(review): only the IN endpoint's timeout is set
		 * here; the OUT side keeps its previous timeout.
		 */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
		    )
			return (EINVAL);
		sce->timeout = *(int *)addr;
		return (0);
	case USB_SET_BULK_RA:
		/* Enable/disable read-ahead on a bulk IN endpoint. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			/* Ring buffer: fill/cur/limit as in the intrs. */
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
			    NULL,
			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    USBD_NO_COPY, USBD_NO_TIMEOUT,
			    ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				/* Roll back all RA state on failure. */
				sce->state &= ~UGEN_BULK_RA;
				free(sce->ibuf, M_USBDEV);
				sce->ibuf = NULL;
				usbd_free_xfer(sce->ra_wb_xfer);
				return (EIO);
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return (0);

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_WB:
		/* Enable/disable write-behind on a bulk OUT endpoint. */
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return (EINVAL);

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return (0);

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return (EINVAL);
			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (sce->ra_wb_xfer == NULL)
				return (ENOMEM);
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			/*
			 * Set up a dmabuf because we reuse the xfer with
			 * the same (max) request length like isoc.
			 */
			if (usbd_alloc_buffer(sce->ra_wb_xfer,
					      sce->ra_wb_xferlen) == 0) {
				usbd_free_xfer(sce->ra_wb_xfer);
				return (ENOMEM);
			}
			sce->ibuf = malloc(sce->ra_wb_bufsize,
					   M_USBDEV, M_WAITOK);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			/*
			 * Unlike RA, no transfer is queued here; WB
			 * starts in the stopped state until data is
			 * buffered by a write.
			 */
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return (0);

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_free_xfer(sce->ra_wb_xfer);
			free(sce->ibuf, M_USBDEV);
			sce->ibuf = NULL;
		}
		return (0);
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		/* RA options apply to the IN side, WB to the OUT side. */
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		/* Sanity-check the user-supplied sizes. */
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return (EINVAL);
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return (0);
	}
	default:
		break;
	}

	/* Everything below is only valid on the control endpoint. */
	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		/* Validate the interface index before switching. */
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP);
		break;
	case USB_GET_DEVICE_DESC:
		/* Struct copy straight into the caller's buffer. */
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		/* Resolve "current alt" to a concrete alt index. */
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		/* Resolve "current alt" to a concrete alt index. */
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		/* Copy out at most what the caller's buffer can hold. */
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP);
		return (error);
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			  si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		/* Bound the payload length (wLength is 16 bits). */
		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			/* Host-to-device: fetch the payload first. */
			if (uio.uio_rw == UIO_WRITE) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		/* The control endpoint's IN slot supplies the timeout. */
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			/* Device-to-host: copy the result back out. */
			if (uio.uio_rw == UIO_READ) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
					 (struct usb_device_info_old *)addr, 0);

		break;
#endif
	default:
		return (EINVAL);
	}
	return (0);
}
1842
1843 int
1844 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1845 {
1846 int endpt = UGENENDPOINT(dev);
1847 struct ugen_softc *sc;
1848 int error;
1849
1850 sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1851 if (sc == NULL)
1852 return ENXIO;
1853
1854 sc->sc_refcnt++;
1855 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1856 if (--sc->sc_refcnt < 0)
1857 usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
1858 return (error);
1859 }
1860
/*
 * poll(2)/select(2) entry point.  Reports readability/writability of
 * the IN/OUT endpoints of the minor device according to the endpoint
 * transfer type, recording the caller on the endpoint selinfo when no
 * event is pending.  The control endpoint is not pollable.
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (POLLHUP);

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	/* NOTE(review): addresses of array members are never NULL,
	   so this test can never fire. */
	if (sce_in == NULL && sce_out == NULL)
		return (POLLERR);
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return (POLLERR);
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return (POLLERR);
	}
#endif

	/* Check event state under the same lock the intr handlers use. */
	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Readable when the clist holds buffered data. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Readable when the ring buffer is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Read-ahead: report buffered bytes. */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Write-behind: writable while space left. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return (revents);
}
1959
1960 static void
1961 filt_ugenrdetach(struct knote *kn)
1962 {
1963 struct ugen_endpoint *sce = kn->kn_hook;
1964 struct ugen_softc *sc = sce->sc;
1965
1966 mutex_enter(&sc->sc_lock);
1967 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1968 mutex_exit(&sc->sc_lock);
1969 }
1970
1971 static int
1972 filt_ugenread_intr(struct knote *kn, long hint)
1973 {
1974 struct ugen_endpoint *sce = kn->kn_hook;
1975
1976 kn->kn_data = sce->q.c_cc;
1977 return (kn->kn_data > 0);
1978 }
1979
1980 static int
1981 filt_ugenread_isoc(struct knote *kn, long hint)
1982 {
1983 struct ugen_endpoint *sce = kn->kn_hook;
1984
1985 if (sce->cur == sce->fill)
1986 return (0);
1987
1988 if (sce->cur < sce->fill)
1989 kn->kn_data = sce->fill - sce->cur;
1990 else
1991 kn->kn_data = (sce->limit - sce->cur) +
1992 (sce->fill - sce->ibuf);
1993
1994 return (1);
1995 }
1996
1997 static int
1998 filt_ugenread_bulk(struct knote *kn, long hint)
1999 {
2000 struct ugen_endpoint *sce = kn->kn_hook;
2001
2002 if (!(sce->state & UGEN_BULK_RA))
2003 /*
2004 * We have no easy way of determining if a read will
2005 * yield any data or a write will happen.
2006 * So, emulate "seltrue".
2007 */
2008 return (filt_seltrue(kn, hint));
2009
2010 if (sce->ra_wb_used == 0)
2011 return (0);
2012
2013 kn->kn_data = sce->ra_wb_used;
2014
2015 return (1);
2016 }
2017
2018 static int
2019 filt_ugenwrite_bulk(struct knote *kn, long hint)
2020 {
2021 struct ugen_endpoint *sce = kn->kn_hook;
2022
2023 if (!(sce->state & UGEN_BULK_WB))
2024 /*
2025 * We have no easy way of determining if a read will
2026 * yield any data or a write will happen.
2027 * So, emulate "seltrue".
2028 */
2029 return (filt_seltrue(kn, hint));
2030
2031 if (sce->ra_wb_used == sce->limit - sce->ibuf)
2032 return (0);
2033
2034 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2035
2036 return (1);
2037 }
2038
/*
 * kqueue filter-operation tables for the four event sources.  All
 * four share filt_ugenrdetach because read and write knotes hang off
 * the same per-endpoint 'rsel' selinfo klist.
 * NOTE(review): the leading "1" is positional -- presumably the
 * struct filterops f_isfd member; confirm against <sys/event.h>.
 */
static const struct filterops ugenread_intr_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };

static const struct filterops ugenread_isoc_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };

static const struct filterops ugenread_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };

static const struct filterops ugenwrite_bulk_filtops =
	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2050
/*
 * kqueue(2) attach entry point.  Chooses the filterops matching the
 * requested filter and the endpoint's transfer type, then hooks the
 * knote onto the endpoint's klist under the softc lock.  The control
 * endpoint is not pollable.  Returns 0 or an errno value.
 */
int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return (ENXIO);

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		/* Reads are serviced by the IN side of the endpoint. */
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	case EVFILT_WRITE:
		/* Writes are serviced by the OUT side of the endpoint. */
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return (EINVAL);

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return (EINVAL);

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return (EINVAL);
		}
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sce;

	/* Publish the knote under the lock the intr handlers hold. */
	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return (0);
}
2122