/*	$NetBSD: ugen.c,v 1.157 2020/08/18 14:32:34 riastradh Exp $	*/
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.157 2020/08/18 14:32:34 riastradh Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #include "opt_usb.h"
45 #endif
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/kmem.h>
51 #include <sys/device.h>
52 #include <sys/ioctl.h>
53 #include <sys/conf.h>
54 #include <sys/tty.h>
55 #include <sys/file.h>
56 #include <sys/select.h>
57 #include <sys/proc.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60 #include <sys/compat_stub.h>
61 #include <sys/module.h>
62 #include <sys/rbtree.h>
63
64 #include <dev/usb/usb.h>
65 #include <dev/usb/usbdi.h>
66 #include <dev/usb/usbdi_util.h>
67
68 #include "ioconf.h"
69
70 #ifdef UGEN_DEBUG
71 #define DPRINTF(x) if (ugendebug) printf x
72 #define DPRINTFN(n,x) if (ugendebug>(n)) printf x
73 int ugendebug = 0;
74 #else
75 #define DPRINTF(x)
76 #define DPRINTFN(n,x)
77 #endif
78
79 #define UGEN_CHUNK 128 /* chunk size for read */
80 #define UGEN_IBSIZE 1020 /* buffer size */
81 #define UGEN_BBSIZE 1024
82
83 #define UGEN_NISOREQS 4 /* number of outstanding xfer requests */
84 #define UGEN_NISORFRMS 8 /* number of transactions per req */
85 #define UGEN_NISOFRAMES (UGEN_NISORFRMS * UGEN_NISOREQS)
86
87 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
88 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
89
/*
 * One outstanding isochronous-IN request: the xfer, the DMA buffer
 * backing it, and the per-frame size array handed to
 * usbd_setup_isoc_xfer().  UGEN_NISOREQS of these are kept in flight.
 */
struct isoreq {
	struct ugen_endpoint *sce;	/* back-pointer to owning endpoint */
	struct usbd_xfer *xfer;
	void *dmabuf;			/* usbd_get_buffer(xfer) */
	uint16_t sizes[UGEN_NISORFRMS];
};
96
/*
 * Per-endpoint, per-direction software state; indexed by endpoint
 * address and OUT/IN in ugen_softc::sc_endpoints.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back-pointer to owning instance */
	usb_endpoint_descriptor_t *edesc; /* NULL if endpoint not in config */
	struct usbd_interface *iface;
	int state;			/* UGEN_* flag bits below */
#define UGEN_SHORT_OK 0x04 /* short xfers are OK */
#define UGEN_BULK_RA 0x08 /* in bulk read-ahead mode */
#define UGEN_BULK_WB 0x10 /* in bulk write-behind mode */
#define UGEN_RA_WB_STOP 0x20 /* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;	/* open pipe, NULL when closed */
	struct clist q;			/* interrupt-IN byte queue */
	u_char *ibuf; /* start of buffer (circular for isoc) */
	u_char *fill; /* location for input (isoc) */
	u_char *limit; /* end of circular buffer (isoc) */
	u_char *cur; /* current read location (isoc) */
	uint32_t timeout;		/* blocking-I/O timeout, ms (mstohz) */
	uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	uint32_t ra_wb_used; /* how much is in buffer */
	uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;
	struct isoreq isoreqs[UGEN_NISOREQS];
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;		/* poll/kqueue notification */
	kcondvar_t cv;			/* sleep point for blocking I/O */
};
124
/* Per-instance driver state (one per attached device or interface). */
struct ugen_softc {
	device_t sc_dev; /* base device */
	struct usbd_device *sc_udev;
	struct rb_node sc_node;		/* linkage in ugenif.tree */
	unsigned sc_unit;		/* unit number; tree key */

	kmutex_t sc_lock;
	kcondvar_t sc_detach_cv;	/* detach waits here for last user */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* exclusive-open flags */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN 1

	int sc_refcnt;			/* active users (ugenif_acquire) */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for sync I/O */
	u_char sc_dying;		/* set when device is going away */
	u_char sc_attached;		/* attach completed successfully */
};
144
/*
 * Global registry of attached instances, ordered by unit number.
 * ugenif.lock serializes all tree lookups, inserts, and removals.
 */
static struct {
	kmutex_t lock;
	rb_tree_t tree;
} ugenif __cacheline_aligned;
149
150 static int
151 compare_ugen(void *cookie, const void *vsca, const void *vscb)
152 {
153 const struct ugen_softc *sca = vsca;
154 const struct ugen_softc *scb = vscb;
155
156 if (sca->sc_unit < scb->sc_unit)
157 return -1;
158 if (sca->sc_unit > scb->sc_unit)
159 return +1;
160 return 0;
161 }
162
163 static int
164 compare_ugen_key(void *cookie, const void *vsc, const void *vk)
165 {
166 const struct ugen_softc *sc = vsc;
167 const unsigned *k = vk;
168
169 if (sc->sc_unit < *k)
170 return -1;
171 if (sc->sc_unit > *k)
172 return +1;
173 return 0;
174 }
175
/* rb_tree glue: index attached ugen instances by sc_unit. */
static const rb_tree_ops_t ugenif_tree_ops = {
	.rbto_compare_nodes = compare_ugen,
	.rbto_compare_key = compare_ugen_key,
	.rbto_node_offset = offsetof(struct ugen_softc, sc_node),
};
181
182 static void
183 ugenif_get_unit(struct ugen_softc *sc)
184 {
185 struct ugen_softc *sc0;
186 unsigned i;
187
188 mutex_enter(&ugenif.lock);
189 for (i = 0, sc0 = RB_TREE_MIN(&ugenif.tree);
190 sc0 != NULL && i == sc0->sc_unit;
191 i++, sc0 = RB_TREE_NEXT(&ugenif.tree, sc0))
192 KASSERT(i < UINT_MAX);
193 KASSERT(rb_tree_find_node(&ugenif.tree, &i) == NULL);
194 sc->sc_unit = i;
195 sc0 = rb_tree_insert_node(&ugenif.tree, sc);
196 KASSERT(sc0 == sc);
197 KASSERT(rb_tree_find_node(&ugenif.tree, &i) == sc);
198 mutex_exit(&ugenif.lock);
199 }
200
/*
 * Remove sc from the unit registry and poison its unit number so any
 * stale use is visible.
 */
static void
ugenif_put_unit(struct ugen_softc *sc)
{

	mutex_enter(&ugenif.lock);
	KASSERT(rb_tree_find_node(&ugenif.tree, &sc->sc_unit) == sc);
	rb_tree_remove_node(&ugenif.tree, sc);
	sc->sc_unit = -1;
	mutex_exit(&ugenif.lock);
}
211
212 static struct ugen_softc *
213 ugenif_acquire(unsigned unit)
214 {
215 struct ugen_softc *sc;
216
217 mutex_enter(&ugenif.lock);
218 sc = rb_tree_find_node(&ugenif.tree, &unit);
219 if (sc == NULL)
220 goto out;
221 mutex_enter(&sc->sc_lock);
222 if (sc->sc_dying) {
223 mutex_exit(&sc->sc_lock);
224 sc = NULL;
225 goto out;
226 }
227 KASSERT(sc->sc_refcnt < INT_MAX);
228 sc->sc_refcnt++;
229 mutex_exit(&sc->sc_lock);
230 out: mutex_exit(&ugenif.lock);
231
232 return sc;
233 }
234
/*
 * Drop a reference taken by ugenif_acquire().  The count only goes
 * negative after ugen_detach() has applied its own decrement, at
 * which point the detaching thread is woken via sc_detach_cv.
 */
static void
ugenif_release(struct ugen_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt < 0)
		cv_broadcast(&sc->sc_detach_cv);
	mutex_exit(&sc->sc_lock);
}
244
245 static dev_type_open(ugenopen);
246 static dev_type_close(ugenclose);
247 static dev_type_read(ugenread);
248 static dev_type_write(ugenwrite);
249 static dev_type_ioctl(ugenioctl);
250 static dev_type_poll(ugenpoll);
251 static dev_type_kqfilter(ugenkqfilter);
252
/*
 * Character-device switch for /dev/ugenN.EE nodes.  stop/tty/mmap/
 * discard are unsupported; D_OTHER marks a non-tty, non-disk device.
 */
const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
267
268 Static void ugenintr(struct usbd_xfer *, void *,
269 usbd_status);
270 Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
271 usbd_status);
272 Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
273 usbd_status);
274 Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
275 usbd_status);
276 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
277 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
278 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
279 void *, int, struct lwp *);
280 Static int ugen_set_config(struct ugen_softc *, int, int);
281 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
282 int, int *);
283 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
284 Static int ugen_get_alt_index(struct ugen_softc *, int);
285 Static void ugen_clear_endpoints(struct ugen_softc *);
286
287 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
288 #define UGENENDPOINT(n) (minor(n) & 0xf)
289 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
290
291 static int ugenif_match(device_t, cfdata_t, void *);
292 static void ugenif_attach(device_t, device_t, void *);
293 static int ugen_match(device_t, cfdata_t, void *);
294 static void ugen_attach(device_t, device_t, void *);
295 static int ugen_detach(device_t, int);
296 static int ugen_activate(device_t, enum devact);
297
298 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match,
299 ugen_attach, ugen_detach, ugen_activate);
300 CFATTACH_DECL_NEW(ugenif, sizeof(struct ugen_softc), ugenif_match,
301 ugenif_attach, ugen_detach, ugen_activate);
302
303 /* toggle to control attach priority. -1 means "let autoconf decide" */
304 int ugen_override = -1;
305
306 static int
307 ugen_match(device_t parent, cfdata_t match, void *aux)
308 {
309 struct usb_attach_arg *uaa = aux;
310 int override;
311
312 if (ugen_override != -1)
313 override = ugen_override;
314 else
315 override = match->cf_flags & 1;
316
317 if (override)
318 return UMATCH_HIGHEST;
319 else if (uaa->uaa_usegeneric)
320 return UMATCH_GENERIC;
321 else
322 return UMATCH_NONE;
323 }
324
/*
 * Autoconf match for the single-interface (ugenif) attachment: always
 * match at the highest priority.
 */
static int
ugenif_match(device_t parent, cfdata_t match, void *aux)
{
	/* Assume that they knew what they configured! (see ugenif(4)) */
	return UMATCH_HIGHEST;
}
331
332 static void
333 ugen_attach(device_t parent, device_t self, void *aux)
334 {
335 struct usb_attach_arg *uaa = aux;
336 struct usbif_attach_arg uiaa;
337
338 memset(&uiaa, 0, sizeof(uiaa));
339 uiaa.uiaa_port = uaa->uaa_port;
340 uiaa.uiaa_vendor = uaa->uaa_vendor;
341 uiaa.uiaa_product = uaa->uaa_product;
342 uiaa.uiaa_release = uaa->uaa_release;
343 uiaa.uiaa_device = uaa->uaa_device;
344 uiaa.uiaa_configno = -1;
345 uiaa.uiaa_ifaceno = -1;
346
347 ugenif_attach(parent, self, &uiaa);
348 }
349
/*
 * Attach a whole device (uiaa_ifaceno < 0, via ugen_attach) or a
 * single interface.  Initializes the softc lock and per-endpoint
 * selinfo/cv, selects the default configuration when attaching a
 * whole device, builds the endpoint table, and registers a unit.
 */
static void
ugenif_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uiaa->uiaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uiaa->uiaa_device;

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	if (uiaa->uiaa_ifaceno < 0) {
		/*
		 * If we attach the whole device,
		 * set configuration index 0, the default one.
		 */
		err = usbd_set_config_index(udev, 0, 0);
		if (err) {
			aprint_error_dev(self,
			    "setting configuration index 0 failed\n");
			return;
		}
	}

	/* Get current configuration */
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf, uiaa->uiaa_ifaceno < 0);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		return;
	}

	/*
	 * On the error returns above, sc_attached stays 0, so
	 * ugen_detach() skips pipe/unit teardown for this instance.
	 */
	ugenif_get_unit(sc);
	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);
	sc->sc_attached = 1;
}
414
415 Static void
416 ugen_clear_endpoints(struct ugen_softc *sc)
417 {
418
419 /* Clear out the old info, but leave the selinfo and cv initialised. */
420 for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
421 for (int dir = OUT; dir <= IN; dir++) {
422 struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
423 memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
424 }
425 }
426 }
427
/*
 * Select configuration `configno' and rebuild the endpoint table from
 * its descriptors.  When `chkopen' is set, refuse with USBD_IN_USE
 * while any non-control endpoint is open, since the change would
 * invalidate open pipes.  Returns a usbd_status code.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno, int chkopen)
{
	struct usbd_device *dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
	    device_xname(sc->sc_dev), configno, sc));

	if (chkopen) {
		/*
		 * We start at 1, not 0, because we don't care whether the
		 * control endpoint is open or not. It is always present.
		 */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
			if (sc->sc_is_open[endptno]) {
				DPRINTFN(1,
				    ("ugen_set_config: %s - endpoint %d is open\n",
				    device_xname(sc->sc_dev), endptno));
				return USBD_IN_USE;
			}
	}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return err;
	}

	/* Forget stale endpoint state; selinfo/cv survive. */
	ugen_clear_endpoints(sc);

	err = usbd_interface_count(dev, &niface);
	if (err)
		return err;

	/* Record descriptor/iface pointers for every endpoint present. */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return err;
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return err;
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
			    "(%d,%d), sce=%p\n",
			    endptno, endpt, UE_GET_ADDR(endpt),
			    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return USBD_NORMAL_COMPLETION;
}
497
/*
 * Open a ugen endpoint device node.
 *
 * The control endpoint allows multiple opens; all other endpoints are
 * exclusive.  For each direction requested in `flag' the matching USB
 * pipe is opened and per-type buffering is set up:
 *   interrupt IN:   clist fed by ugenintr()
 *   isochronous IN: circular frame buffer fed by ugen_isoc_rintr()
 *   bulk:           plain pipe; RA/WB modes are enabled later by ioctl
 */
static int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	struct usbd_xfer *xfer;
	int i, j;
	int error;

	if ((sc = ugenif_acquire(unit)) == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
	    flag, mode, unit, endpt));

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		error = 0;
		goto out;
	}

	if (sc->sc_is_open[endpt]) {
		error = EBUSY;
		goto out;
	}

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce->edesc == NULL) {
				error = ENXIO;
				goto out;
			}
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
		    sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				/* Interrupt-OUT needs no local buffering. */
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err) {
					error = EIO;
					goto out;
				}
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0) { /* shouldn't happen */
				error = EINVAL;
				goto out;
			}
			sce->ibuf = kmem_alloc(isize, KM_SLEEP);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
			    endpt, isize));
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				error = ENOMEM;
				goto out;
			}
			err = usbd_open_pipe_intr(sce->iface,
			    edesc->bEndpointAddress,
			    USBD_SHORT_XFER_OK, &sce->pipeh, sce,
			    sce->ibuf, isize, ugenintr,
			    USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				error = EIO;
				goto out;
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				error = EIO;
				goto out;
			}
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT) {
				/* Isochronous output is not supported. */
				error = EINVAL;
				goto out;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0) { /* shouldn't happen */
				error = EINVAL;
				goto out;
			}
			sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
			    KM_SLEEP);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
			    endpt, isize));
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
				sce->ibuf = NULL;
				error = EIO;
				goto out;
			}
			/* Prime all requests so the pipe stays busy. */
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				err = usbd_create_xfer(sce->pipeh,
				    isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
				    &xfer);
				if (err)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
				for (j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
				    sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
				    ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			while (--i >= 0) /* implicit buffer free */
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
			sce->ibuf = NULL;
			error = ENOMEM;
			goto out;
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			/* Extra control endpoints are not supported. */
			error = EINVAL;
			goto out;
		}
	}
	sc->sc_is_open[endpt] = 1;
	error = 0;
out:	ugenif_release(sc);
	return error;
}
667
/*
 * Close a ugen endpoint node.  For non-control endpoints, abort and
 * close the pipe for each direction that was opened, and release the
 * buffering set up by ugenopen() (clist, isoc ring, or RA/WB xfer).
 */
static int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;
	int error;

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
	    flag, mode, UGENUNIT(dev), endpt));

	KASSERT(sc->sc_is_open[endpt]);

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		error = 0;
		goto out;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
		    endpt, dir, sce));

		/* Stop any transfer in progress before tearing down. */
		usbd_abort_pipe(sce->pipeh);

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		int msize = 0;	/* size of sce->ibuf to free, if any */

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;
	error = 0;

out:	ugenif_release(sc);
	return error;
}
740
/*
 * Read from an IN endpoint.  Strategy depends on the transfer type:
 * interrupt reads drain the clist filled by ugenintr(); bulk reads
 * either consume the read-ahead ring (UGEN_BULK_RA) or do synchronous
 * transfers via sc_buffer; isochronous reads consume the circular
 * frame buffer filled by ugen_isoc_rintr().
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	uint32_t n, tn;
	struct usbd_xfer *xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

	KASSERT(sce->edesc);
	KASSERT(sce->pipeh);

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error)
				break;
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = uimin(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
			    uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for ugen_bulkra_intr() to fill ring. */
				while (sce->ra_wb_used == 0) {
					DPRINTFN(5,
					    ("ugenread: sleep on %p\n",
					    sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
					    ("ugenread: woke, error=%d\n",
					    error));
					if (sc->sc_dying)
						error = EIO;
					if (error)
						break;
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				    && sce->ra_wb_used > 0) {
					n = uimin(uio->uio_resid,
					    sce->ra_wb_used);
					n = uimin(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer, sce, NULL,
					    uimin(n, sce->ra_wb_xferlen),
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued. Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain synchronous bulk read, UGEN_BBSIZE at a time. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(xfer, sce->pipeh,
			    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
			    sce->timeout, sc->sc_buffer, &tn);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* Wait for ugen_isoc_rintr() to advance the fill pointer. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error)
				break;
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			/* Copy at most up to the wrap point per pass. */
			if(sce->fill > sce->cur)
				n = uimin(sce->fill - sce->cur, uio->uio_resid);
			else
				n = uimin(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return ENXIO;
	}
	return error;
}
934
935 static int
936 ugenread(dev_t dev, struct uio *uio, int flag)
937 {
938 int endpt = UGENENDPOINT(dev);
939 struct ugen_softc *sc;
940 int error;
941
942 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
943 return ENXIO;
944 error = ugen_do_read(sc, endpt, uio, flag);
945 ugenif_release(sc);
946
947 return error;
948 }
949
/*
 * Write to an OUT endpoint.  Bulk endpoints either append to the
 * write-behind ring drained by ugen_bulkwb_intr() (UGEN_BULK_WB) or
 * do synchronous transfers via sc_buffer; interrupt endpoints write
 * one max-packet-size chunk at a time.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	uint32_t n;
	int error = 0;
	uint32_t tn;
	char *dbuf;
	struct usbd_xfer *xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

	KASSERT(sce->edesc);
	KASSERT(sce->pipeh);

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
			    uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			/* Ring full and caller asked not to block. */
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for the interrupt to drain space. */
				while (sce->ra_wb_used ==
				    sce->limit - sce->ibuf) {
					DPRINTFN(5,
					    ("ugenwrite: sleep on %p\n",
					    sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
					    ("ugenwrite: woke, error=%d\n",
					    error));
					if (sc->sc_dying)
						error = EIO;
					if (error)
						break;
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = uimin(uio->uio_resid,
					    (sce->limit - sce->ibuf)
					    - sce->ra_wb_used);
					n = uimin(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					/*
					 * Linearize up to one xfer's worth
					 * out of the circular buffer.
					 */
					dbuf = (char *)usbd_get_buffer(xfer);
					n = uimin(sce->ra_wb_used,
					    sce->ra_wb_xferlen);
					tn = uimin(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						    n - tn);
					usbd_setup_xfer(xfer, sce, NULL, n,
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued. Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain synchronous bulk write, UGEN_BBSIZE at a time. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0, sce->timeout,
			    sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_INTERRUPT:
		error = usbd_create_xfer(sce->pipeh,
		    UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	default:
		return ENXIO;
	}
	return error;
}
1103
1104 static int
1105 ugenwrite(dev_t dev, struct uio *uio, int flag)
1106 {
1107 int endpt = UGENENDPOINT(dev);
1108 struct ugen_softc *sc;
1109 int error;
1110
1111 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
1112 return ENXIO;
1113 error = ugen_do_write(sc, endpt, uio, flag);
1114 ugenif_release(sc);
1115
1116 return error;
1117 }
1118
1119 static int
1120 ugen_activate(device_t self, enum devact act)
1121 {
1122 struct ugen_softc *sc = device_private(self);
1123
1124 switch (act) {
1125 case DVACT_DEACTIVATE:
1126 sc->sc_dying = 1;
1127 return 0;
1128 default:
1129 return EOPNOTSUPP;
1130 }
1131 }
1132
/*
 * Autoconf detach hook.  Marks the device dying, aborts all open
 * pipes to wake blocked transfers, waits (bounded) for remaining
 * references to drain, revokes the device vnodes, and finally
 * destroys per-endpoint and per-softc synchronization objects.
 */
static int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));

	/* Prevent new operations from starting. */
	sc->sc_dying = 1;
	pmf_device_deregister(self);

	/* If attach never completed, only local state needs tearing down. */
	if (!sc->sc_attached)
		goto out;

	/* Abort all pipes. Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			cv_signal(&sc->sc_endpoints[i][IN].cv);
		/* Wait for processes to go away. */
		if (cv_timedwait(&sc->sc_detach_cv, &sc->sc_lock, hz * 60))
			aprint_error_dev(self, ": didn't detach\n");
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = sc->sc_unit * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);
	ugenif_put_unit(sc);

	/* Destroy per-endpoint selinfo/condvars, then softc-level sync. */
out:	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}
1192
/*
 * Completion callback for interrupt-IN transfers started at open
 * time.  Appends the received bytes to the endpoint's clist queue
 * and wakes readers (cv) and pollers (selnotify).
 */
Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	/* Pipe is being aborted/closed; nothing to deliver. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		/* Clear a stall asynchronously (we are in USB completion
		 * context and may not issue synchronous control requests). */
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	/* Queue the data; an overflowing b_to_q drops bytes silently. */
	mutex_enter(&sc->sc_lock);
	(void)b_to_q(ibuf, count, &sce->q);
	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1225
1226 Static void
1227 ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
1228 usbd_status status)
1229 {
1230 struct isoreq *req = addr;
1231 struct ugen_endpoint *sce = req->sce;
1232 struct ugen_softc *sc = sce->sc;
1233 uint32_t count, n;
1234 int i, isize;
1235
1236 /* Return if we are aborting. */
1237 if (status == USBD_CANCELLED)
1238 return;
1239
1240 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1241 DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1242 (long)(req - sce->isoreqs), count));
1243
1244 mutex_enter(&sc->sc_lock);
1245
1246 /* throw away oldest input if the buffer is full */
1247 if (sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1248 sce->cur += count;
1249 if (sce->cur >= sce->limit)
1250 sce->cur = sce->ibuf + (sce->limit - sce->cur);
1251 DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1252 count));
1253 }
1254
1255 isize = UGETW(sce->edesc->wMaxPacketSize);
1256 for (i = 0; i < UGEN_NISORFRMS; i++) {
1257 uint32_t actlen = req->sizes[i];
1258 char const *tbuf = (char const *)req->dmabuf + isize * i;
1259
1260 /* copy data to buffer */
1261 while (actlen > 0) {
1262 n = uimin(actlen, sce->limit - sce->fill);
1263 memcpy(sce->fill, tbuf, n);
1264
1265 tbuf += n;
1266 actlen -= n;
1267 sce->fill += n;
1268 if (sce->fill == sce->limit)
1269 sce->fill = sce->ibuf;
1270 }
1271
1272 /* setup size for next transfer */
1273 req->sizes[i] = isize;
1274 }
1275
1276 usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0,
1277 ugen_isoc_rintr);
1278 (void)usbd_transfer(xfer);
1279
1280 cv_signal(&sce->cv);
1281 mutex_exit(&sc->sc_lock);
1282 selnotify(&sce->rsel, 0, 0);
1283 }
1284
/*
 * Completion callback for bulk read-ahead (RA) transfers.  Copies
 * the just-received data from the xfer's DMA buffer into the
 * endpoint's circular buffer and, while there is still free space,
 * resubmits the xfer.  Sets UGEN_RA_WB_STOP on error or when the
 * buffer fills; the next read restarts the stream.
 */
Static void
ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		/* Stop read-ahead; next read will try to restart it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	/* First chunk: up to the end of the circular buffer. */
	n = uimin(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	/* Remainder (if any) wraps to the start of the buffer. */
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce, NULL, uimin(n, sce->ra_wb_xferlen), 0,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1350
/*
 * Completion callback for bulk write-behind (WB) transfers.  The
 * just-completed xfer consumed `count` bytes from the endpoint's
 * circular buffer; advance the consumer pointer and, if more data
 * is buffered, stage and submit the next write.  Sets
 * UGEN_RA_WB_STOP on error or when the buffer drains; the next
 * write restarts the stream.
 */
Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		/* Stop write-behind; next write will try to restart it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = uimin(sce->ra_wb_used, sce->ra_wb_xferlen);
		/* First chunk up to the end of the buffer, then wrap. */
		n = uimin(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
		    ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1415
/*
 * Select alternate setting `altno` on interface `ifaceidx`, then
 * rebuild this softc's endpoint table from the new setting's
 * endpoint descriptors.  Returns a usbd_status (USBD_INVAL for a
 * bad interface index).
 */
Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt, endptno, endpt;
	int dir;

	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return err;
	if (ifaceidx < 0 || ifaceidx >= niface)
		return USBD_INVAL;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return err;
	/*
	 * NOTE(review): endpoint count is queried both before and after
	 * usbd_set_interface; the pre-switch value is overwritten below.
	 * Presumably the first call only validates the handle — confirm
	 * before simplifying.
	 */
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return err;

	/* Re-query: the alternate setting may expose different endpoints. */
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	ugen_clear_endpoints(sc);

	/* Repopulate sc_endpoints from the new setting's descriptors. */
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return 0;
}
1464
1465 /* Retrieve a complete descriptor for a certain device and index. */
1466 Static usb_config_descriptor_t *
1467 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1468 {
1469 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1470 int len;
1471 usbd_status err;
1472
1473 if (index == USB_CURRENT_CONFIG_INDEX) {
1474 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1475 if (tdesc == NULL)
1476 return NULL;
1477 len = UGETW(tdesc->wTotalLength);
1478 if (lenp)
1479 *lenp = len;
1480 cdesc = kmem_alloc(len, KM_SLEEP);
1481 memcpy(cdesc, tdesc, len);
1482 DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1483 } else {
1484 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1485 if (err)
1486 return 0;
1487 len = UGETW(cdescr.wTotalLength);
1488 DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1489 if (lenp)
1490 *lenp = len;
1491 cdesc = kmem_alloc(len, KM_SLEEP);
1492 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1493 if (err) {
1494 kmem_free(cdesc, len);
1495 return 0;
1496 }
1497 }
1498 return cdesc;
1499 }
1500
1501 Static int
1502 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1503 {
1504 struct usbd_interface *iface;
1505 usbd_status err;
1506
1507 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1508 if (err)
1509 return -1;
1510 return usbd_get_interface_altindex(iface);
1511 }
1512
1513 Static int
1514 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1515 void *addr, int flag, struct lwp *l)
1516 {
1517 struct ugen_endpoint *sce;
1518 usbd_status err;
1519 struct usbd_interface *iface;
1520 struct usb_config_desc *cd;
1521 usb_config_descriptor_t *cdesc;
1522 struct usb_interface_desc *id;
1523 usb_interface_descriptor_t *idesc;
1524 struct usb_endpoint_desc *ed;
1525 usb_endpoint_descriptor_t *edesc;
1526 struct usb_alt_interface *ai;
1527 struct usb_string_desc *si;
1528 uint8_t conf, alt;
1529 int cdesclen;
1530 int error;
1531 int dir;
1532
1533 DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
1534
1535 switch (cmd) {
1536 case FIONBIO:
1537 /* All handled in the upper FS layer. */
1538 return 0;
1539 case USB_SET_SHORT_XFER:
1540 if (endpt == USB_CONTROL_ENDPOINT)
1541 return EINVAL;
1542 /* This flag only affects read */
1543 sce = &sc->sc_endpoints[endpt][IN];
1544 if (sce == NULL || sce->pipeh == NULL)
1545 return EINVAL;
1546 if (*(int *)addr)
1547 sce->state |= UGEN_SHORT_OK;
1548 else
1549 sce->state &= ~UGEN_SHORT_OK;
1550 return 0;
1551 case USB_SET_TIMEOUT:
1552 for (dir = OUT; dir <= IN; dir++) {
1553 sce = &sc->sc_endpoints[endpt][dir];
1554 if (sce == NULL)
1555 return EINVAL;
1556
1557 sce->timeout = *(int *)addr;
1558 }
1559 return 0;
1560 case USB_SET_BULK_RA:
1561 if (endpt == USB_CONTROL_ENDPOINT)
1562 return EINVAL;
1563 sce = &sc->sc_endpoints[endpt][IN];
1564 if (sce == NULL || sce->pipeh == NULL)
1565 return EINVAL;
1566 edesc = sce->edesc;
1567 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1568 return EINVAL;
1569
1570 if (*(int *)addr) {
1571 /* Only turn RA on if it's currently off. */
1572 if (sce->state & UGEN_BULK_RA)
1573 return 0;
1574
1575 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1576 /* shouldn't happen */
1577 return EINVAL;
1578 error = usbd_create_xfer(sce->pipeh,
1579 sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
1580 if (error)
1581 return error;
1582 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1583 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1584 sce->fill = sce->cur = sce->ibuf;
1585 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1586 sce->ra_wb_used = 0;
1587 sce->state |= UGEN_BULK_RA;
1588 sce->state &= ~UGEN_RA_WB_STOP;
1589 /* Now start reading. */
1590 usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL,
1591 uimin(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1592 0, USBD_NO_TIMEOUT, ugen_bulkra_intr);
1593 err = usbd_transfer(sce->ra_wb_xfer);
1594 if (err != USBD_IN_PROGRESS) {
1595 sce->state &= ~UGEN_BULK_RA;
1596 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1597 sce->ibuf = NULL;
1598 usbd_destroy_xfer(sce->ra_wb_xfer);
1599 return EIO;
1600 }
1601 } else {
1602 /* Only turn RA off if it's currently on. */
1603 if (!(sce->state & UGEN_BULK_RA))
1604 return 0;
1605
1606 sce->state &= ~UGEN_BULK_RA;
1607 usbd_abort_pipe(sce->pipeh);
1608 usbd_destroy_xfer(sce->ra_wb_xfer);
1609 /*
1610 * XXX Discard whatever's in the buffer, but we
1611 * should keep it around and drain the buffer
1612 * instead.
1613 */
1614 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1615 sce->ibuf = NULL;
1616 }
1617 return 0;
1618 case USB_SET_BULK_WB:
1619 if (endpt == USB_CONTROL_ENDPOINT)
1620 return EINVAL;
1621 sce = &sc->sc_endpoints[endpt][OUT];
1622 if (sce == NULL || sce->pipeh == NULL)
1623 return EINVAL;
1624 edesc = sce->edesc;
1625 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1626 return EINVAL;
1627
1628 if (*(int *)addr) {
1629 /* Only turn WB on if it's currently off. */
1630 if (sce->state & UGEN_BULK_WB)
1631 return 0;
1632
1633 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1634 /* shouldn't happen */
1635 return EINVAL;
1636 error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize,
1637 0, 0, &sce->ra_wb_xfer);
1638 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1639 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1640 sce->fill = sce->cur = sce->ibuf;
1641 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1642 sce->ra_wb_used = 0;
1643 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1644 } else {
1645 /* Only turn WB off if it's currently on. */
1646 if (!(sce->state & UGEN_BULK_WB))
1647 return 0;
1648
1649 sce->state &= ~UGEN_BULK_WB;
1650 /*
1651 * XXX Discard whatever's in the buffer, but we
1652 * should keep it around and keep writing to
1653 * drain the buffer instead.
1654 */
1655 usbd_abort_pipe(sce->pipeh);
1656 usbd_destroy_xfer(sce->ra_wb_xfer);
1657 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1658 sce->ibuf = NULL;
1659 }
1660 return 0;
1661 case USB_SET_BULK_RA_OPT:
1662 case USB_SET_BULK_WB_OPT:
1663 {
1664 struct usb_bulk_ra_wb_opt *opt;
1665
1666 if (endpt == USB_CONTROL_ENDPOINT)
1667 return EINVAL;
1668 opt = (struct usb_bulk_ra_wb_opt *)addr;
1669 if (cmd == USB_SET_BULK_RA_OPT)
1670 sce = &sc->sc_endpoints[endpt][IN];
1671 else
1672 sce = &sc->sc_endpoints[endpt][OUT];
1673 if (sce == NULL || sce->pipeh == NULL)
1674 return EINVAL;
1675 if (opt->ra_wb_buffer_size < 1 ||
1676 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1677 opt->ra_wb_request_size < 1 ||
1678 opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1679 return EINVAL;
1680 /*
1681 * XXX These changes do not take effect until the
1682 * next time RA/WB mode is enabled but they ought to
1683 * take effect immediately.
1684 */
1685 sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1686 sce->ra_wb_reqsize = opt->ra_wb_request_size;
1687 return 0;
1688 }
1689 default:
1690 break;
1691 }
1692
1693 if (endpt != USB_CONTROL_ENDPOINT)
1694 return EINVAL;
1695
1696 switch (cmd) {
1697 #ifdef UGEN_DEBUG
1698 case USB_SETDEBUG:
1699 ugendebug = *(int *)addr;
1700 break;
1701 #endif
1702 case USB_GET_CONFIG:
1703 err = usbd_get_config(sc->sc_udev, &conf);
1704 if (err)
1705 return EIO;
1706 *(int *)addr = conf;
1707 break;
1708 case USB_SET_CONFIG:
1709 if (!(flag & FWRITE))
1710 return EPERM;
1711 err = ugen_set_config(sc, *(int *)addr, 1);
1712 switch (err) {
1713 case USBD_NORMAL_COMPLETION:
1714 break;
1715 case USBD_IN_USE:
1716 return EBUSY;
1717 default:
1718 return EIO;
1719 }
1720 break;
1721 case USB_GET_ALTINTERFACE:
1722 ai = (struct usb_alt_interface *)addr;
1723 err = usbd_device2interface_handle(sc->sc_udev,
1724 ai->uai_interface_index, &iface);
1725 if (err)
1726 return EINVAL;
1727 idesc = usbd_get_interface_descriptor(iface);
1728 if (idesc == NULL)
1729 return EIO;
1730 ai->uai_alt_no = idesc->bAlternateSetting;
1731 break;
1732 case USB_SET_ALTINTERFACE:
1733 if (!(flag & FWRITE))
1734 return EPERM;
1735 ai = (struct usb_alt_interface *)addr;
1736 err = usbd_device2interface_handle(sc->sc_udev,
1737 ai->uai_interface_index, &iface);
1738 if (err)
1739 return EINVAL;
1740 err = ugen_set_interface(sc, ai->uai_interface_index,
1741 ai->uai_alt_no);
1742 if (err)
1743 return EINVAL;
1744 break;
1745 case USB_GET_NO_ALT:
1746 ai = (struct usb_alt_interface *)addr;
1747 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
1748 if (cdesc == NULL)
1749 return EINVAL;
1750 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1751 if (idesc == NULL) {
1752 kmem_free(cdesc, cdesclen);
1753 return EINVAL;
1754 }
1755 ai->uai_alt_no = usbd_get_no_alts(cdesc,
1756 idesc->bInterfaceNumber);
1757 kmem_free(cdesc, cdesclen);
1758 break;
1759 case USB_GET_DEVICE_DESC:
1760 *(usb_device_descriptor_t *)addr =
1761 *usbd_get_device_descriptor(sc->sc_udev);
1762 break;
1763 case USB_GET_CONFIG_DESC:
1764 cd = (struct usb_config_desc *)addr;
1765 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
1766 if (cdesc == NULL)
1767 return EINVAL;
1768 cd->ucd_desc = *cdesc;
1769 kmem_free(cdesc, cdesclen);
1770 break;
1771 case USB_GET_INTERFACE_DESC:
1772 id = (struct usb_interface_desc *)addr;
1773 cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
1774 if (cdesc == NULL)
1775 return EINVAL;
1776 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1777 id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1778 alt = ugen_get_alt_index(sc, id->uid_interface_index);
1779 else
1780 alt = id->uid_alt_index;
1781 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1782 if (idesc == NULL) {
1783 kmem_free(cdesc, cdesclen);
1784 return EINVAL;
1785 }
1786 id->uid_desc = *idesc;
1787 kmem_free(cdesc, cdesclen);
1788 break;
1789 case USB_GET_ENDPOINT_DESC:
1790 ed = (struct usb_endpoint_desc *)addr;
1791 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
1792 if (cdesc == NULL)
1793 return EINVAL;
1794 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1795 ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1796 alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1797 else
1798 alt = ed->ued_alt_index;
1799 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1800 alt, ed->ued_endpoint_index);
1801 if (edesc == NULL) {
1802 kmem_free(cdesc, cdesclen);
1803 return EINVAL;
1804 }
1805 ed->ued_desc = *edesc;
1806 kmem_free(cdesc, cdesclen);
1807 break;
1808 case USB_GET_FULL_DESC:
1809 {
1810 int len;
1811 struct iovec iov;
1812 struct uio uio;
1813 struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1814
1815 cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
1816 if (cdesc == NULL)
1817 return EINVAL;
1818 len = cdesclen;
1819 if (len > fd->ufd_size)
1820 len = fd->ufd_size;
1821 iov.iov_base = (void *)fd->ufd_data;
1822 iov.iov_len = len;
1823 uio.uio_iov = &iov;
1824 uio.uio_iovcnt = 1;
1825 uio.uio_resid = len;
1826 uio.uio_offset = 0;
1827 uio.uio_rw = UIO_READ;
1828 uio.uio_vmspace = l->l_proc->p_vmspace;
1829 error = uiomove((void *)cdesc, len, &uio);
1830 kmem_free(cdesc, cdesclen);
1831 return error;
1832 }
1833 case USB_GET_STRING_DESC: {
1834 int len;
1835 si = (struct usb_string_desc *)addr;
1836 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1837 si->usd_language_id, &si->usd_desc, &len);
1838 if (err)
1839 return EINVAL;
1840 break;
1841 }
1842 case USB_DO_REQUEST:
1843 {
1844 struct usb_ctl_request *ur = (void *)addr;
1845 int len = UGETW(ur->ucr_request.wLength);
1846 struct iovec iov;
1847 struct uio uio;
1848 void *ptr = 0;
1849 usbd_status xerr;
1850
1851 error = 0;
1852
1853 if (!(flag & FWRITE))
1854 return EPERM;
1855 /* Avoid requests that would damage the bus integrity. */
1856 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1857 ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
1858 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1859 ur->ucr_request.bRequest == UR_SET_CONFIG) ||
1860 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
1861 ur->ucr_request.bRequest == UR_SET_INTERFACE))
1862 return EINVAL;
1863
1864 if (len < 0 || len > 32767)
1865 return EINVAL;
1866 if (len != 0) {
1867 iov.iov_base = (void *)ur->ucr_data;
1868 iov.iov_len = len;
1869 uio.uio_iov = &iov;
1870 uio.uio_iovcnt = 1;
1871 uio.uio_resid = len;
1872 uio.uio_offset = 0;
1873 uio.uio_rw =
1874 ur->ucr_request.bmRequestType & UT_READ ?
1875 UIO_READ : UIO_WRITE;
1876 uio.uio_vmspace = l->l_proc->p_vmspace;
1877 ptr = kmem_alloc(len, KM_SLEEP);
1878 if (uio.uio_rw == UIO_WRITE) {
1879 error = uiomove(ptr, len, &uio);
1880 if (error)
1881 goto ret;
1882 }
1883 }
1884 sce = &sc->sc_endpoints[endpt][IN];
1885 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
1886 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
1887 if (xerr) {
1888 error = EIO;
1889 goto ret;
1890 }
1891 if (len != 0) {
1892 if (uio.uio_rw == UIO_READ) {
1893 size_t alen = uimin(len, ur->ucr_actlen);
1894 error = uiomove(ptr, alen, &uio);
1895 if (error)
1896 goto ret;
1897 }
1898 }
1899 ret:
1900 if (ptr)
1901 kmem_free(ptr, len);
1902 return error;
1903 }
1904 case USB_GET_DEVICEINFO:
1905 usbd_fill_deviceinfo(sc->sc_udev,
1906 (struct usb_device_info *)addr, 0);
1907 break;
1908 case USB_GET_DEVICEINFO_OLD:
1909 {
1910 int ret;
1911 MODULE_HOOK_CALL(usb_subr_fill_30_hook,
1912 (sc->sc_udev, (struct usb_device_info_old *)addr, 0,
1913 usbd_devinfo_vp, usbd_printBCD),
1914 enosys(), ret);
1915 if (ret == 0)
1916 return 0;
1917 return EINVAL;
1918 }
1919 default:
1920 return EINVAL;
1921 }
1922 return 0;
1923 }
1924
1925 static int
1926 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1927 {
1928 int endpt = UGENENDPOINT(dev);
1929 struct ugen_softc *sc;
1930 int error;
1931
1932 if ((sc = ugenif_acquire(UGENUNIT(dev))) == 0)
1933 return ENXIO;
1934 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1935 ugenif_release(sc);
1936
1937 return error;
1938 }
1939
/*
 * Character-device poll entry point.  Reports readability on the IN
 * endpoint and writability on the OUT endpoint according to the
 * transfer type; registers with selrecord when no data is pending.
 * The control endpoint is not pollable (POLLERR).
 */
static int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return POLLHUP;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) {
		revents |= POLLERR;
		goto out;
	}

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	KASSERT(sce_in->edesc || sce_out->edesc);
	KASSERT(sce_in->pipeh || sce_out->pipeh);

	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Readable when the clist queue has bytes. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Readable when the ring buffer is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			/* With read-ahead, readable when data is buffered. */
			if (sce_in->state & UGEN_BULK_RA) {
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			 revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			/* With write-behind, writable when space remains. */
			if (sce_out->state & UGEN_BULK_WB) {
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			 revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

out:	ugenif_release(sc);
	return revents;
}
2026
2027 static void
2028 filt_ugenrdetach(struct knote *kn)
2029 {
2030 struct ugen_endpoint *sce = kn->kn_hook;
2031 struct ugen_softc *sc = sce->sc;
2032
2033 mutex_enter(&sc->sc_lock);
2034 SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
2035 mutex_exit(&sc->sc_lock);
2036 }
2037
2038 static int
2039 filt_ugenread_intr(struct knote *kn, long hint)
2040 {
2041 struct ugen_endpoint *sce = kn->kn_hook;
2042 struct ugen_softc *sc = sce->sc;
2043 int ret;
2044
2045 mutex_enter(&sc->sc_lock);
2046 if (sc->sc_dying) {
2047 ret = 0;
2048 } else {
2049 kn->kn_data = sce->q.c_cc;
2050 ret = kn->kn_data > 0;
2051 }
2052 mutex_exit(&sc->sc_lock);
2053
2054 return ret;
2055 }
2056
2057 static int
2058 filt_ugenread_isoc(struct knote *kn, long hint)
2059 {
2060 struct ugen_endpoint *sce = kn->kn_hook;
2061 struct ugen_softc *sc = sce->sc;
2062 int ret;
2063
2064 mutex_enter(&sc->sc_lock);
2065 if (sc->sc_dying) {
2066 ret = 0;
2067 } else if (sce->cur == sce->fill) {
2068 ret = 0;
2069 } else if (sce->cur < sce->fill) {
2070 kn->kn_data = sce->fill - sce->cur;
2071 ret = 1;
2072 } else {
2073 kn->kn_data = (sce->limit - sce->cur) +
2074 (sce->fill - sce->ibuf);
2075 ret = 1;
2076 }
2077 mutex_exit(&sc->sc_lock);
2078
2079 return ret;
2080 }
2081
2082 static int
2083 filt_ugenread_bulk(struct knote *kn, long hint)
2084 {
2085 struct ugen_endpoint *sce = kn->kn_hook;
2086 struct ugen_softc *sc = sce->sc;
2087 int ret;
2088
2089 mutex_enter(&sc->sc_lock);
2090 if (sc->sc_dying) {
2091 ret = 0;
2092 } else if (!(sce->state & UGEN_BULK_RA)) {
2093 /*
2094 * We have no easy way of determining if a read will
2095 * yield any data or a write will happen.
2096 * So, emulate "seltrue".
2097 */
2098 ret = filt_seltrue(kn, hint);
2099 } else if (sce->ra_wb_used == 0) {
2100 ret = 0;
2101 } else {
2102 kn->kn_data = sce->ra_wb_used;
2103 ret = 1;
2104 }
2105 mutex_exit(&sc->sc_lock);
2106
2107 return ret;
2108 }
2109
2110 static int
2111 filt_ugenwrite_bulk(struct knote *kn, long hint)
2112 {
2113 struct ugen_endpoint *sce = kn->kn_hook;
2114 struct ugen_softc *sc = sce->sc;
2115 int ret;
2116
2117 mutex_enter(&sc->sc_lock);
2118 if (sc->sc_dying) {
2119 ret = 0;
2120 } else if (!(sce->state & UGEN_BULK_WB)) {
2121 /*
2122 * We have no easy way of determining if a read will
2123 * yield any data or a write will happen.
2124 * So, emulate "seltrue".
2125 */
2126 ret = filt_seltrue(kn, hint);
2127 } else if (sce->ra_wb_used == sce->limit - sce->ibuf) {
2128 ret = 0;
2129 } else {
2130 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2131 ret = 1;
2132 }
2133 mutex_exit(&sc->sc_lock);
2134
2135 return ret;
2136 }
2137
/* Filter ops for EVFILT_READ on interrupt endpoints. */
static const struct filterops ugenread_intr_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_intr,
};

/* Filter ops for EVFILT_READ on isochronous endpoints. */
static const struct filterops ugenread_isoc_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_isoc,
};

/* Filter ops for EVFILT_READ on bulk endpoints. */
static const struct filterops ugenread_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_bulk,
};

/*
 * Filter ops for EVFILT_WRITE on bulk endpoints.  The shared
 * detach routine works for both directions: it removes the knote
 * from whichever endpoint klist kn_hook points at.
 */
static const struct filterops ugenwrite_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenwrite_bulk,
};
2165
/*
 * Character-device kqfilter entry point.  Selects the filter ops
 * matching the endpoint's direction and transfer type, then hooks
 * the knote onto the endpoint's klist under the softc lock.  The
 * control endpoint and interrupt/isoc write filters are not
 * supported.
 */
static int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;
	int error;

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return ENXIO;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) {
		error = ENODEV;
		goto out;
	}

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL) {
			error = EINVAL;
			goto out;
		}

		klist = &sce->rsel.sel_klist;
		/* Pick the read filter by transfer type. */
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL) {
			error = EINVAL;
			goto out;
		}

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			error = EINVAL;
			goto out;

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		break;

	default:
		error = EINVAL;
		goto out;
	}

	kn->kn_hook = sce;

	/* Attach the knote under the same lock the filters use. */
	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	error = 0;

out:	ugenif_release(sc);
	return error;
}
2247
2248 MODULE(MODULE_CLASS_DRIVER, ugen, NULL);
2249
2250 static int
2251 ugen_modcmd(modcmd_t cmd, void *aux)
2252 {
2253
2254 switch (cmd) {
2255 case MODULE_CMD_INIT:
2256 mutex_init(&ugenif.lock, MUTEX_DEFAULT, IPL_NONE);
2257 rb_tree_init(&ugenif.tree, &ugenif_tree_ops);
2258 return 0;
2259 default:
2260 return ENOTTY;
2261 }
2262 }
2263