ugen.c revision 1.171.2.1 1 /* $NetBSD: ugen.c,v 1.171.2.1 2024/04/16 18:45:39 martin Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.171.2.1 2024/04/16 18:45:39 martin Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #include "opt_usb.h"
45 #endif
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/kmem.h>
51 #include <sys/device.h>
52 #include <sys/ioctl.h>
53 #include <sys/conf.h>
54 #include <sys/tty.h>
55 #include <sys/file.h>
56 #include <sys/select.h>
57 #include <sys/proc.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60 #include <sys/compat_stub.h>
61 #include <sys/module.h>
62 #include <sys/rbtree.h>
63
64 #include <dev/usb/usb.h>
65 #include <dev/usb/usbdi.h>
66 #include <dev/usb/usbdi_util.h>
67 #include <dev/usb/usbhist.h>
68
69 #include "ioconf.h"
70
71 #ifdef USB_DEBUG
72 #ifndef UGEN_DEBUG
73 #define ugendebug 0
74 #else
75 int ugendebug = 0;
76
/*
 * Create the hw.ugen sysctl subtree.  Currently exposes a single
 * read-write integer, hw.ugen.debug, which gates the USBHIST debug
 * output via the ugendebug variable.  Only compiled in when both
 * USB_DEBUG and UGEN_DEBUG are defined.
 */
SYSCTL_SETUP(sysctl_hw_ugen_setup, "sysctl hw.ugen setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	/* Parent node: hw.ugen */
	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "ugen",
	    SYSCTL_DESCR("ugen global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

	/* control debugging printfs */
	err = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &ugendebug, sizeof(ugendebug), CTL_CREATE, CTL_EOL);
	if (err)
		goto fail;

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}
103
104 #endif /* UGEN_DEBUG */
105 #endif /* USB_DEBUG */
106
107 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOGN(ugendebug,1,FMT,A,B,C,D)
108 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(ugendebug,N,FMT,A,B,C,D)
109 #define UGENHIST_FUNC() USBHIST_FUNC()
110 #define UGENHIST_CALLED(name) USBHIST_CALLED(ugendebug)
111
112 #define UGEN_CHUNK 128 /* chunk size for read */
113 #define UGEN_IBSIZE 1020 /* buffer size */
114 #define UGEN_BBSIZE 1024
115
116 #define UGEN_NISOREQS 4 /* number of outstanding xfer requests */
117 #define UGEN_NISORFRMS 8 /* number of transactions per req */
118 #define UGEN_NISOFRAMES (UGEN_NISORFRMS * UGEN_NISOREQS)
119
120 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
121 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
122
/*
 * One outstanding isochronous input request.  Each request carries
 * UGEN_NISORFRMS frames; UGEN_NISOREQS of these are kept in flight
 * per isochronous endpoint.
 */
struct isoreq {
	struct ugen_endpoint *sce;	/* back pointer to owning endpoint */
	struct usbd_xfer *xfer;
	void *dmabuf;			/* xfer's DMA buffer (usbd_get_buffer) */
	uint16_t sizes[UGEN_NISORFRMS];	/* per-frame transfer lengths */
};
129
/*
 * Per-endpoint (address + direction) state.  One of these exists for
 * every possible endpoint of the device; only those with a non-NULL
 * edesc are usable.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back pointer to softc */
	usb_endpoint_descriptor_t *edesc; /* NULL if endpoint not present */
	struct usbd_interface *iface;
	int state;			/* UGEN_* flag bits below */
#define UGEN_SHORT_OK 0x04 /* short xfers are OK */
#define UGEN_BULK_RA 0x08 /* in bulk read-ahead mode */
#define UGEN_BULK_WB 0x10 /* in bulk write-behind mode */
#define UGEN_RA_WB_STOP 0x20 /* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;	/* open pipe, NULL when closed */
	struct clist q;			/* interrupt-in character queue */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	uint32_t timeout;	/* I/O timeout in ms, or USBD_NO_TIMEOUT */
	uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	uint32_t ra_wb_used;	 /* how much is in buffer */
	uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;
	struct isoreq isoreqs[UGEN_NISOREQS];
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;	/* select/poll/kqueue state */
	kcondvar_t cv;		/* sleepers waiting for I/O activity */
};
157
/* Per-instance (per-unit) driver state. */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	struct usbd_device *sc_udev;	/* USB device handle */
	struct rb_node sc_node;		/* linkage in ugenif.tree */
	unsigned sc_unit;		/* unit number, key for sc_node */

	kmutex_t sc_lock;
	kcondvar_t sc_detach_cv;	/* broadcast when sc_refcnt goes negative */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* per-endpoint open flags */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;			/* active users (see ugenif_acquire) */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for bulk/intr I/O */
	u_char sc_dying;		/* set once device is going away */
	u_char sc_attached;
};
177
/* Global registry of attached ugen units, keyed by unit number. */
static struct {
	kmutex_t lock;
	rb_tree_t tree;		/* of struct ugen_softc, ordered by sc_unit */
} ugenif __cacheline_aligned;
182
183 static int
184 compare_ugen(void *cookie, const void *vsca, const void *vscb)
185 {
186 const struct ugen_softc *sca = vsca;
187 const struct ugen_softc *scb = vscb;
188
189 if (sca->sc_unit < scb->sc_unit)
190 return -1;
191 if (sca->sc_unit > scb->sc_unit)
192 return +1;
193 return 0;
194 }
195
196 static int
197 compare_ugen_key(void *cookie, const void *vsc, const void *vk)
198 {
199 const struct ugen_softc *sc = vsc;
200 const unsigned *k = vk;
201
202 if (sc->sc_unit < *k)
203 return -1;
204 if (sc->sc_unit > *k)
205 return +1;
206 return 0;
207 }
208
/* rb_tree ops for ugenif.tree: nodes are softcs, keys are unsigned units. */
static const rb_tree_ops_t ugenif_tree_ops = {
	.rbto_compare_nodes = compare_ugen,
	.rbto_compare_key = compare_ugen_key,
	.rbto_node_offset = offsetof(struct ugen_softc, sc_node),
};
214
/*
 * Assign the lowest unused unit number to sc and insert it into the
 * global unit tree.  Because the tree is ordered by unit, walking it
 * from the minimum while the running index i still matches each
 * node's unit finds the first gap (or one past the largest unit).
 */
static void
ugenif_get_unit(struct ugen_softc *sc)
{
	struct ugen_softc *sc0;
	unsigned i;

	mutex_enter(&ugenif.lock);
	for (i = 0, sc0 = RB_TREE_MIN(&ugenif.tree);
	     sc0 != NULL && i == sc0->sc_unit;
	     i++, sc0 = RB_TREE_NEXT(&ugenif.tree, sc0))
		KASSERT(i < UINT_MAX);
	/* i is now a unit number not present in the tree. */
	KASSERT(rb_tree_find_node(&ugenif.tree, &i) == NULL);
	sc->sc_unit = i;
	sc0 = rb_tree_insert_node(&ugenif.tree, sc);
	KASSERT(sc0 == sc);
	KASSERT(rb_tree_find_node(&ugenif.tree, &i) == sc);
	mutex_exit(&ugenif.lock);
}
233
/*
 * Release sc's unit number: remove it from the global unit tree and
 * poison sc_unit (unsigned, so -1 wraps to UINT_MAX) to catch reuse.
 */
static void
ugenif_put_unit(struct ugen_softc *sc)
{

	mutex_enter(&ugenif.lock);
	KASSERT(rb_tree_find_node(&ugenif.tree, &sc->sc_unit) == sc);
	rb_tree_remove_node(&ugenif.tree, sc);
	sc->sc_unit = -1;
	mutex_exit(&ugenif.lock);
}
244
245 static struct ugen_softc *
246 ugenif_acquire(unsigned unit)
247 {
248 struct ugen_softc *sc;
249
250 mutex_enter(&ugenif.lock);
251 sc = rb_tree_find_node(&ugenif.tree, &unit);
252 if (sc == NULL)
253 goto out;
254 mutex_enter(&sc->sc_lock);
255 if (sc->sc_dying) {
256 mutex_exit(&sc->sc_lock);
257 sc = NULL;
258 goto out;
259 }
260 KASSERT(sc->sc_refcnt < INT_MAX);
261 sc->sc_refcnt++;
262 mutex_exit(&sc->sc_lock);
263 out: mutex_exit(&ugenif.lock);
264
265 return sc;
266 }
267
268 static void
269 ugenif_release(struct ugen_softc *sc)
270 {
271
272 mutex_enter(&sc->sc_lock);
273 if (--sc->sc_refcnt < 0)
274 cv_broadcast(&sc->sc_detach_cv);
275 mutex_exit(&sc->sc_lock);
276 }
277
278 static dev_type_open(ugenopen);
279 static dev_type_close(ugenclose);
280 static dev_type_read(ugenread);
281 static dev_type_write(ugenwrite);
282 static dev_type_ioctl(ugenioctl);
283 static dev_type_poll(ugenpoll);
284 static dev_type_kqfilter(ugenkqfilter);
285
/* Character-device switch: entry points for /dev/ugenN.EE nodes. */
const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
300
301 Static void ugenintr(struct usbd_xfer *, void *,
302 usbd_status);
303 Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
304 usbd_status);
305 Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
306 usbd_status);
307 Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
308 usbd_status);
309 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
310 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
311 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
312 void *, int, struct lwp *);
313 Static int ugen_set_config(struct ugen_softc *, int, int);
314 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
315 int, int *);
316 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
317 Static int ugen_get_alt_index(struct ugen_softc *, int);
318 Static void ugen_clear_endpoints(struct ugen_softc *);
319
320 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
321 #define UGENENDPOINT(n) (minor(n) & 0xf)
322 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
323
324 static int ugenif_match(device_t, cfdata_t, void *);
325 static void ugenif_attach(device_t, device_t, void *);
326 static int ugen_match(device_t, cfdata_t, void *);
327 static void ugen_attach(device_t, device_t, void *);
328 static int ugen_detach(device_t, int);
329 static int ugen_activate(device_t, enum devact);
330
331 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match,
332 ugen_attach, ugen_detach, ugen_activate);
333 CFATTACH_DECL_NEW(ugenif, sizeof(struct ugen_softc), ugenif_match,
334 ugenif_attach, ugen_detach, ugen_activate);
335
336 /* toggle to control attach priority. -1 means "let autoconf decide" */
337 int ugen_override = -1;
338
339 static int
340 ugen_match(device_t parent, cfdata_t match, void *aux)
341 {
342 struct usb_attach_arg *uaa = aux;
343 int override;
344
345 if (ugen_override != -1)
346 override = ugen_override;
347 else
348 override = match->cf_flags & 1;
349
350 if (override)
351 return UMATCH_HIGHEST;
352 else if (uaa->uaa_usegeneric)
353 return UMATCH_GENERIC;
354 else
355 return UMATCH_NONE;
356 }
357
358 static int
359 ugenif_match(device_t parent, cfdata_t match, void *aux)
360 {
361 /*
362 * Like ugen(4), ugenif(4) also has an override flag. It has the
363 * opposite effect, however, causing us to match with GENERIC
364 * priority rather than HIGHEST.
365 */
366 return (match->cf_flags & 1) ? UMATCH_GENERIC : UMATCH_HIGHEST;
367 }
368
369 static void
370 ugen_attach(device_t parent, device_t self, void *aux)
371 {
372 struct usb_attach_arg *uaa = aux;
373 struct usbif_attach_arg uiaa;
374
375 memset(&uiaa, 0, sizeof(uiaa));
376 uiaa.uiaa_port = uaa->uaa_port;
377 uiaa.uiaa_vendor = uaa->uaa_vendor;
378 uiaa.uiaa_product = uaa->uaa_product;
379 uiaa.uiaa_release = uaa->uaa_release;
380 uiaa.uiaa_device = uaa->uaa_device;
381 uiaa.uiaa_configno = -1;
382 uiaa.uiaa_ifaceno = -1;
383
384 ugenif_attach(parent, self, &uiaa);
385 }
386
/*
 * Common attach path for ugen(4) (whole device, uiaa_ifaceno < 0) and
 * ugenif(4) (single interface).  Initialises locks and per-endpoint
 * select/cv state, optionally selects configuration index 0, records
 * the endpoint layout for the active configuration, and finally
 * claims a unit number — last, so a failed attach never registers.
 */
static void
ugenif_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uiaa->uiaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uiaa->uiaa_device;

	/*
	 * Initialise selinfo/cv for every possible endpoint slot once;
	 * ugen_set_config() later clears everything except these
	 * (see UGEN_ENDPOINT_NONZERO_CRUFT).
	 */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	if (uiaa->uiaa_ifaceno < 0) {
		/*
		 * If we attach the whole device,
		 * set configuration index 0, the default one.
		 */
		err = usbd_set_config_index(udev, 0, 0);
		if (err) {
			aprint_error_dev(self,
			    "setting configuration index 0 failed\n");
			return;
		}
	}

	/* Get current configuration */
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf, uiaa->uiaa_ifaceno < 0);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		return;
	}

	/* Attach can no longer fail: claim a unit and go live. */
	ugenif_get_unit(sc);
	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);
	sc->sc_attached = 1;
}
451
452 Static void
453 ugen_clear_endpoints(struct ugen_softc *sc)
454 {
455
456 /* Clear out the old info, but leave the selinfo and cv initialised. */
457 for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
458 for (int dir = OUT; dir <= IN; dir++) {
459 struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
460 memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
461 }
462 }
463 }
464
/*
 * Switch the device to configuration `configno' and rebuild the
 * per-endpoint bookkeeping (sc/edesc/iface) for it.
 *
 * If `chkopen' is set, fail with USBD_IN_USE when any non-control
 * endpoint is open, and temporarily mark all endpoints open to fence
 * off concurrent ugenopen() while we reconfigure.  Requires the
 * kernel lock (protects sc_is_open).
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno, int chkopen)
{
	struct usbd_device *dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	DPRINTFN(1, "ugen%jd: to configno %jd, sc=%jx",
	    device_unit(sc->sc_dev), configno, (uintptr_t)sc, 0);

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	if (chkopen) {
		/*
		 * We start at 1, not 0, because we don't care whether the
		 * control endpoint is open or not. It is always present.
		 */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
			if (sc->sc_is_open[endptno]) {
				DPRINTFN(1,
				    "ugen%jd - endpoint %d is open",
				    device_unit(sc->sc_dev), endptno, 0, 0);
				return USBD_IN_USE;
			}

		/* Prevent opening while we're setting the config. */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++) {
			KASSERT(!sc->sc_is_open[endptno]);
			sc->sc_is_open[endptno] = 1;
		}
	}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			goto out;
	}

	/* Drop the old endpoint table; rebuilt below from descriptors. */
	ugen_clear_endpoints(sc);

	err = usbd_interface_count(dev, &niface);
	if (err)
		goto out;

	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1, "ifaceno %jd", ifaceno, 0, 0, 0);
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			goto out;
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			goto out;
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface, endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1, "endptno %jd, endpt=0x%02jx (%jd,%jd)",
			    endptno, endpt, UE_GET_ADDR(endpt),
			    UE_GET_DIR(endpt));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	err = USBD_NORMAL_COMPLETION;

out:	if (chkopen) {
		/*
		 * Allow open again now that we're done trying to set
		 * the config.
		 */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++) {
			KASSERT(sc->sc_is_open[endptno]);
			sc->sc_is_open[endptno] = 0;
		}
	}
	return err;
}
555
/*
 * Open /dev/ugenN.EE.  The control endpoint (EE == 0) allows multiple
 * concurrent opens; all other endpoints are exclusive.  For each
 * requested direction (FREAD => IN, FWRITE => OUT) the endpoint's
 * pipe is opened according to its transfer type: interrupt-in gets a
 * clist-backed buffer and a permanently-queued interrupt pipe,
 * isochronous-in gets a circular buffer plus UGEN_NISOREQS in-flight
 * requests, bulk just opens the pipe.  Requires the kernel lock
 * (protects sc_is_open).
 */
static int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	struct usbd_xfer *xfer;
	int i, j;
	int error;
	int opened = 0;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	if ((sc = ugenif_acquire(unit)) == NULL)
		return ENXIO;

	DPRINTFN(5, "flag=%jd, mode=%jd, unit=%jd endpt=%jd",
	    flag, mode, unit, endpt);

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		opened = sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		error = 0;
		goto out;
	}

	if (sc->sc_is_open[endpt]) {
		error = EBUSY;
		goto out;
	}
	/* Mark open now; `opened' lets the out: path undo this on error. */
	opened = sc->sc_is_open[endpt] = 1;

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce->edesc == NULL) {
				error = ENXIO;
				goto out;
			}
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, "sc=%jx, endpt=%jd, dir=%jd, sce=%jp",
		    (uintptr_t)sc, endpt, dir, (uintptr_t)sce);
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				/* Interrupt-out: plain pipe, no buffering. */
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err) {
					error = EIO;
					goto out;
				}
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0) { /* shouldn't happen */
				error = EINVAL;
				goto out;
			}
			sce->ibuf = kmem_alloc(isize, KM_SLEEP);
			DPRINTFN(5, "intr endpt=%d, isize=%d",
			    endpt, isize, 0, 0);
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				error = ENOMEM;
				goto out;
			}
			/* Pipe stays queued; ugenintr() fills sce->q. */
			err = usbd_open_pipe_intr(sce->iface,
			    edesc->bEndpointAddress,
			    USBD_SHORT_XFER_OK, &sce->pipeh, sce,
			    sce->ibuf, isize, ugenintr,
			    USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				error = EIO;
				goto out;
			}
			DPRINTFN(5, "interrupt open done", 0, 0, 0, 0);
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				error = EIO;
				goto out;
			}
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT) {
				error = EINVAL;
				goto out;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0) { /* shouldn't happen */
				error = EINVAL;
				goto out;
			}
			/* Circular buffer: cur/fill chase each other. */
			sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
			    KM_SLEEP);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, "isoc endpt=%d, isize=%d",
			    endpt, isize, 0, 0);
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
				sce->ibuf = NULL;
				error = EIO;
				goto out;
			}
			/* Queue all requests; ugen_isoc_rintr() requeues. */
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				err = usbd_create_xfer(sce->pipeh,
				    isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
				    &xfer);
				if (err)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
				for (j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
				    sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
				    ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, "isoc open done", 0, 0, 0, 0);
			break;
		bad:
			/* Unwind the xfers created so far, then the pipe. */
			while (--i >= 0) { /* implicit buffer free */
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
				sce->isoreqs[i].xfer = NULL;
			}
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
			sce->ibuf = NULL;
			error = ENOMEM;
			goto out;
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			error = EINVAL;
			goto out;
		}
	}
	error = 0;
out:	if (error && opened)
		sc->sc_is_open[endpt] = 0;
	ugenif_release(sc);
	return error;
}
734
/*
 * Tear down an open endpoint: for each direction that was opened
 * (per `flag'), abort outstanding I/O, free type-specific resources
 * (clist for interrupt-in, iso xfers, RA/WB xfer), close the pipe
 * and free the buffer.  Always clears sc_is_open[endpt].  Requires
 * the kernel lock (protects sc_is_open).
 */
static void
ugen_do_close(struct ugen_softc *sc, int flag, int endpt)
{
	struct ugen_endpoint *sce;
	int dir;
	int i;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	if (!sc->sc_is_open[endpt])
		goto out;

	if (endpt == USB_CONTROL_ENDPOINT) {
		/* Control endpoint has no pipes or buffers to release. */
		DPRINTFN(5, "close control", 0, 0, 0, 0);
		goto out;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, "endpt=%jd dir=%jd sce=%jx",
		    endpt, dir, (uintptr_t)sce, 0);

		/* Stop I/O before destroying xfers or freeing buffers. */
		usbd_abort_pipe(sce->pipeh);

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		/* msize: how much ibuf to free below (0 = none). */
		int msize = 0;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
				sce->isoreqs[i].xfer = NULL;
			}
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				sce->ra_wb_xfer = NULL;
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}

out:	sc->sc_is_open[endpt] = 0;
	/* Sanity: both directions of this endpoint are fully torn down. */
	for (dir = OUT; dir <= IN; dir++) {
		sce = &sc->sc_endpoints[endpt][dir];
		KASSERT(sce->pipeh == NULL);
		KASSERT(sce->ibuf == NULL);
		KASSERT(sce->ra_wb_xfer == NULL);
		for (i = 0; i < UGEN_NISOREQS; i++)
			KASSERT(sce->isoreqs[i].xfer == NULL);
	}
}
809
810 static int
811 ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
812 {
813 int endpt = UGENENDPOINT(dev);
814 struct ugen_softc *sc;
815
816 UGENHIST_FUNC(); UGENHIST_CALLED();
817
818 DPRINTFN(5, "flag=%jd, mode=%jd, unit=%jd, endpt=%jd",
819 flag, mode, UGENUNIT(dev), endpt);
820
821 KASSERT(KERNEL_LOCKED_P()); /* ugen_do_close */
822
823 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
824 return ENXIO;
825
826 KASSERT(sc->sc_is_open[endpt]);
827 ugen_do_close(sc, flag, endpt);
828 KASSERT(!sc->sc_is_open[endpt]);
829
830 ugenif_release(sc);
831
832 return 0;
833 }
834
/*
 * Read from an endpoint's IN direction.
 *
 * Interrupt: sleep until ugenintr() has queued data in sce->q, then
 * drain it through sc_buffer to the user.  Bulk with read-ahead
 * (UGEN_BULK_RA): consume from the circular ibuf filled by
 * ugen_bulkra_intr(), restarting the background xfer if it stopped
 * on a full buffer.  Plain bulk: synchronous transfers through
 * sc_buffer.  Isochronous: consume the circular buffer filled by
 * ugen_isoc_rintr().  Control endpoint reads are not supported
 * (ENODEV).  IO_NDELAY yields EWOULDBLOCK instead of sleeping.
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	uint32_t n, tn;
	struct usbd_xfer *xfer;
	usbd_status err;
	int error = 0;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	DPRINTFN(5, "ugen%d: %jd", device_unit(sc->sc_dev), endpt, 0, 0);

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

	KASSERT(sce->edesc);
	KASSERT(sce->pipeh);

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			DPRINTFN(5, "sleep on %jx", (uintptr_t)sce, 0, 0, 0);
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, "woke, error=%jd",
			    error, 0, 0, 0);
			if (sc->sc_dying)
				error = EIO;
			if (error)
				break;
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = uimin(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, "got %jd chars", n, 0, 0, 0);

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, "BULK_RA req: %zd used: %d",
			    uio->uio_resid, sce->ra_wb_used, 0, 0);
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for the read-ahead buffer to fill. */
				while (sce->ra_wb_used == 0) {
					DPRINTFN(5, "sleep on %jx",
					    (uintptr_t)sce, 0, 0, 0);
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5, "woke, error=%jd",
					    error, 0, 0, 0);
					if (sc->sc_dying)
						error = EIO;
					if (error)
						break;
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				    && sce->ra_wb_used > 0) {
					n = uimin(uio->uio_resid,
					    sce->ra_wb_used);
					/* Clamp to end of circular buffer. */
					n = uimin(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer, sce, NULL,
					    uimin(n, sce->ra_wb_xferlen),
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain bulk: synchronous transfers via the bounce buffer. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, "start transfer %jd bytes", n, 0, 0, 0);
			tn = n;
			err = usbd_bulk_transfer(xfer, sce->pipeh,
			    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
			    sce->timeout, sc->sc_buffer, &tn);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, "got %jd bytes", tn, 0, 0, 0);
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer (tn < n) ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* Empty when the read pointer has caught the fill pointer. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			/* "ugenri" */
			DPRINTFN(5, "sleep on %jx", (uintptr_t)sce, 0, 0, 0);
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, "woke, error=%jd", error, 0, 0, 0);
			if (sc->sc_dying)
				error = EIO;
			if (error)
				break;
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			/* Copy up to fill, or up to the wrap point. */
			if(sce->fill > sce->cur)
				n = uimin(sce->fill - sce->cur, uio->uio_resid);
			else
				n = uimin(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, "isoc got %jd chars", n, 0, 0, 0);

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return ENXIO;
	}
	return error;
}
1029
1030 static int
1031 ugenread(dev_t dev, struct uio *uio, int flag)
1032 {
1033 int endpt = UGENENDPOINT(dev);
1034 struct ugen_softc *sc;
1035 int error;
1036
1037 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
1038 return ENXIO;
1039 error = ugen_do_read(sc, endpt, uio, flag);
1040 ugenif_release(sc);
1041
1042 return error;
1043 }
1044
/*
 * Write to an endpoint's OUT direction.
 *
 * Bulk with write-behind (UGEN_BULK_WB): stage user data into the
 * circular ibuf and (re)start the background xfer, which drains it
 * via ugen_bulkwb_intr().  Plain bulk and interrupt: synchronous
 * transfers through sc_buffer.  Control endpoint writes are not
 * supported (ENODEV).  IO_NDELAY yields EWOULDBLOCK instead of
 * sleeping when the WB buffer is full.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	uint32_t n;
	int error = 0;
	uint32_t tn;
	char *dbuf;
	struct usbd_xfer *xfer;
	usbd_status err;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	DPRINTFN(5, "ugen%jd: %jd", device_unit(sc->sc_dev), endpt, 0, 0);

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

	KASSERT(sce->edesc);
	KASSERT(sce->pipeh);

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, "BULK_WB req: %jd used: %jd",
			    uio->uio_resid, sce->ra_wb_used, 0, 0);
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			/* Buffer full? */
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for space in the write-behind buffer. */
				while (sce->ra_wb_used ==
				    sce->limit - sce->ibuf) {
					DPRINTFN(5, "sleep on %#jx",
					    (uintptr_t)sce, 0, 0, 0);
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5, "woke, error=%d",
					    error, 0, 0, 0);
					if (sc->sc_dying)
						error = EIO;
					if (error)
						break;
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = uimin(uio->uio_resid,
					    (sce->limit - sce->ibuf)
					    - sce->ra_wb_used);
					/* Clamp to end of circular buffer. */
					n = uimin(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = uimin(sce->ra_wb_used,
					    sce->ra_wb_xferlen);
					tn = uimin(n, sce->limit - sce->cur);
					/*
					 * Copy possibly-wrapped data into the
					 * contiguous xfer DMA buffer.
					 */
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						    n - tn);
					usbd_setup_xfer(xfer, sce, NULL, n,
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain bulk: synchronous transfers via the bounce buffer. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, "transfer %jd bytes", n, 0, 0, 0);
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0, sce->timeout,
			    sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_INTERRUPT:
		/* One synchronous transfer per wMaxPacketSize chunk. */
		error = usbd_create_xfer(sce->pipeh,
		    UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, "transfer %jd bytes", n, 0, 0, 0);
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	default:
		return ENXIO;
	}
	return error;
}
1198
1199 static int
1200 ugenwrite(dev_t dev, struct uio *uio, int flag)
1201 {
1202 int endpt = UGENENDPOINT(dev);
1203 struct ugen_softc *sc;
1204 int error;
1205
1206 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
1207 return ENXIO;
1208 error = ugen_do_write(sc, endpt, uio, flag);
1209 ugenif_release(sc);
1210
1211 return error;
1212 }
1213
1214 static int
1215 ugen_activate(device_t self, enum devact act)
1216 {
1217 struct ugen_softc *sc = device_private(self);
1218
1219 switch (act) {
1220 case DVACT_DEACTIVATE:
1221 sc->sc_dying = 1;
1222 return 0;
1223 default:
1224 return EOPNOTSUPP;
1225 }
1226 }
1227
/*
 * ugen_detach(self, flags)
 *
 *	Autoconf detach.  Refuses (EBUSY) if any endpoint is open and
 *	detach is not forced; otherwise marks the device dying, aborts
 *	all pipes, waits for in-flight users to drain, revokes the
 *	device vnodes, closes lingering pipes, and frees per-endpoint
 *	and per-softc resources.
 */
static int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	DPRINTF("sc=%ju flags=%ju", (uintptr_t)sc, flags, 0, 0);

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	/*
	 * Fail if we're not forced to detach and userland has any
	 * endpoints open.
	 */
	if ((flags & DETACH_FORCE) == 0) {
		for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
			if (sc->sc_is_open[i])
				return EBUSY;
		}
	}

	/* Prevent new users. Prevent suspend/resume. */
	sc->sc_dying = 1;
	pmf_device_deregister(self);

	/*
	 * If we never finished attaching, skip nixing endpoints and
	 * users because there aren't any.
	 */
	if (!sc->sc_attached)
		goto out;

	/* Abort all pipes. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	/*
	 * Wait for users to drain.  Before this point there can be no
	 * more I/O operations started because we set sc_dying; after
	 * this, there can be no more I/O operations in progress, so it
	 * will be safe to free things.
	 */
	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
			for (dir = OUT; dir <= IN; dir++)
				cv_broadcast(&sc->sc_endpoints[i][dir].cv);
		}
		/* Wait for processes to go away. */
		do {
			cv_wait(&sc->sc_detach_cv, &sc->sc_lock);
		} while (sc->sc_refcnt >= 0);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/*
	 * Nuke the vnodes for any open instances (calls ugenclose, but
	 * with no effect because we already set sc_dying).
	 */
	mn = sc->sc_unit * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	/* Actually close any lingering pipes. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++)
		ugen_do_close(sc, FREAD|FWRITE, i);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);
	ugenif_put_unit(sc);

	/* Tear down per-endpoint select/kqueue state and condvars. */
out:	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}
1323
/*
 * ugenintr(xfer, addr, status)
 *
 *	Completion callback for an interrupt-in endpoint.  Appends the
 *	received bytes to the endpoint's clist queue and wakes any
 *	reader sleeping on the endpoint cv or waiting in poll/select.
 */
Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	/* Pipe is being aborted or closed: do not requeue or wake. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF("status=%jd", status, 0, 0, 0);
		/* Try to recover from a stall; other errors just drop. */
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, "xfer=%#jx status=%d count=%d",
	    (uintptr_t)xfer, status, count, 0);
	DPRINTFN(5, " data = %02x %02x %02x",
	    ibuf[0], ibuf[1], ibuf[2], 0);

	/* Queue the data; b_to_q's return (bytes not queued) is ignored. */
	mutex_enter(&sc->sc_lock);
	(void)b_to_q(ibuf, count, &sce->q);
	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1358
1359 Static void
1360 ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
1361 usbd_status status)
1362 {
1363 struct isoreq *req = addr;
1364 struct ugen_endpoint *sce = req->sce;
1365 struct ugen_softc *sc = sce->sc;
1366 uint32_t count, n;
1367 int i, isize;
1368
1369 UGENHIST_FUNC(); UGENHIST_CALLED();
1370
1371 /* Return if we are aborting. */
1372 if (status == USBD_CANCELLED)
1373 return;
1374
1375 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1376 DPRINTFN(5, "xfer %ld, count=%d",
1377 (long)(req - sce->isoreqs), count, 0, 0);
1378
1379 mutex_enter(&sc->sc_lock);
1380
1381 /* throw away oldest input if the buffer is full */
1382 if (sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1383 sce->cur += count;
1384 if (sce->cur >= sce->limit)
1385 sce->cur = sce->ibuf + (sce->limit - sce->cur);
1386 DPRINTFN(5, "throwing away %jd bytes",
1387 count, 0, 0, 0);
1388 }
1389
1390 isize = UGETW(sce->edesc->wMaxPacketSize);
1391 for (i = 0; i < UGEN_NISORFRMS; i++) {
1392 uint32_t actlen = req->sizes[i];
1393 char const *tbuf = (char const *)req->dmabuf + isize * i;
1394
1395 /* copy data to buffer */
1396 while (actlen > 0) {
1397 n = uimin(actlen, sce->limit - sce->fill);
1398 memcpy(sce->fill, tbuf, n);
1399
1400 tbuf += n;
1401 actlen -= n;
1402 sce->fill += n;
1403 if (sce->fill == sce->limit)
1404 sce->fill = sce->ibuf;
1405 }
1406
1407 /* setup size for next transfer */
1408 req->sizes[i] = isize;
1409 }
1410
1411 usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0,
1412 ugen_isoc_rintr);
1413 (void)usbd_transfer(xfer);
1414
1415 cv_signal(&sce->cv);
1416 mutex_exit(&sc->sc_lock);
1417 selnotify(&sce->rsel, 0, 0);
1418 }
1419
/*
 * ugen_bulkra_intr(xfer, addr, status)
 *
 *	Completion callback for bulk-in read-ahead.  Copies the data
 *	just received into the endpoint's ring buffer, queues the next
 *	read if there is still room, and wakes any sleeping reader.
 *	If the buffer is full (or the next submit fails) the stream is
 *	parked with UGEN_RA_WB_STOP and restarted by the next read.
 */
Static void
ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr,
    usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char const *tbuf;
	usbd_status err;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF("status=%jd", status, 0, 0, 0);
		/* Park the stream; a later read will restart it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = uimin(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	/* Second copy handles wrap-around at the end of the ring. */
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce, NULL, uimin(n, sce->ra_wb_xferlen), 0,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("error=%d", err);
			/*
			 * The transfer has not been queued. Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1487
/*
 * ugen_bulkwb_intr(xfer, addr, status)
 *
 *	Completion callback for bulk-out write-behind.  Consumes the
 *	bytes just written from the endpoint's ring buffer, queues the
 *	next write if more data is buffered, and wakes any writer.
 *	If the buffer drains (or the next submit fails) the stream is
 *	parked with UGEN_RA_WB_STOP and restarted by the next write.
 */
Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
    usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF("status=%jd", status, 0, 0, 0);
		/* Park the stream; a later write will restart it. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers (read pointer wraps around the ring). */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = uimin(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = uimin(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		/* Second copy handles wrap-around at the end of the ring. */
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
		    ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("error=%d", err);
			/*
			 * The transfer has not been queued. Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1554
/*
 * ugen_set_interface(sc, ifaceidx, altno)
 *
 *	Select alternate setting `altno' on interface `ifaceidx', then
 *	re-record the endpoint descriptors of the new setting in
 *	sc->sc_endpoints.  Returns a usbd_status (0 on success).
 */
Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt, endptno, endpt;
	int dir;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	DPRINTFN(15, "%d %d", ifaceidx, altno, 0, 0);

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return err;
	if (ifaceidx < 0 || ifaceidx >= niface)
		return USBD_INVAL;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return err;
	/*
	 * NOTE(review): this count looks redundant -- nendpt is
	 * recounted below after usbd_set_interface() and never used in
	 * between; confirm before removing.
	 */
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return err;

	/* The alternate setting may have a different endpoint count. */
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	ugen_clear_endpoints(sc);

	/* Record the endpoint descriptors of the new setting. */
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return 0;
}
1605
1606 /* Retrieve a complete descriptor for a certain device and index. */
1607 Static usb_config_descriptor_t *
1608 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1609 {
1610 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1611 int len;
1612 usbd_status err;
1613
1614 UGENHIST_FUNC(); UGENHIST_CALLED();
1615
1616 if (index == USB_CURRENT_CONFIG_INDEX) {
1617 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1618 if (tdesc == NULL)
1619 return NULL;
1620 len = UGETW(tdesc->wTotalLength);
1621 if (lenp)
1622 *lenp = len;
1623 cdesc = kmem_alloc(len, KM_SLEEP);
1624 memcpy(cdesc, tdesc, len);
1625 DPRINTFN(5, "current, len=%jd", len, 0, 0, 0);
1626 } else {
1627 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1628 if (err)
1629 return 0;
1630 len = UGETW(cdescr.wTotalLength);
1631 DPRINTFN(5, "index=%jd, len=%jd", index, len, 0, 0);
1632 if (lenp)
1633 *lenp = len;
1634 cdesc = kmem_alloc(len, KM_SLEEP);
1635 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1636 if (err) {
1637 kmem_free(cdesc, len);
1638 return 0;
1639 }
1640 }
1641 return cdesc;
1642 }
1643
1644 Static int
1645 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1646 {
1647 struct usbd_interface *iface;
1648 usbd_status err;
1649
1650 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1651 if (err)
1652 return -1;
1653 return usbd_get_interface_altindex(iface);
1654 }
1655
1656 Static int
1657 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1658 void *addr, int flag, struct lwp *l)
1659 {
1660 struct ugen_endpoint *sce;
1661 usbd_status err;
1662 struct usbd_interface *iface;
1663 struct usb_config_desc *cd;
1664 usb_config_descriptor_t *cdesc;
1665 struct usb_interface_desc *id;
1666 usb_interface_descriptor_t *idesc;
1667 struct usb_endpoint_desc *ed;
1668 usb_endpoint_descriptor_t *edesc;
1669 struct usb_alt_interface *ai;
1670 struct usb_string_desc *si;
1671 uint8_t conf, alt;
1672 int cdesclen;
1673 int error;
1674 int dir;
1675
1676 UGENHIST_FUNC(); UGENHIST_CALLED();
1677
1678 KASSERT(KERNEL_LOCKED_P()); /* ugen_set_config */
1679
1680 DPRINTFN(5, "cmd=%08jx", cmd, 0, 0, 0);
1681
1682 switch (cmd) {
1683 case FIONBIO:
1684 /* All handled in the upper FS layer. */
1685 return 0;
1686 case USB_SET_SHORT_XFER:
1687 if (endpt == USB_CONTROL_ENDPOINT)
1688 return EINVAL;
1689 /* This flag only affects read */
1690 sce = &sc->sc_endpoints[endpt][IN];
1691 if (sce == NULL || sce->pipeh == NULL)
1692 return EINVAL;
1693 if (*(int *)addr)
1694 sce->state |= UGEN_SHORT_OK;
1695 else
1696 sce->state &= ~UGEN_SHORT_OK;
1697 return 0;
1698 case USB_SET_TIMEOUT:
1699 for (dir = OUT; dir <= IN; dir++) {
1700 sce = &sc->sc_endpoints[endpt][dir];
1701 if (sce == NULL)
1702 return EINVAL;
1703
1704 sce->timeout = *(int *)addr;
1705 }
1706 return 0;
1707 case USB_SET_BULK_RA:
1708 if (endpt == USB_CONTROL_ENDPOINT)
1709 return EINVAL;
1710 sce = &sc->sc_endpoints[endpt][IN];
1711 if (sce == NULL || sce->pipeh == NULL)
1712 return EINVAL;
1713 edesc = sce->edesc;
1714 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1715 return EINVAL;
1716
1717 if (*(int *)addr) {
1718 /* Only turn RA on if it's currently off. */
1719 if (sce->state & UGEN_BULK_RA)
1720 return 0;
1721 KASSERT(sce->ra_wb_xfer == NULL);
1722 KASSERT(sce->ibuf == NULL);
1723
1724 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1725 /* shouldn't happen */
1726 return EINVAL;
1727 error = usbd_create_xfer(sce->pipeh,
1728 sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
1729 if (error)
1730 return error;
1731 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1732 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1733 sce->fill = sce->cur = sce->ibuf;
1734 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1735 sce->ra_wb_used = 0;
1736 sce->state |= UGEN_BULK_RA;
1737 sce->state &= ~UGEN_RA_WB_STOP;
1738 /* Now start reading. */
1739 usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL,
1740 uimin(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1741 0, USBD_NO_TIMEOUT, ugen_bulkra_intr);
1742 err = usbd_transfer(sce->ra_wb_xfer);
1743 if (err != USBD_IN_PROGRESS) {
1744 sce->state &= ~UGEN_BULK_RA;
1745 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1746 sce->ibuf = NULL;
1747 usbd_destroy_xfer(sce->ra_wb_xfer);
1748 sce->ra_wb_xfer = NULL;
1749 return EIO;
1750 }
1751 } else {
1752 /* Only turn RA off if it's currently on. */
1753 if (!(sce->state & UGEN_BULK_RA))
1754 return 0;
1755
1756 sce->state &= ~UGEN_BULK_RA;
1757 usbd_abort_pipe(sce->pipeh);
1758 usbd_destroy_xfer(sce->ra_wb_xfer);
1759 sce->ra_wb_xfer = NULL;
1760 /*
1761 * XXX Discard whatever's in the buffer, but we
1762 * should keep it around and drain the buffer
1763 * instead.
1764 */
1765 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1766 sce->ibuf = NULL;
1767 }
1768 return 0;
1769 case USB_SET_BULK_WB:
1770 if (endpt == USB_CONTROL_ENDPOINT)
1771 return EINVAL;
1772 sce = &sc->sc_endpoints[endpt][OUT];
1773 if (sce == NULL || sce->pipeh == NULL)
1774 return EINVAL;
1775 edesc = sce->edesc;
1776 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1777 return EINVAL;
1778
1779 if (*(int *)addr) {
1780 /* Only turn WB on if it's currently off. */
1781 if (sce->state & UGEN_BULK_WB)
1782 return 0;
1783 KASSERT(sce->ra_wb_xfer == NULL);
1784 KASSERT(sce->ibuf == NULL);
1785
1786 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1787 /* shouldn't happen */
1788 return EINVAL;
1789 error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize,
1790 0, 0, &sce->ra_wb_xfer);
1791 /* XXX check error??? */
1792 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1793 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1794 sce->fill = sce->cur = sce->ibuf;
1795 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1796 sce->ra_wb_used = 0;
1797 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1798 } else {
1799 /* Only turn WB off if it's currently on. */
1800 if (!(sce->state & UGEN_BULK_WB))
1801 return 0;
1802
1803 sce->state &= ~UGEN_BULK_WB;
1804 /*
1805 * XXX Discard whatever's in the buffer, but we
1806 * should keep it around and keep writing to
1807 * drain the buffer instead.
1808 */
1809 usbd_abort_pipe(sce->pipeh);
1810 usbd_destroy_xfer(sce->ra_wb_xfer);
1811 sce->ra_wb_xfer = NULL;
1812 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1813 sce->ibuf = NULL;
1814 }
1815 return 0;
1816 case USB_SET_BULK_RA_OPT:
1817 case USB_SET_BULK_WB_OPT:
1818 {
1819 struct usb_bulk_ra_wb_opt *opt;
1820
1821 if (endpt == USB_CONTROL_ENDPOINT)
1822 return EINVAL;
1823 opt = (struct usb_bulk_ra_wb_opt *)addr;
1824 if (cmd == USB_SET_BULK_RA_OPT)
1825 sce = &sc->sc_endpoints[endpt][IN];
1826 else
1827 sce = &sc->sc_endpoints[endpt][OUT];
1828 if (sce == NULL || sce->pipeh == NULL)
1829 return EINVAL;
1830 if (opt->ra_wb_buffer_size < 1 ||
1831 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1832 opt->ra_wb_request_size < 1 ||
1833 opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1834 return EINVAL;
1835 /*
1836 * XXX These changes do not take effect until the
1837 * next time RA/WB mode is enabled but they ought to
1838 * take effect immediately.
1839 */
1840 sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1841 sce->ra_wb_reqsize = opt->ra_wb_request_size;
1842 return 0;
1843 }
1844 default:
1845 break;
1846 }
1847
1848 if (endpt != USB_CONTROL_ENDPOINT)
1849 return EINVAL;
1850
1851 switch (cmd) {
1852 #ifdef UGEN_DEBUG
1853 case USB_SETDEBUG:
1854 ugendebug = *(int *)addr;
1855 break;
1856 #endif
1857 case USB_GET_CONFIG:
1858 err = usbd_get_config(sc->sc_udev, &conf);
1859 if (err)
1860 return EIO;
1861 *(int *)addr = conf;
1862 break;
1863 case USB_SET_CONFIG:
1864 if (!(flag & FWRITE))
1865 return EPERM;
1866 err = ugen_set_config(sc, *(int *)addr, 1);
1867 switch (err) {
1868 case USBD_NORMAL_COMPLETION:
1869 break;
1870 case USBD_IN_USE:
1871 return EBUSY;
1872 default:
1873 return EIO;
1874 }
1875 break;
1876 case USB_GET_ALTINTERFACE:
1877 ai = (struct usb_alt_interface *)addr;
1878 err = usbd_device2interface_handle(sc->sc_udev,
1879 ai->uai_interface_index, &iface);
1880 if (err)
1881 return EINVAL;
1882 idesc = usbd_get_interface_descriptor(iface);
1883 if (idesc == NULL)
1884 return EIO;
1885 ai->uai_alt_no = idesc->bAlternateSetting;
1886 break;
1887 case USB_SET_ALTINTERFACE:
1888 if (!(flag & FWRITE))
1889 return EPERM;
1890 ai = (struct usb_alt_interface *)addr;
1891 err = usbd_device2interface_handle(sc->sc_udev,
1892 ai->uai_interface_index, &iface);
1893 if (err)
1894 return EINVAL;
1895 err = ugen_set_interface(sc, ai->uai_interface_index,
1896 ai->uai_alt_no);
1897 if (err)
1898 return EINVAL;
1899 break;
1900 case USB_GET_NO_ALT:
1901 ai = (struct usb_alt_interface *)addr;
1902 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
1903 if (cdesc == NULL)
1904 return EINVAL;
1905 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1906 if (idesc == NULL) {
1907 kmem_free(cdesc, cdesclen);
1908 return EINVAL;
1909 }
1910 ai->uai_alt_no = usbd_get_no_alts(cdesc,
1911 idesc->bInterfaceNumber);
1912 kmem_free(cdesc, cdesclen);
1913 break;
1914 case USB_GET_DEVICE_DESC:
1915 *(usb_device_descriptor_t *)addr =
1916 *usbd_get_device_descriptor(sc->sc_udev);
1917 break;
1918 case USB_GET_CONFIG_DESC:
1919 cd = (struct usb_config_desc *)addr;
1920 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
1921 if (cdesc == NULL)
1922 return EINVAL;
1923 cd->ucd_desc = *cdesc;
1924 kmem_free(cdesc, cdesclen);
1925 break;
1926 case USB_GET_INTERFACE_DESC:
1927 id = (struct usb_interface_desc *)addr;
1928 cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
1929 if (cdesc == NULL)
1930 return EINVAL;
1931 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1932 id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1933 alt = ugen_get_alt_index(sc, id->uid_interface_index);
1934 else
1935 alt = id->uid_alt_index;
1936 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1937 if (idesc == NULL) {
1938 kmem_free(cdesc, cdesclen);
1939 return EINVAL;
1940 }
1941 id->uid_desc = *idesc;
1942 kmem_free(cdesc, cdesclen);
1943 break;
1944 case USB_GET_ENDPOINT_DESC:
1945 ed = (struct usb_endpoint_desc *)addr;
1946 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
1947 if (cdesc == NULL)
1948 return EINVAL;
1949 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1950 ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1951 alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1952 else
1953 alt = ed->ued_alt_index;
1954 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1955 alt, ed->ued_endpoint_index);
1956 if (edesc == NULL) {
1957 kmem_free(cdesc, cdesclen);
1958 return EINVAL;
1959 }
1960 ed->ued_desc = *edesc;
1961 kmem_free(cdesc, cdesclen);
1962 break;
1963 case USB_GET_FULL_DESC:
1964 {
1965 int len;
1966 struct iovec iov;
1967 struct uio uio;
1968 struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1969
1970 cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
1971 if (cdesc == NULL)
1972 return EINVAL;
1973 len = cdesclen;
1974 if (len > fd->ufd_size)
1975 len = fd->ufd_size;
1976 iov.iov_base = (void *)fd->ufd_data;
1977 iov.iov_len = len;
1978 uio.uio_iov = &iov;
1979 uio.uio_iovcnt = 1;
1980 uio.uio_resid = len;
1981 uio.uio_offset = 0;
1982 uio.uio_rw = UIO_READ;
1983 uio.uio_vmspace = l->l_proc->p_vmspace;
1984 error = uiomove((void *)cdesc, len, &uio);
1985 kmem_free(cdesc, cdesclen);
1986 return error;
1987 }
1988 case USB_GET_STRING_DESC: {
1989 int len;
1990 si = (struct usb_string_desc *)addr;
1991 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1992 si->usd_language_id, &si->usd_desc, &len);
1993 if (err)
1994 return EINVAL;
1995 break;
1996 }
1997 case USB_DO_REQUEST:
1998 {
1999 struct usb_ctl_request *ur = (void *)addr;
2000 int len = UGETW(ur->ucr_request.wLength);
2001 struct iovec iov;
2002 struct uio uio;
2003 void *ptr = 0;
2004 usbd_status xerr;
2005
2006 error = 0;
2007
2008 if (!(flag & FWRITE))
2009 return EPERM;
2010 /* Avoid requests that would damage the bus integrity. */
2011 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
2012 ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
2013 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
2014 ur->ucr_request.bRequest == UR_SET_CONFIG) ||
2015 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
2016 ur->ucr_request.bRequest == UR_SET_INTERFACE))
2017 return EINVAL;
2018
2019 if (len < 0 || len > 32767)
2020 return EINVAL;
2021 if (len != 0) {
2022 iov.iov_base = (void *)ur->ucr_data;
2023 iov.iov_len = len;
2024 uio.uio_iov = &iov;
2025 uio.uio_iovcnt = 1;
2026 uio.uio_resid = len;
2027 uio.uio_offset = 0;
2028 uio.uio_rw =
2029 ur->ucr_request.bmRequestType & UT_READ ?
2030 UIO_READ : UIO_WRITE;
2031 uio.uio_vmspace = l->l_proc->p_vmspace;
2032 ptr = kmem_alloc(len, KM_SLEEP);
2033 if (uio.uio_rw == UIO_WRITE) {
2034 error = uiomove(ptr, len, &uio);
2035 if (error)
2036 goto ret;
2037 }
2038 }
2039 sce = &sc->sc_endpoints[endpt][IN];
2040 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
2041 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
2042 if (xerr) {
2043 error = EIO;
2044 goto ret;
2045 }
2046 if (len != 0) {
2047 if (uio.uio_rw == UIO_READ) {
2048 size_t alen = uimin(len, ur->ucr_actlen);
2049 error = uiomove(ptr, alen, &uio);
2050 if (error)
2051 goto ret;
2052 }
2053 }
2054 ret:
2055 if (ptr)
2056 kmem_free(ptr, len);
2057 return error;
2058 }
2059 case USB_GET_DEVICEINFO:
2060 usbd_fill_deviceinfo(sc->sc_udev,
2061 (struct usb_device_info *)addr, 0);
2062 break;
2063 case USB_GET_DEVICEINFO_OLD:
2064 {
2065 int ret;
2066 MODULE_HOOK_CALL(usb_subr_fill_30_hook,
2067 (sc->sc_udev, (struct usb_device_info_old *)addr, 0,
2068 usbd_devinfo_vp, usbd_printBCD),
2069 enosys(), ret);
2070 if (ret == 0)
2071 return 0;
2072 return EINVAL;
2073 }
2074 default:
2075 return EINVAL;
2076 }
2077 return 0;
2078 }
2079
2080 static int
2081 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
2082 {
2083 int endpt = UGENENDPOINT(dev);
2084 struct ugen_softc *sc;
2085 int error;
2086
2087 if ((sc = ugenif_acquire(UGENUNIT(dev))) == 0)
2088 return ENXIO;
2089 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
2090 ugenif_release(sc);
2091
2092 return error;
2093 }
2094
/*
 * ugenpoll(dev, events, l)
 *
 *	poll(2)/select(2) entry point.  Reports readability of the IN
 *	endpoint and writability of the OUT endpoint according to the
 *	transfer type; the control endpoint always reports POLLERR.
 */
static int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return POLLHUP;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) {
		revents |= POLLERR;
		goto out;
	}

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	KASSERT(sce_in->edesc || sce_out->edesc);
	KASSERT(sce_in->pipeh || sce_out->pipeh);

	mutex_enter(&sc->sc_lock);
	/* Readability of the IN endpoint, by transfer type. */
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Readable when the interrupt clist has bytes. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Readable when the iso ring is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Readable when read-ahead buffered data. */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	/* Writability of the OUT endpoint, by transfer type. */
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Writable while write-behind has room. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

out:	ugenif_release(sc);
	return revents;
}
2181
/*
 * filt_ugenrdetach(kn)
 *
 *	kqueue detach: unhook the knote from the endpoint's selinfo,
 *	serialized against the transfer callbacks by sc_lock.
 */
static void
filt_ugenrdetach(struct knote *kn)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	struct ugen_softc *sc = sce->sc;

	mutex_enter(&sc->sc_lock);
	selremove_knote(&sce->rsel, kn);
	mutex_exit(&sc->sc_lock);
}
2192
2193 static int
2194 filt_ugenread_intr(struct knote *kn, long hint)
2195 {
2196 struct ugen_endpoint *sce = kn->kn_hook;
2197 struct ugen_softc *sc = sce->sc;
2198 int ret;
2199
2200 mutex_enter(&sc->sc_lock);
2201 if (sc->sc_dying) {
2202 ret = 0;
2203 } else {
2204 kn->kn_data = sce->q.c_cc;
2205 ret = kn->kn_data > 0;
2206 }
2207 mutex_exit(&sc->sc_lock);
2208
2209 return ret;
2210 }
2211
2212 static int
2213 filt_ugenread_isoc(struct knote *kn, long hint)
2214 {
2215 struct ugen_endpoint *sce = kn->kn_hook;
2216 struct ugen_softc *sc = sce->sc;
2217 int ret;
2218
2219 mutex_enter(&sc->sc_lock);
2220 if (sc->sc_dying) {
2221 ret = 0;
2222 } else if (sce->cur == sce->fill) {
2223 ret = 0;
2224 } else if (sce->cur < sce->fill) {
2225 kn->kn_data = sce->fill - sce->cur;
2226 ret = 1;
2227 } else {
2228 kn->kn_data = (sce->limit - sce->cur) +
2229 (sce->fill - sce->ibuf);
2230 ret = 1;
2231 }
2232 mutex_exit(&sc->sc_lock);
2233
2234 return ret;
2235 }
2236
2237 static int
2238 filt_ugenread_bulk(struct knote *kn, long hint)
2239 {
2240 struct ugen_endpoint *sce = kn->kn_hook;
2241 struct ugen_softc *sc = sce->sc;
2242 int ret;
2243
2244 mutex_enter(&sc->sc_lock);
2245 if (sc->sc_dying) {
2246 ret = 0;
2247 } else if (!(sce->state & UGEN_BULK_RA)) {
2248 /*
2249 * We have no easy way of determining if a read will
2250 * yield any data or a write will happen.
2251 * So, emulate "seltrue".
2252 */
2253 ret = filt_seltrue(kn, hint);
2254 } else if (sce->ra_wb_used == 0) {
2255 ret = 0;
2256 } else {
2257 kn->kn_data = sce->ra_wb_used;
2258 ret = 1;
2259 }
2260 mutex_exit(&sc->sc_lock);
2261
2262 return ret;
2263 }
2264
2265 static int
2266 filt_ugenwrite_bulk(struct knote *kn, long hint)
2267 {
2268 struct ugen_endpoint *sce = kn->kn_hook;
2269 struct ugen_softc *sc = sce->sc;
2270 int ret;
2271
2272 mutex_enter(&sc->sc_lock);
2273 if (sc->sc_dying) {
2274 ret = 0;
2275 } else if (!(sce->state & UGEN_BULK_WB)) {
2276 /*
2277 * We have no easy way of determining if a read will
2278 * yield any data or a write will happen.
2279 * So, emulate "seltrue".
2280 */
2281 ret = filt_seltrue(kn, hint);
2282 } else if (sce->ra_wb_used == sce->limit - sce->ibuf) {
2283 ret = 0;
2284 } else {
2285 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2286 ret = 1;
2287 }
2288 mutex_exit(&sc->sc_lock);
2289
2290 return ret;
2291 }
2292
/* kqueue read filter for interrupt-in endpoints (clist queue). */
static const struct filterops ugenread_intr_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_intr,
};

/* kqueue read filter for isochronous-in endpoints (ring buffer). */
static const struct filterops ugenread_isoc_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_isoc,
};

/* kqueue read filter for bulk-in endpoints (read-ahead aware). */
static const struct filterops ugenread_bulk_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_bulk,
};

/* kqueue write filter for bulk-out endpoints (write-behind aware). */
static const struct filterops ugenwrite_bulk_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenwrite_bulk,
};
2320
2321 static int
2322 ugenkqfilter(dev_t dev, struct knote *kn)
2323 {
2324 struct ugen_softc *sc;
2325 struct ugen_endpoint *sce;
2326 struct selinfo *sip;
2327 int error;
2328
2329 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
2330 return ENXIO;
2331
2332 if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) {
2333 error = ENODEV;
2334 goto out;
2335 }
2336
2337 switch (kn->kn_filter) {
2338 case EVFILT_READ:
2339 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
2340 if (sce == NULL) {
2341 error = EINVAL;
2342 goto out;
2343 }
2344
2345 sip = &sce->rsel;
2346 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2347 case UE_INTERRUPT:
2348 kn->kn_fop = &ugenread_intr_filtops;
2349 break;
2350 case UE_ISOCHRONOUS:
2351 kn->kn_fop = &ugenread_isoc_filtops;
2352 break;
2353 case UE_BULK:
2354 kn->kn_fop = &ugenread_bulk_filtops;
2355 break;
2356 default:
2357 error = EINVAL;
2358 goto out;
2359 }
2360 break;
2361
2362 case EVFILT_WRITE:
2363 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
2364 if (sce == NULL) {
2365 error = EINVAL;
2366 goto out;
2367 }
2368
2369 sip = &sce->rsel;
2370 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2371 case UE_INTERRUPT:
2372 case UE_ISOCHRONOUS:
2373 /* XXX poll doesn't support this */
2374 error = EINVAL;
2375 goto out;
2376
2377 case UE_BULK:
2378 kn->kn_fop = &ugenwrite_bulk_filtops;
2379 break;
2380 default:
2381 error = EINVAL;
2382 goto out;
2383 }
2384 break;
2385
2386 default:
2387 error = EINVAL;
2388 goto out;
2389 }
2390
2391 kn->kn_hook = sce;
2392
2393 mutex_enter(&sc->sc_lock);
2394 selrecord_knote(sip, kn);
2395 mutex_exit(&sc->sc_lock);
2396
2397 error = 0;
2398
2399 out: ugenif_release(sc);
2400 return error;
2401 }
2402
2403 MODULE(MODULE_CLASS_DRIVER, ugen, NULL);
2404
2405 static int
2406 ugen_modcmd(modcmd_t cmd, void *aux)
2407 {
2408
2409 switch (cmd) {
2410 case MODULE_CMD_INIT:
2411 mutex_init(&ugenif.lock, MUTEX_DEFAULT, IPL_NONE);
2412 rb_tree_init(&ugenif.tree, &ugenif_tree_ops);
2413 return 0;
2414 default:
2415 return ENOTTY;
2416 }
2417 }
2418