/*	$NetBSD: ugen.c,v 1.171.2.2 2024/04/16 18:59:49 martin Exp $	*/
2
3 /*
4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology.
10 *
11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved.
12 * Effort sponsored in part by the Defense Advanced Research Projects
13 * Agency (DARPA) and the Department of the Interior National Business
14 * Center under agreement number NBCHC050166.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.171.2.2 2024/04/16 18:59:49 martin Exp $");
41
42 #ifdef _KERNEL_OPT
43 #include "opt_compat_netbsd.h"
44 #include "opt_usb.h"
45 #endif
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/kmem.h>
51 #include <sys/device.h>
52 #include <sys/ioctl.h>
53 #include <sys/conf.h>
54 #include <sys/tty.h>
55 #include <sys/file.h>
56 #include <sys/select.h>
57 #include <sys/proc.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60 #include <sys/compat_stub.h>
61 #include <sys/module.h>
62 #include <sys/rbtree.h>
63
64 #include <dev/usb/usb.h>
65 #include <dev/usb/usbdi.h>
66 #include <dev/usb/usbdi_util.h>
67 #include <dev/usb/usbhist.h>
68
69 #include "ioconf.h"
70
71 #ifdef USB_DEBUG
72 #ifndef UGEN_DEBUG
73 #define ugendebug 0
74 #else
75 int ugendebug = 0;
76
/*
 * Create the hw.ugen sysctl subtree with a single read/write integer
 * node, hw.ugen.debug, backed by the `ugendebug' verbosity variable
 * used by the DPRINTF/DPRINTFN macros below.
 */
SYSCTL_SETUP(sysctl_hw_ugen_setup, "sysctl hw.ugen setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	/* Parent node: hw.ugen */
	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "ugen",
	    SYSCTL_DESCR("ugen global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

	/* control debugging printfs */
	err = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &ugendebug, sizeof(ugendebug), CTL_CREATE, CTL_EOL);
	if (err)
		goto fail;

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}
103
104 #endif /* UGEN_DEBUG */
105 #endif /* USB_DEBUG */
106
107 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOGN(ugendebug,1,FMT,A,B,C,D)
108 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(ugendebug,N,FMT,A,B,C,D)
109 #define UGENHIST_FUNC() USBHIST_FUNC()
110 #define UGENHIST_CALLED(name) USBHIST_CALLED(ugendebug)
111
112 #define UGEN_CHUNK 128 /* chunk size for read */
113 #define UGEN_IBSIZE 1020 /* buffer size */
114 #define UGEN_BBSIZE 1024
115
116 #define UGEN_NISOREQS 4 /* number of outstanding xfer requests */
117 #define UGEN_NISORFRMS 8 /* number of transactions per req */
118 #define UGEN_NISOFRAMES (UGEN_NISORFRMS * UGEN_NISOREQS)
119
120 #define UGEN_BULK_RA_WB_BUFSIZE 16384 /* default buffer size */
121 #define UGEN_BULK_RA_WB_BUFMAX (1 << 20) /* maximum allowed buffer */
122
/*
 * One outstanding isochronous request.  Each isoc IN endpoint keeps
 * UGEN_NISOREQS of these in flight (see ugenopen()), each carrying
 * UGEN_NISORFRMS frames per transfer.
 */
struct isoreq {
	struct ugen_endpoint *sce;	/* back pointer to owning endpoint */
	struct usbd_xfer *xfer;		/* the in-flight transfer */
	void *dmabuf;			/* usbd_get_buffer(xfer) */
	uint16_t sizes[UGEN_NISORFRMS];	/* per-frame lengths for the xfer */
};
129
/*
 * Per-endpoint, per-direction state.  There is one of these for each
 * (endpoint address, OUT/IN) pair in the softc's sc_endpoints array.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;		/* back pointer to device softc */
	usb_endpoint_descriptor_t *edesc; /* NULL if endpoint not present */
	struct usbd_interface *iface;
	int state;			/* UGEN_* flag bits below */
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;	/* open pipe, NULL when closed */
	struct clist q;			/* input queue (interrupt endpoints) */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	uint32_t timeout;	/* I/O timeout in ms, USBD_NO_TIMEOUT = none */
	uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	uint32_t ra_wb_used;	 /* how much is in buffer */
	uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;
	struct isoreq isoreqs[UGEN_NISOREQS];
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;		/* select/poll state */
	kcondvar_t cv;			/* sleepers in read/write paths */
};
157
/*
 * Per-device instance state.  Instances are kept in the global
 * `ugenif' rb-tree, keyed by sc_unit (see ugenif_get_unit()).
 */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	struct usbd_device *sc_udev;
	struct rb_node sc_node;		/* linkage in ugenif.tree */
	unsigned sc_unit;		/* minor-device unit number */

	kmutex_t sc_lock;
	kcondvar_t sc_detach_cv;	/* signalled when sc_refcnt drops */

	char sc_is_open[USB_MAX_ENDPOINTS];	/* protected by kernel lock */
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;			/* see ugenif_acquire/release */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for bulk/intr I/O */
	u_char sc_dying;		/* set when device is going away */
	u_char sc_attached;		/* attach completed successfully */
};
177
/*
 * Global registry of attached ugen instances, used to map unit
 * numbers (from device minors) to softc's.
 */
static struct {
	kmutex_t lock;		/* serializes all tree operations */
	rb_tree_t tree;		/* struct ugen_softc, keyed by sc_unit */
} ugenif __cacheline_aligned;
182
183 static int
184 compare_ugen(void *cookie, const void *vsca, const void *vscb)
185 {
186 const struct ugen_softc *sca = vsca;
187 const struct ugen_softc *scb = vscb;
188
189 if (sca->sc_unit < scb->sc_unit)
190 return -1;
191 if (sca->sc_unit > scb->sc_unit)
192 return +1;
193 return 0;
194 }
195
196 static int
197 compare_ugen_key(void *cookie, const void *vsc, const void *vk)
198 {
199 const struct ugen_softc *sc = vsc;
200 const unsigned *k = vk;
201
202 if (sc->sc_unit < *k)
203 return -1;
204 if (sc->sc_unit > *k)
205 return +1;
206 return 0;
207 }
208
/* Operations for the ugenif.tree rb-tree of softc's, keyed by unit. */
static const rb_tree_ops_t ugenif_tree_ops = {
	.rbto_compare_nodes = compare_ugen,
	.rbto_compare_key = compare_ugen_key,
	.rbto_node_offset = offsetof(struct ugen_softc, sc_node),
};
214
/*
 * Assign the lowest free unit number to sc, insert it into the global
 * tree, and publish the unit in the device properties dictionary
 * under "ugen-unit".
 */
static void
ugenif_get_unit(struct ugen_softc *sc)
{
	struct ugen_softc *sc0;
	unsigned i;

	mutex_enter(&ugenif.lock);
	/*
	 * Walk the tree in unit order; the first index i that does not
	 * match an existing sc_unit is the lowest free unit.
	 */
	for (i = 0, sc0 = RB_TREE_MIN(&ugenif.tree);
	     sc0 != NULL && i == sc0->sc_unit;
	     i++, sc0 = RB_TREE_NEXT(&ugenif.tree, sc0))
		KASSERT(i < UINT_MAX);
	KASSERT(rb_tree_find_node(&ugenif.tree, &i) == NULL);
	sc->sc_unit = i;
	sc0 = rb_tree_insert_node(&ugenif.tree, sc);
	KASSERT(sc0 == sc);	/* no duplicate key possible */
	KASSERT(rb_tree_find_node(&ugenif.tree, &i) == sc);
	mutex_exit(&ugenif.lock);

	prop_dictionary_set_uint(device_properties(sc->sc_dev),
	    "ugen-unit", sc->sc_unit);
}
236
/*
 * Release sc's unit number: remove the "ugen-unit" device property
 * and take the softc out of the global tree.  The unit is poisoned
 * with (unsigned)-1 afterwards.
 */
static void
ugenif_put_unit(struct ugen_softc *sc)
{

	prop_dictionary_remove(device_properties(sc->sc_dev),
	    "ugen-unit");

	mutex_enter(&ugenif.lock);
	KASSERT(rb_tree_find_node(&ugenif.tree, &sc->sc_unit) == sc);
	rb_tree_remove_node(&ugenif.tree, sc);
	sc->sc_unit = -1;	/* poison */
	mutex_exit(&ugenif.lock);
}
250
/*
 * Look up the softc for `unit' and take a reference on it, unless the
 * device is being detached.  Returns NULL if the unit does not exist
 * or is dying.  The reference must be dropped with ugenif_release().
 */
static struct ugen_softc *
ugenif_acquire(unsigned unit)
{
	struct ugen_softc *sc;

	mutex_enter(&ugenif.lock);
	sc = rb_tree_find_node(&ugenif.tree, &unit);
	if (sc == NULL)
		goto out;
	/* Take sc_lock while still holding ugenif.lock so sc can't go. */
	mutex_enter(&sc->sc_lock);
	if (sc->sc_dying) {
		mutex_exit(&sc->sc_lock);
		sc = NULL;
		goto out;
	}
	KASSERT(sc->sc_refcnt < INT_MAX);
	sc->sc_refcnt++;
	mutex_exit(&sc->sc_lock);
out:	mutex_exit(&ugenif.lock);

	return sc;
}
273
/*
 * Drop a reference taken by ugenif_acquire().  When the count goes
 * negative, wake sc_detach_cv waiters.
 * NOTE(review): presumably ugen_detach() drains references by waiting
 * on sc_detach_cv with a biased count — confirm against the detach
 * code, which is outside this view.
 */
static void
ugenif_release(struct ugen_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt < 0)
		cv_broadcast(&sc->sc_detach_cv);
	mutex_exit(&sc->sc_lock);
}
283
284 static dev_type_open(ugenopen);
285 static dev_type_close(ugenclose);
286 static dev_type_read(ugenread);
287 static dev_type_write(ugenwrite);
288 static dev_type_ioctl(ugenioctl);
289 static dev_type_poll(ugenpoll);
290 static dev_type_kqfilter(ugenkqfilter);
291
/*
 * Character device switch for ugen devices.  The minor number encodes
 * both unit and endpoint; see UGENUNIT()/UGENENDPOINT() below.
 */
const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
306
307 Static void ugenintr(struct usbd_xfer *, void *,
308 usbd_status);
309 Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
310 usbd_status);
311 Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
312 usbd_status);
313 Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
314 usbd_status);
315 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
316 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
317 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
318 void *, int, struct lwp *);
319 Static int ugen_set_config(struct ugen_softc *, int, int);
320 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
321 int, int *);
322 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
323 Static int ugen_get_alt_index(struct ugen_softc *, int);
324 Static void ugen_clear_endpoints(struct ugen_softc *);
325
326 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
327 #define UGENENDPOINT(n) (minor(n) & 0xf)
328 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
329
330 static int ugenif_match(device_t, cfdata_t, void *);
331 static void ugenif_attach(device_t, device_t, void *);
332 static int ugen_match(device_t, cfdata_t, void *);
333 static void ugen_attach(device_t, device_t, void *);
334 static int ugen_detach(device_t, int);
335 static int ugen_activate(device_t, enum devact);
336
337 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match,
338 ugen_attach, ugen_detach, ugen_activate);
339 CFATTACH_DECL_NEW(ugenif, sizeof(struct ugen_softc), ugenif_match,
340 ugenif_attach, ugen_detach, ugen_activate);
341
342 /* toggle to control attach priority. -1 means "let autoconf decide" */
343 int ugen_override = -1;
344
345 static int
346 ugen_match(device_t parent, cfdata_t match, void *aux)
347 {
348 struct usb_attach_arg *uaa = aux;
349 int override;
350
351 if (ugen_override != -1)
352 override = ugen_override;
353 else
354 override = match->cf_flags & 1;
355
356 if (override)
357 return UMATCH_HIGHEST;
358 else if (uaa->uaa_usegeneric)
359 return UMATCH_GENERIC;
360 else
361 return UMATCH_NONE;
362 }
363
364 static int
365 ugenif_match(device_t parent, cfdata_t match, void *aux)
366 {
367 /*
368 * Like ugen(4), ugenif(4) also has an override flag. It has the
369 * opposite effect, however, causing us to match with GENERIC
370 * priority rather than HIGHEST.
371 */
372 return (match->cf_flags & 1) ? UMATCH_GENERIC : UMATCH_HIGHEST;
373 }
374
375 static void
376 ugen_attach(device_t parent, device_t self, void *aux)
377 {
378 struct usb_attach_arg *uaa = aux;
379 struct usbif_attach_arg uiaa;
380
381 memset(&uiaa, 0, sizeof(uiaa));
382 uiaa.uiaa_port = uaa->uaa_port;
383 uiaa.uiaa_vendor = uaa->uaa_vendor;
384 uiaa.uiaa_product = uaa->uaa_product;
385 uiaa.uiaa_release = uaa->uaa_release;
386 uiaa.uiaa_device = uaa->uaa_device;
387 uiaa.uiaa_configno = -1;
388 uiaa.uiaa_ifaceno = -1;
389
390 ugenif_attach(parent, self, &uiaa);
391 }
392
/*
 * Common attach path for ugen(4) and ugenif(4).  Initializes locks
 * and per-endpoint selinfo/cv state, optionally selects configuration
 * index 0 (whole-device attach only), records the endpoint layout of
 * the current configuration, and finally registers the unit so it
 * becomes reachable through the device nodes.
 */
static void
ugenif_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uiaa->uiaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uiaa->uiaa_device;

	/*
	 * Initialize the "nonzero cruft" (selinfo/cv) once; it survives
	 * ugen_clear_endpoints() across configuration changes.
	 */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	if (uiaa->uiaa_ifaceno < 0) {
		/*
		 * If we attach the whole device,
		 * set configuration index 0, the default one.
		 */
		err = usbd_set_config_index(udev, 0, 0);
		if (err) {
			aprint_error_dev(self,
			    "setting configuration index 0 failed\n");
			return;
		}
	}

	/* Get current configuration */
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf, uiaa->uiaa_ifaceno < 0);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		return;
	}

	/* Publish the unit last so opens only see fully set-up state. */
	ugenif_get_unit(sc);
	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);
	sc->sc_attached = 1;
}
457
458 Static void
459 ugen_clear_endpoints(struct ugen_softc *sc)
460 {
461
462 /* Clear out the old info, but leave the selinfo and cv initialised. */
463 for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
464 for (int dir = OUT; dir <= IN; dir++) {
465 struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
466 memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
467 }
468 }
469 }
470
471 Static int
472 ugen_set_config(struct ugen_softc *sc, int configno, int chkopen)
473 {
474 struct usbd_device *dev = sc->sc_udev;
475 usb_config_descriptor_t *cdesc;
476 struct usbd_interface *iface;
477 usb_endpoint_descriptor_t *ed;
478 struct ugen_endpoint *sce;
479 uint8_t niface, nendpt;
480 int ifaceno, endptno, endpt;
481 usbd_status err;
482 int dir;
483
484 UGENHIST_FUNC(); UGENHIST_CALLED();
485
486 DPRINTFN(1, "ugen%jd: to configno %jd, sc=%jx",
487 device_unit(sc->sc_dev), configno, (uintptr_t)sc, 0);
488
489 KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */
490
491 if (chkopen) {
492 /*
493 * We start at 1, not 0, because we don't care whether the
494 * control endpoint is open or not. It is always present.
495 */
496 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
497 if (sc->sc_is_open[endptno]) {
498 DPRINTFN(1,
499 "ugen%jd - endpoint %d is open",
500 device_unit(sc->sc_dev), endptno, 0, 0);
501 return USBD_IN_USE;
502 }
503
504 /* Prevent opening while we're setting the config. */
505 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++) {
506 KASSERT(!sc->sc_is_open[endptno]);
507 sc->sc_is_open[endptno] = 1;
508 }
509 }
510
511 /* Avoid setting the current value. */
512 cdesc = usbd_get_config_descriptor(dev);
513 if (!cdesc || cdesc->bConfigurationValue != configno) {
514 err = usbd_set_config_no(dev, configno, 1);
515 if (err)
516 goto out;
517 }
518
519 ugen_clear_endpoints(sc);
520
521 err = usbd_interface_count(dev, &niface);
522 if (err)
523 goto out;
524
525 for (ifaceno = 0; ifaceno < niface; ifaceno++) {
526 DPRINTFN(1, "ifaceno %jd", ifaceno, 0, 0, 0);
527 err = usbd_device2interface_handle(dev, ifaceno, &iface);
528 if (err)
529 goto out;
530 err = usbd_endpoint_count(iface, &nendpt);
531 if (err)
532 goto out;
533 for (endptno = 0; endptno < nendpt; endptno++) {
534 ed = usbd_interface2endpoint_descriptor(iface,endptno);
535 KASSERT(ed != NULL);
536 endpt = ed->bEndpointAddress;
537 dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
538 sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
539 DPRINTFN(1, "endptno %jd, endpt=0x%02jx (%jd,%jd)",
540 endptno, endpt, UE_GET_ADDR(endpt),
541 UE_GET_DIR(endpt));
542 sce->sc = sc;
543 sce->edesc = ed;
544 sce->iface = iface;
545 }
546 }
547 err = USBD_NORMAL_COMPLETION;
548
549 out: if (chkopen) {
550 /*
551 * Allow open again now that we're done trying to set
552 * the config.
553 */
554 for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++) {
555 KASSERT(sc->sc_is_open[endptno]);
556 sc->sc_is_open[endptno] = 0;
557 }
558 }
559 return err;
560 }
561
562 static int
563 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
564 {
565 struct ugen_softc *sc;
566 int unit = UGENUNIT(dev);
567 int endpt = UGENENDPOINT(dev);
568 usb_endpoint_descriptor_t *edesc;
569 struct ugen_endpoint *sce;
570 int dir, isize;
571 usbd_status err;
572 struct usbd_xfer *xfer;
573 int i, j;
574 int error;
575 int opened = 0;
576
577 UGENHIST_FUNC(); UGENHIST_CALLED();
578
579 KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */
580
581 if ((sc = ugenif_acquire(unit)) == NULL)
582 return ENXIO;
583
584 DPRINTFN(5, "flag=%jd, mode=%jd, unit=%jd endpt=%jd",
585 flag, mode, unit, endpt);
586
587 /* The control endpoint allows multiple opens. */
588 if (endpt == USB_CONTROL_ENDPOINT) {
589 opened = sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
590 error = 0;
591 goto out;
592 }
593
594 if (sc->sc_is_open[endpt]) {
595 error = EBUSY;
596 goto out;
597 }
598 opened = sc->sc_is_open[endpt] = 1;
599
600 /* Make sure there are pipes for all directions. */
601 for (dir = OUT; dir <= IN; dir++) {
602 if (flag & (dir == OUT ? FWRITE : FREAD)) {
603 sce = &sc->sc_endpoints[endpt][dir];
604 if (sce->edesc == NULL) {
605 error = ENXIO;
606 goto out;
607 }
608 }
609 }
610
611 /* Actually open the pipes. */
612 /* XXX Should back out properly if it fails. */
613 for (dir = OUT; dir <= IN; dir++) {
614 if (!(flag & (dir == OUT ? FWRITE : FREAD)))
615 continue;
616 sce = &sc->sc_endpoints[endpt][dir];
617 sce->state = 0;
618 sce->timeout = USBD_NO_TIMEOUT;
619 DPRINTFN(5, "sc=%jx, endpt=%jd, dir=%jd, sce=%jp",
620 (uintptr_t)sc, endpt, dir, (uintptr_t)sce);
621 edesc = sce->edesc;
622 switch (edesc->bmAttributes & UE_XFERTYPE) {
623 case UE_INTERRUPT:
624 if (dir == OUT) {
625 err = usbd_open_pipe(sce->iface,
626 edesc->bEndpointAddress, 0, &sce->pipeh);
627 if (err) {
628 error = EIO;
629 goto out;
630 }
631 break;
632 }
633 isize = UGETW(edesc->wMaxPacketSize);
634 if (isize == 0) { /* shouldn't happen */
635 error = EINVAL;
636 goto out;
637 }
638 sce->ibuf = kmem_alloc(isize, KM_SLEEP);
639 DPRINTFN(5, "intr endpt=%d, isize=%d",
640 endpt, isize, 0, 0);
641 if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
642 kmem_free(sce->ibuf, isize);
643 sce->ibuf = NULL;
644 error = ENOMEM;
645 goto out;
646 }
647 err = usbd_open_pipe_intr(sce->iface,
648 edesc->bEndpointAddress,
649 USBD_SHORT_XFER_OK, &sce->pipeh, sce,
650 sce->ibuf, isize, ugenintr,
651 USBD_DEFAULT_INTERVAL);
652 if (err) {
653 clfree(&sce->q);
654 kmem_free(sce->ibuf, isize);
655 sce->ibuf = NULL;
656 error = EIO;
657 goto out;
658 }
659 DPRINTFN(5, "interrupt open done", 0, 0, 0, 0);
660 break;
661 case UE_BULK:
662 err = usbd_open_pipe(sce->iface,
663 edesc->bEndpointAddress, 0, &sce->pipeh);
664 if (err) {
665 error = EIO;
666 goto out;
667 }
668 sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
669 /*
670 * Use request size for non-RA/WB transfers
671 * as the default.
672 */
673 sce->ra_wb_reqsize = UGEN_BBSIZE;
674 break;
675 case UE_ISOCHRONOUS:
676 if (dir == OUT) {
677 error = EINVAL;
678 goto out;
679 }
680 isize = UGETW(edesc->wMaxPacketSize);
681 if (isize == 0) { /* shouldn't happen */
682 error = EINVAL;
683 goto out;
684 }
685 sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
686 KM_SLEEP);
687 sce->cur = sce->fill = sce->ibuf;
688 sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
689 DPRINTFN(5, "isoc endpt=%d, isize=%d",
690 endpt, isize, 0, 0);
691 err = usbd_open_pipe(sce->iface,
692 edesc->bEndpointAddress, 0, &sce->pipeh);
693 if (err) {
694 kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
695 sce->ibuf = NULL;
696 error = EIO;
697 goto out;
698 }
699 for (i = 0; i < UGEN_NISOREQS; ++i) {
700 sce->isoreqs[i].sce = sce;
701 err = usbd_create_xfer(sce->pipeh,
702 isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
703 &xfer);
704 if (err)
705 goto bad;
706 sce->isoreqs[i].xfer = xfer;
707 sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
708 for (j = 0; j < UGEN_NISORFRMS; ++j)
709 sce->isoreqs[i].sizes[j] = isize;
710 usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
711 sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
712 ugen_isoc_rintr);
713 (void)usbd_transfer(xfer);
714 }
715 DPRINTFN(5, "isoc open done", 0, 0, 0, 0);
716 break;
717 bad:
718 while (--i >= 0) { /* implicit buffer free */
719 usbd_destroy_xfer(sce->isoreqs[i].xfer);
720 sce->isoreqs[i].xfer = NULL;
721 }
722 usbd_close_pipe(sce->pipeh);
723 sce->pipeh = NULL;
724 kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
725 sce->ibuf = NULL;
726 error = ENOMEM;
727 goto out;
728 case UE_CONTROL:
729 sce->timeout = USBD_DEFAULT_TIMEOUT;
730 error = EINVAL;
731 goto out;
732 }
733 }
734 error = 0;
735 out: if (error && opened)
736 sc->sc_is_open[endpt] = 0;
737 ugenif_release(sc);
738 return error;
739 }
740
/*
 * Tear down the state created by ugenopen() for one endpoint: abort
 * and close the open pipes for each requested direction, destroy any
 * RA/WB or isochronous transfers, free queues and buffers, and mark
 * the endpoint closed.  Caller holds the kernel lock (sc_is_open).
 */
static void
ugen_do_close(struct ugen_softc *sc, int flag, int endpt)
{
	struct ugen_endpoint *sce;
	int dir;
	int i;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	if (!sc->sc_is_open[endpt])
		goto out;

	/* The control endpoint has no pipes to tear down. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, "close control", 0, 0, 0, 0);
		goto out;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, "endpt=%jd dir=%jd sce=%jx",
		    endpt, dir, (uintptr_t)sce, 0);

		/* Stop I/O before destroying transfers/queues. */
		usbd_abort_pipe(sce->pipeh);

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		int msize = 0;	/* size of ibuf to free, per xfer type */

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
				sce->isoreqs[i].xfer = NULL;
			}
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			/* ibuf only exists if RA/WB mode was enabled. */
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				sce->ra_wb_xfer = NULL;
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}

out:	sc->sc_is_open[endpt] = 0;
	/* Sanity: everything for this endpoint must now be released. */
	for (dir = OUT; dir <= IN; dir++) {
		sce = &sc->sc_endpoints[endpt][dir];
		KASSERT(sce->pipeh == NULL);
		KASSERT(sce->ibuf == NULL);
		KASSERT(sce->ra_wb_xfer == NULL);
		for (i = 0; i < UGEN_NISOREQS; i++)
			KASSERT(sce->isoreqs[i].xfer == NULL);
	}
}
815
816 static int
817 ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
818 {
819 int endpt = UGENENDPOINT(dev);
820 struct ugen_softc *sc;
821
822 UGENHIST_FUNC(); UGENHIST_CALLED();
823
824 DPRINTFN(5, "flag=%jd, mode=%jd, unit=%jd, endpt=%jd",
825 flag, mode, UGENUNIT(dev), endpt);
826
827 KASSERT(KERNEL_LOCKED_P()); /* ugen_do_close */
828
829 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
830 return ENXIO;
831
832 KASSERT(sc->sc_is_open[endpt]);
833 ugen_do_close(sc, flag, endpt);
834 KASSERT(!sc->sc_is_open[endpt]);
835
836 ugenif_release(sc);
837
838 return 0;
839 }
840
841 Static int
842 ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
843 {
844 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
845 uint32_t n, tn;
846 struct usbd_xfer *xfer;
847 usbd_status err;
848 int error = 0;
849
850 UGENHIST_FUNC(); UGENHIST_CALLED();
851
852 DPRINTFN(5, "ugen%d: %jd", device_unit(sc->sc_dev), endpt, 0, 0);
853
854 if (endpt == USB_CONTROL_ENDPOINT)
855 return ENODEV;
856
857 KASSERT(sce->edesc);
858 KASSERT(sce->pipeh);
859
860 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
861 case UE_INTERRUPT:
862 /* Block until activity occurred. */
863 mutex_enter(&sc->sc_lock);
864 while (sce->q.c_cc == 0) {
865 if (flag & IO_NDELAY) {
866 mutex_exit(&sc->sc_lock);
867 return EWOULDBLOCK;
868 }
869 DPRINTFN(5, "sleep on %jx", (uintptr_t)sce, 0, 0, 0);
870 /* "ugenri" */
871 error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
872 mstohz(sce->timeout));
873 DPRINTFN(5, "woke, error=%jd",
874 error, 0, 0, 0);
875 if (sc->sc_dying)
876 error = EIO;
877 if (error)
878 break;
879 }
880 mutex_exit(&sc->sc_lock);
881
882 /* Transfer as many chunks as possible. */
883 while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
884 n = uimin(sce->q.c_cc, uio->uio_resid);
885 if (n > sizeof(sc->sc_buffer))
886 n = sizeof(sc->sc_buffer);
887
888 /* Remove a small chunk from the input queue. */
889 q_to_b(&sce->q, sc->sc_buffer, n);
890 DPRINTFN(5, "got %jd chars", n, 0, 0, 0);
891
892 /* Copy the data to the user process. */
893 error = uiomove(sc->sc_buffer, n, uio);
894 if (error)
895 break;
896 }
897 break;
898 case UE_BULK:
899 if (sce->state & UGEN_BULK_RA) {
900 DPRINTFN(5, "BULK_RA req: %zd used: %d",
901 uio->uio_resid, sce->ra_wb_used, 0, 0);
902 xfer = sce->ra_wb_xfer;
903
904 mutex_enter(&sc->sc_lock);
905 if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
906 mutex_exit(&sc->sc_lock);
907 return EWOULDBLOCK;
908 }
909 while (uio->uio_resid > 0 && !error) {
910 while (sce->ra_wb_used == 0) {
911 DPRINTFN(5, "sleep on %jx",
912 (uintptr_t)sce, 0, 0, 0);
913 /* "ugenrb" */
914 error = cv_timedwait_sig(&sce->cv,
915 &sc->sc_lock, mstohz(sce->timeout));
916 DPRINTFN(5, "woke, error=%jd",
917 error, 0, 0, 0);
918 if (sc->sc_dying)
919 error = EIO;
920 if (error)
921 break;
922 }
923
924 /* Copy data to the process. */
925 while (uio->uio_resid > 0
926 && sce->ra_wb_used > 0) {
927 n = uimin(uio->uio_resid,
928 sce->ra_wb_used);
929 n = uimin(n, sce->limit - sce->cur);
930 error = uiomove(sce->cur, n, uio);
931 if (error)
932 break;
933 sce->cur += n;
934 sce->ra_wb_used -= n;
935 if (sce->cur == sce->limit)
936 sce->cur = sce->ibuf;
937 }
938
939 /*
940 * If the transfers stopped because the
941 * buffer was full, restart them.
942 */
943 if (sce->state & UGEN_RA_WB_STOP &&
944 sce->ra_wb_used < sce->limit - sce->ibuf) {
945 n = (sce->limit - sce->ibuf)
946 - sce->ra_wb_used;
947 usbd_setup_xfer(xfer, sce, NULL,
948 uimin(n, sce->ra_wb_xferlen),
949 0, USBD_NO_TIMEOUT,
950 ugen_bulkra_intr);
951 sce->state &= ~UGEN_RA_WB_STOP;
952 err = usbd_transfer(xfer);
953 if (err != USBD_IN_PROGRESS)
954 /*
955 * The transfer has not been
956 * queued. Setting STOP
957 * will make us try
958 * again at the next read.
959 */
960 sce->state |= UGEN_RA_WB_STOP;
961 }
962 }
963 mutex_exit(&sc->sc_lock);
964 break;
965 }
966 error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
967 0, 0, &xfer);
968 if (error)
969 return error;
970 while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
971 DPRINTFN(1, "start transfer %jd bytes", n, 0, 0, 0);
972 tn = n;
973 err = usbd_bulk_transfer(xfer, sce->pipeh,
974 sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
975 sce->timeout, sc->sc_buffer, &tn);
976 if (err) {
977 if (err == USBD_INTERRUPTED)
978 error = EINTR;
979 else if (err == USBD_TIMEOUT)
980 error = ETIMEDOUT;
981 else
982 error = EIO;
983 break;
984 }
985 DPRINTFN(1, "got %jd bytes", tn, 0, 0, 0);
986 error = uiomove(sc->sc_buffer, tn, uio);
987 if (error || tn < n)
988 break;
989 }
990 usbd_destroy_xfer(xfer);
991 break;
992 case UE_ISOCHRONOUS:
993 mutex_enter(&sc->sc_lock);
994 while (sce->cur == sce->fill) {
995 if (flag & IO_NDELAY) {
996 mutex_exit(&sc->sc_lock);
997 return EWOULDBLOCK;
998 }
999 /* "ugenri" */
1000 DPRINTFN(5, "sleep on %jx", (uintptr_t)sce, 0, 0, 0);
1001 error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
1002 mstohz(sce->timeout));
1003 DPRINTFN(5, "woke, error=%jd", error, 0, 0, 0);
1004 if (sc->sc_dying)
1005 error = EIO;
1006 if (error)
1007 break;
1008 }
1009
1010 while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
1011 if(sce->fill > sce->cur)
1012 n = uimin(sce->fill - sce->cur, uio->uio_resid);
1013 else
1014 n = uimin(sce->limit - sce->cur, uio->uio_resid);
1015
1016 DPRINTFN(5, "isoc got %jd chars", n, 0, 0, 0);
1017
1018 /* Copy the data to the user process. */
1019 error = uiomove(sce->cur, n, uio);
1020 if (error)
1021 break;
1022 sce->cur += n;
1023 if (sce->cur >= sce->limit)
1024 sce->cur = sce->ibuf;
1025 }
1026 mutex_exit(&sc->sc_lock);
1027 break;
1028
1029
1030 default:
1031 return ENXIO;
1032 }
1033 return error;
1034 }
1035
1036 static int
1037 ugenread(dev_t dev, struct uio *uio, int flag)
1038 {
1039 int endpt = UGENENDPOINT(dev);
1040 struct ugen_softc *sc;
1041 int error;
1042
1043 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
1044 return ENXIO;
1045 error = ugen_do_read(sc, endpt, uio, flag);
1046 ugenif_release(sc);
1047
1048 return error;
1049 }
1050
1051 Static int
1052 ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
1053 int flag)
1054 {
1055 struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
1056 uint32_t n;
1057 int error = 0;
1058 uint32_t tn;
1059 char *dbuf;
1060 struct usbd_xfer *xfer;
1061 usbd_status err;
1062
1063 UGENHIST_FUNC(); UGENHIST_CALLED();
1064
1065 DPRINTFN(5, "ugen%jd: %jd", device_unit(sc->sc_dev), endpt, 0, 0);
1066
1067 if (endpt == USB_CONTROL_ENDPOINT)
1068 return ENODEV;
1069
1070 KASSERT(sce->edesc);
1071 KASSERT(sce->pipeh);
1072
1073 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
1074 case UE_BULK:
1075 if (sce->state & UGEN_BULK_WB) {
1076 DPRINTFN(5, "BULK_WB req: %jd used: %jd",
1077 uio->uio_resid, sce->ra_wb_used, 0, 0);
1078 xfer = sce->ra_wb_xfer;
1079
1080 mutex_enter(&sc->sc_lock);
1081 if (sce->ra_wb_used == sce->limit - sce->ibuf &&
1082 flag & IO_NDELAY) {
1083 mutex_exit(&sc->sc_lock);
1084 return EWOULDBLOCK;
1085 }
1086 while (uio->uio_resid > 0 && !error) {
1087 while (sce->ra_wb_used ==
1088 sce->limit - sce->ibuf) {
1089 DPRINTFN(5, "sleep on %#jx",
1090 (uintptr_t)sce, 0, 0, 0);
1091 /* "ugenwb" */
1092 error = cv_timedwait_sig(&sce->cv,
1093 &sc->sc_lock, mstohz(sce->timeout));
1094 DPRINTFN(5, "woke, error=%d",
1095 error, 0, 0, 0);
1096 if (sc->sc_dying)
1097 error = EIO;
1098 if (error)
1099 break;
1100 }
1101
1102 /* Copy data from the process. */
1103 while (uio->uio_resid > 0 &&
1104 sce->ra_wb_used < sce->limit - sce->ibuf) {
1105 n = uimin(uio->uio_resid,
1106 (sce->limit - sce->ibuf)
1107 - sce->ra_wb_used);
1108 n = uimin(n, sce->limit - sce->fill);
1109 error = uiomove(sce->fill, n, uio);
1110 if (error)
1111 break;
1112 sce->fill += n;
1113 sce->ra_wb_used += n;
1114 if (sce->fill == sce->limit)
1115 sce->fill = sce->ibuf;
1116 }
1117
1118 /*
1119 * If the transfers stopped because the
1120 * buffer was empty, restart them.
1121 */
1122 if (sce->state & UGEN_RA_WB_STOP &&
1123 sce->ra_wb_used > 0) {
1124 dbuf = (char *)usbd_get_buffer(xfer);
1125 n = uimin(sce->ra_wb_used,
1126 sce->ra_wb_xferlen);
1127 tn = uimin(n, sce->limit - sce->cur);
1128 memcpy(dbuf, sce->cur, tn);
1129 dbuf += tn;
1130 if (n - tn > 0)
1131 memcpy(dbuf, sce->ibuf,
1132 n - tn);
1133 usbd_setup_xfer(xfer, sce, NULL, n,
1134 0, USBD_NO_TIMEOUT,
1135 ugen_bulkwb_intr);
1136 sce->state &= ~UGEN_RA_WB_STOP;
1137 err = usbd_transfer(xfer);
1138 if (err != USBD_IN_PROGRESS)
1139 /*
1140 * The transfer has not been
1141 * queued. Setting STOP
1142 * will make us try again
1143 * at the next read.
1144 */
1145 sce->state |= UGEN_RA_WB_STOP;
1146 }
1147 }
1148 mutex_exit(&sc->sc_lock);
1149 break;
1150 }
1151 error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
1152 0, 0, &xfer);
1153 if (error)
1154 return error;
1155 while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
1156 error = uiomove(sc->sc_buffer, n, uio);
1157 if (error)
1158 break;
1159 DPRINTFN(1, "transfer %jd bytes", n, 0, 0, 0);
1160 err = usbd_bulk_transfer(xfer, sce->pipeh, 0, sce->timeout,
1161 sc->sc_buffer, &n);
1162 if (err) {
1163 if (err == USBD_INTERRUPTED)
1164 error = EINTR;
1165 else if (err == USBD_TIMEOUT)
1166 error = ETIMEDOUT;
1167 else
1168 error = EIO;
1169 break;
1170 }
1171 }
1172 usbd_destroy_xfer(xfer);
1173 break;
1174 case UE_INTERRUPT:
1175 error = usbd_create_xfer(sce->pipeh,
1176 UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
1177 if (error)
1178 return error;
1179 while ((n = uimin(UGETW(sce->edesc->wMaxPacketSize),
1180 uio->uio_resid)) != 0) {
1181 error = uiomove(sc->sc_buffer, n, uio);
1182 if (error)
1183 break;
1184 DPRINTFN(1, "transfer %jd bytes", n, 0, 0, 0);
1185 err = usbd_intr_transfer(xfer, sce->pipeh, 0,
1186 sce->timeout, sc->sc_buffer, &n);
1187 if (err) {
1188 if (err == USBD_INTERRUPTED)
1189 error = EINTR;
1190 else if (err == USBD_TIMEOUT)
1191 error = ETIMEDOUT;
1192 else
1193 error = EIO;
1194 break;
1195 }
1196 }
1197 usbd_destroy_xfer(xfer);
1198 break;
1199 default:
1200 return ENXIO;
1201 }
1202 return error;
1203 }
1204
1205 static int
1206 ugenwrite(dev_t dev, struct uio *uio, int flag)
1207 {
1208 int endpt = UGENENDPOINT(dev);
1209 struct ugen_softc *sc;
1210 int error;
1211
1212 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
1213 return ENXIO;
1214 error = ugen_do_write(sc, endpt, uio, flag);
1215 ugenif_release(sc);
1216
1217 return error;
1218 }
1219
1220 static int
1221 ugen_activate(device_t self, enum devact act)
1222 {
1223 struct ugen_softc *sc = device_private(self);
1224
1225 switch (act) {
1226 case DVACT_DEACTIVATE:
1227 sc->sc_dying = 1;
1228 return 0;
1229 default:
1230 return EOPNOTSUPP;
1231 }
1232 }
1233
/*
 * Autoconf detach: stop new I/O, drain existing users, revoke open
 * device nodes, and release all per-endpoint resources.  Must run with
 * the kernel lock held (protects sc_is_open).
 */
static int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	DPRINTF("sc=%ju flags=%ju", (uintptr_t)sc, flags, 0, 0);

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	/*
	 * Fail if we're not forced to detach and userland has any
	 * endpoints open.
	 */
	if ((flags & DETACH_FORCE) == 0) {
		for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
			if (sc->sc_is_open[i])
				return EBUSY;
		}
	}

	/* Prevent new users. Prevent suspend/resume. */
	sc->sc_dying = 1;
	pmf_device_deregister(self);

	/*
	 * If we never finished attaching, skip nixing endpoints and
	 * users because there aren't any.
	 */
	if (!sc->sc_attached)
		goto out;

	/* Abort all pipes.  This wakes any transfers blocked in the HC. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	/*
	 * Wait for users to drain. Before this point there can be no
	 * more I/O operations started because we set sc_dying; after
	 * this, there can be no more I/O operations in progress, so it
	 * will be safe to free things.
	 */
	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
			for (dir = OUT; dir <= IN; dir++)
				cv_broadcast(&sc->sc_endpoints[i][dir].cv);
		}
		/* Wait for processes to go away. */
		do {
			cv_wait(&sc->sc_detach_cv, &sc->sc_lock);
		} while (sc->sc_refcnt >= 0);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/*
	 * Nuke the vnodes for any open instances (calls ugenclose, but
	 * with no effect because we already set sc_dying).
	 */
	mn = sc->sc_unit * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	/* Actually close any lingering pipes. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++)
		ugen_do_close(sc, FREAD|FWRITE, i);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);
	ugenif_put_unit(sc);

	/* Destroy per-endpoint selinfo/condvars and the softc locks. */
out:	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}
1329
/*
 * Completion callback for interrupt-IN transfers: append the received
 * bytes to the endpoint's clist queue and wake sleeping readers and
 * pollers.
 */
Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	/* Pipe is being aborted or closed; deliver nothing. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF("status=%jd", status, 0, 0, 0);
		/* Async stall clear: we are in xfer-completion context. */
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, "xfer=%#jx status=%d count=%d",
	    (uintptr_t)xfer, status, count, 0);
	DPRINTFN(5, "          data = %02x %02x %02x",
	    ibuf[0], ibuf[1], ibuf[2], 0);

	/* b_to_q may drop bytes if the queue is full; that is accepted. */
	mutex_enter(&sc->sc_lock);
	(void)b_to_q(ibuf, count, &sce->q);
	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1364
1365 Static void
1366 ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
1367 usbd_status status)
1368 {
1369 struct isoreq *req = addr;
1370 struct ugen_endpoint *sce = req->sce;
1371 struct ugen_softc *sc = sce->sc;
1372 uint32_t count, n;
1373 int i, isize;
1374
1375 UGENHIST_FUNC(); UGENHIST_CALLED();
1376
1377 /* Return if we are aborting. */
1378 if (status == USBD_CANCELLED)
1379 return;
1380
1381 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1382 DPRINTFN(5, "xfer %ld, count=%d",
1383 (long)(req - sce->isoreqs), count, 0, 0);
1384
1385 mutex_enter(&sc->sc_lock);
1386
1387 /* throw away oldest input if the buffer is full */
1388 if (sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1389 sce->cur += count;
1390 if (sce->cur >= sce->limit)
1391 sce->cur = sce->ibuf + (sce->limit - sce->cur);
1392 DPRINTFN(5, "throwing away %jd bytes",
1393 count, 0, 0, 0);
1394 }
1395
1396 isize = UGETW(sce->edesc->wMaxPacketSize);
1397 for (i = 0; i < UGEN_NISORFRMS; i++) {
1398 uint32_t actlen = req->sizes[i];
1399 char const *tbuf = (char const *)req->dmabuf + isize * i;
1400
1401 /* copy data to buffer */
1402 while (actlen > 0) {
1403 n = uimin(actlen, sce->limit - sce->fill);
1404 memcpy(sce->fill, tbuf, n);
1405
1406 tbuf += n;
1407 actlen -= n;
1408 sce->fill += n;
1409 if (sce->fill == sce->limit)
1410 sce->fill = sce->ibuf;
1411 }
1412
1413 /* setup size for next transfer */
1414 req->sizes[i] = isize;
1415 }
1416
1417 usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0,
1418 ugen_isoc_rintr);
1419 (void)usbd_transfer(xfer);
1420
1421 cv_signal(&sce->cv);
1422 mutex_exit(&sc->sc_lock);
1423 selnotify(&sce->rsel, 0, 0);
1424 }
1425
/*
 * Completion callback for bulk read-ahead (RA) transfers: move the
 * received data into the endpoint ring buffer, queue the next read if
 * there is still room, and wake readers/pollers.  UGEN_RA_WB_STOP is
 * set when the buffer is full or a resubmit fails; ugen_do_read
 * restarts the stream in that case.
 */
Static void
ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char const *tbuf;
	usbd_status err;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF("status=%jd", status, 0, 0, 0);
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer.  Split into two memcpys at the wrap point. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = uimin(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		/* Remainder goes at the start of the ring. */
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce, NULL, uimin(n, sce->ra_wb_xferlen), 0,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("error=%d", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1493
/*
 * Completion callback for bulk write-behind (WB) transfers: the data
 * just written is consumed from the endpoint ring buffer; if more data
 * is buffered, copy it into the xfer and submit the next write, then
 * wake writers/pollers.  UGEN_RA_WB_STOP is set when the buffer runs
 * dry or a resubmit fails; ugen_do_write restarts the stream.
 */
Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF("status=%jd", status, 0, 0, 0);
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers, wrapping cur past the end of the ring. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer (two pieces if it wraps) */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = uimin(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = uimin(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
		    ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("error=%d", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
1560
/*
 * Select alternate setting altno on interface ifaceidx and rebuild the
 * softc endpoint table from the (possibly different) endpoints of the
 * new setting.
 */
Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt, endptno, endpt;
	int dir;

	UGENHIST_FUNC(); UGENHIST_CALLED();

	DPRINTFN(15, "%d %d", ifaceidx, altno, 0, 0);

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return err;
	if (ifaceidx < 0 || ifaceidx >= niface)
		return USBD_INVAL;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return err;
	/*
	 * NOTE(review): the endpoint count is queried both before and
	 * after usbd_set_interface; this first result is overwritten
	 * below and appears redundant -- confirm before removing.
	 */
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return err;

	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	/* Drop stale endpoint entries before repopulating. */
	ugen_clear_endpoints(sc);

	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return 0;
}
1611
1612 /* Retrieve a complete descriptor for a certain device and index. */
1613 Static usb_config_descriptor_t *
1614 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1615 {
1616 usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1617 int len;
1618 usbd_status err;
1619
1620 UGENHIST_FUNC(); UGENHIST_CALLED();
1621
1622 if (index == USB_CURRENT_CONFIG_INDEX) {
1623 tdesc = usbd_get_config_descriptor(sc->sc_udev);
1624 if (tdesc == NULL)
1625 return NULL;
1626 len = UGETW(tdesc->wTotalLength);
1627 if (lenp)
1628 *lenp = len;
1629 cdesc = kmem_alloc(len, KM_SLEEP);
1630 memcpy(cdesc, tdesc, len);
1631 DPRINTFN(5, "current, len=%jd", len, 0, 0, 0);
1632 } else {
1633 err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1634 if (err)
1635 return 0;
1636 len = UGETW(cdescr.wTotalLength);
1637 DPRINTFN(5, "index=%jd, len=%jd", index, len, 0, 0);
1638 if (lenp)
1639 *lenp = len;
1640 cdesc = kmem_alloc(len, KM_SLEEP);
1641 err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1642 if (err) {
1643 kmem_free(cdesc, len);
1644 return 0;
1645 }
1646 }
1647 return cdesc;
1648 }
1649
1650 Static int
1651 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1652 {
1653 struct usbd_interface *iface;
1654 usbd_status err;
1655
1656 err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1657 if (err)
1658 return -1;
1659 return usbd_get_interface_altindex(iface);
1660 }
1661
1662 Static int
1663 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1664 void *addr, int flag, struct lwp *l)
1665 {
1666 struct ugen_endpoint *sce;
1667 usbd_status err;
1668 struct usbd_interface *iface;
1669 struct usb_config_desc *cd;
1670 usb_config_descriptor_t *cdesc;
1671 struct usb_interface_desc *id;
1672 usb_interface_descriptor_t *idesc;
1673 struct usb_endpoint_desc *ed;
1674 usb_endpoint_descriptor_t *edesc;
1675 struct usb_alt_interface *ai;
1676 struct usb_string_desc *si;
1677 uint8_t conf, alt;
1678 int cdesclen;
1679 int error;
1680 int dir;
1681
1682 UGENHIST_FUNC(); UGENHIST_CALLED();
1683
1684 KASSERT(KERNEL_LOCKED_P()); /* ugen_set_config */
1685
1686 DPRINTFN(5, "cmd=%08jx", cmd, 0, 0, 0);
1687
1688 switch (cmd) {
1689 case FIONBIO:
1690 /* All handled in the upper FS layer. */
1691 return 0;
1692 case USB_SET_SHORT_XFER:
1693 if (endpt == USB_CONTROL_ENDPOINT)
1694 return EINVAL;
1695 /* This flag only affects read */
1696 sce = &sc->sc_endpoints[endpt][IN];
1697 if (sce == NULL || sce->pipeh == NULL)
1698 return EINVAL;
1699 if (*(int *)addr)
1700 sce->state |= UGEN_SHORT_OK;
1701 else
1702 sce->state &= ~UGEN_SHORT_OK;
1703 return 0;
1704 case USB_SET_TIMEOUT:
1705 for (dir = OUT; dir <= IN; dir++) {
1706 sce = &sc->sc_endpoints[endpt][dir];
1707 if (sce == NULL)
1708 return EINVAL;
1709
1710 sce->timeout = *(int *)addr;
1711 }
1712 return 0;
1713 case USB_SET_BULK_RA:
1714 if (endpt == USB_CONTROL_ENDPOINT)
1715 return EINVAL;
1716 sce = &sc->sc_endpoints[endpt][IN];
1717 if (sce == NULL || sce->pipeh == NULL)
1718 return EINVAL;
1719 edesc = sce->edesc;
1720 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1721 return EINVAL;
1722
1723 if (*(int *)addr) {
1724 /* Only turn RA on if it's currently off. */
1725 if (sce->state & UGEN_BULK_RA)
1726 return 0;
1727 KASSERT(sce->ra_wb_xfer == NULL);
1728 KASSERT(sce->ibuf == NULL);
1729
1730 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1731 /* shouldn't happen */
1732 return EINVAL;
1733 error = usbd_create_xfer(sce->pipeh,
1734 sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
1735 if (error)
1736 return error;
1737 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1738 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1739 sce->fill = sce->cur = sce->ibuf;
1740 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1741 sce->ra_wb_used = 0;
1742 sce->state |= UGEN_BULK_RA;
1743 sce->state &= ~UGEN_RA_WB_STOP;
1744 /* Now start reading. */
1745 usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL,
1746 uimin(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1747 0, USBD_NO_TIMEOUT, ugen_bulkra_intr);
1748 err = usbd_transfer(sce->ra_wb_xfer);
1749 if (err != USBD_IN_PROGRESS) {
1750 sce->state &= ~UGEN_BULK_RA;
1751 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1752 sce->ibuf = NULL;
1753 usbd_destroy_xfer(sce->ra_wb_xfer);
1754 sce->ra_wb_xfer = NULL;
1755 return EIO;
1756 }
1757 } else {
1758 /* Only turn RA off if it's currently on. */
1759 if (!(sce->state & UGEN_BULK_RA))
1760 return 0;
1761
1762 sce->state &= ~UGEN_BULK_RA;
1763 usbd_abort_pipe(sce->pipeh);
1764 usbd_destroy_xfer(sce->ra_wb_xfer);
1765 sce->ra_wb_xfer = NULL;
1766 /*
1767 * XXX Discard whatever's in the buffer, but we
1768 * should keep it around and drain the buffer
1769 * instead.
1770 */
1771 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1772 sce->ibuf = NULL;
1773 }
1774 return 0;
1775 case USB_SET_BULK_WB:
1776 if (endpt == USB_CONTROL_ENDPOINT)
1777 return EINVAL;
1778 sce = &sc->sc_endpoints[endpt][OUT];
1779 if (sce == NULL || sce->pipeh == NULL)
1780 return EINVAL;
1781 edesc = sce->edesc;
1782 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1783 return EINVAL;
1784
1785 if (*(int *)addr) {
1786 /* Only turn WB on if it's currently off. */
1787 if (sce->state & UGEN_BULK_WB)
1788 return 0;
1789 KASSERT(sce->ra_wb_xfer == NULL);
1790 KASSERT(sce->ibuf == NULL);
1791
1792 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1793 /* shouldn't happen */
1794 return EINVAL;
1795 error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize,
1796 0, 0, &sce->ra_wb_xfer);
1797 /* XXX check error??? */
1798 sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1799 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
1800 sce->fill = sce->cur = sce->ibuf;
1801 sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1802 sce->ra_wb_used = 0;
1803 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1804 } else {
1805 /* Only turn WB off if it's currently on. */
1806 if (!(sce->state & UGEN_BULK_WB))
1807 return 0;
1808
1809 sce->state &= ~UGEN_BULK_WB;
1810 /*
1811 * XXX Discard whatever's in the buffer, but we
1812 * should keep it around and keep writing to
1813 * drain the buffer instead.
1814 */
1815 usbd_abort_pipe(sce->pipeh);
1816 usbd_destroy_xfer(sce->ra_wb_xfer);
1817 sce->ra_wb_xfer = NULL;
1818 kmem_free(sce->ibuf, sce->ra_wb_bufsize);
1819 sce->ibuf = NULL;
1820 }
1821 return 0;
1822 case USB_SET_BULK_RA_OPT:
1823 case USB_SET_BULK_WB_OPT:
1824 {
1825 struct usb_bulk_ra_wb_opt *opt;
1826
1827 if (endpt == USB_CONTROL_ENDPOINT)
1828 return EINVAL;
1829 opt = (struct usb_bulk_ra_wb_opt *)addr;
1830 if (cmd == USB_SET_BULK_RA_OPT)
1831 sce = &sc->sc_endpoints[endpt][IN];
1832 else
1833 sce = &sc->sc_endpoints[endpt][OUT];
1834 if (sce == NULL || sce->pipeh == NULL)
1835 return EINVAL;
1836 if (opt->ra_wb_buffer_size < 1 ||
1837 opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1838 opt->ra_wb_request_size < 1 ||
1839 opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1840 return EINVAL;
1841 /*
1842 * XXX These changes do not take effect until the
1843 * next time RA/WB mode is enabled but they ought to
1844 * take effect immediately.
1845 */
1846 sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1847 sce->ra_wb_reqsize = opt->ra_wb_request_size;
1848 return 0;
1849 }
1850 default:
1851 break;
1852 }
1853
1854 if (endpt != USB_CONTROL_ENDPOINT)
1855 return EINVAL;
1856
1857 switch (cmd) {
1858 #ifdef UGEN_DEBUG
1859 case USB_SETDEBUG:
1860 ugendebug = *(int *)addr;
1861 break;
1862 #endif
1863 case USB_GET_CONFIG:
1864 err = usbd_get_config(sc->sc_udev, &conf);
1865 if (err)
1866 return EIO;
1867 *(int *)addr = conf;
1868 break;
1869 case USB_SET_CONFIG:
1870 if (!(flag & FWRITE))
1871 return EPERM;
1872 err = ugen_set_config(sc, *(int *)addr, 1);
1873 switch (err) {
1874 case USBD_NORMAL_COMPLETION:
1875 break;
1876 case USBD_IN_USE:
1877 return EBUSY;
1878 default:
1879 return EIO;
1880 }
1881 break;
1882 case USB_GET_ALTINTERFACE:
1883 ai = (struct usb_alt_interface *)addr;
1884 err = usbd_device2interface_handle(sc->sc_udev,
1885 ai->uai_interface_index, &iface);
1886 if (err)
1887 return EINVAL;
1888 idesc = usbd_get_interface_descriptor(iface);
1889 if (idesc == NULL)
1890 return EIO;
1891 ai->uai_alt_no = idesc->bAlternateSetting;
1892 break;
1893 case USB_SET_ALTINTERFACE:
1894 if (!(flag & FWRITE))
1895 return EPERM;
1896 ai = (struct usb_alt_interface *)addr;
1897 err = usbd_device2interface_handle(sc->sc_udev,
1898 ai->uai_interface_index, &iface);
1899 if (err)
1900 return EINVAL;
1901 err = ugen_set_interface(sc, ai->uai_interface_index,
1902 ai->uai_alt_no);
1903 if (err)
1904 return EINVAL;
1905 break;
1906 case USB_GET_NO_ALT:
1907 ai = (struct usb_alt_interface *)addr;
1908 cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
1909 if (cdesc == NULL)
1910 return EINVAL;
1911 idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1912 if (idesc == NULL) {
1913 kmem_free(cdesc, cdesclen);
1914 return EINVAL;
1915 }
1916 ai->uai_alt_no = usbd_get_no_alts(cdesc,
1917 idesc->bInterfaceNumber);
1918 kmem_free(cdesc, cdesclen);
1919 break;
1920 case USB_GET_DEVICE_DESC:
1921 *(usb_device_descriptor_t *)addr =
1922 *usbd_get_device_descriptor(sc->sc_udev);
1923 break;
1924 case USB_GET_CONFIG_DESC:
1925 cd = (struct usb_config_desc *)addr;
1926 cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
1927 if (cdesc == NULL)
1928 return EINVAL;
1929 cd->ucd_desc = *cdesc;
1930 kmem_free(cdesc, cdesclen);
1931 break;
1932 case USB_GET_INTERFACE_DESC:
1933 id = (struct usb_interface_desc *)addr;
1934 cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
1935 if (cdesc == NULL)
1936 return EINVAL;
1937 if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1938 id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1939 alt = ugen_get_alt_index(sc, id->uid_interface_index);
1940 else
1941 alt = id->uid_alt_index;
1942 idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1943 if (idesc == NULL) {
1944 kmem_free(cdesc, cdesclen);
1945 return EINVAL;
1946 }
1947 id->uid_desc = *idesc;
1948 kmem_free(cdesc, cdesclen);
1949 break;
1950 case USB_GET_ENDPOINT_DESC:
1951 ed = (struct usb_endpoint_desc *)addr;
1952 cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
1953 if (cdesc == NULL)
1954 return EINVAL;
1955 if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1956 ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1957 alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1958 else
1959 alt = ed->ued_alt_index;
1960 edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1961 alt, ed->ued_endpoint_index);
1962 if (edesc == NULL) {
1963 kmem_free(cdesc, cdesclen);
1964 return EINVAL;
1965 }
1966 ed->ued_desc = *edesc;
1967 kmem_free(cdesc, cdesclen);
1968 break;
1969 case USB_GET_FULL_DESC:
1970 {
1971 int len;
1972 struct iovec iov;
1973 struct uio uio;
1974 struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1975
1976 cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
1977 if (cdesc == NULL)
1978 return EINVAL;
1979 len = cdesclen;
1980 if (len > fd->ufd_size)
1981 len = fd->ufd_size;
1982 iov.iov_base = (void *)fd->ufd_data;
1983 iov.iov_len = len;
1984 uio.uio_iov = &iov;
1985 uio.uio_iovcnt = 1;
1986 uio.uio_resid = len;
1987 uio.uio_offset = 0;
1988 uio.uio_rw = UIO_READ;
1989 uio.uio_vmspace = l->l_proc->p_vmspace;
1990 error = uiomove((void *)cdesc, len, &uio);
1991 kmem_free(cdesc, cdesclen);
1992 return error;
1993 }
1994 case USB_GET_STRING_DESC: {
1995 int len;
1996 si = (struct usb_string_desc *)addr;
1997 err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1998 si->usd_language_id, &si->usd_desc, &len);
1999 if (err)
2000 return EINVAL;
2001 break;
2002 }
2003 case USB_DO_REQUEST:
2004 {
2005 struct usb_ctl_request *ur = (void *)addr;
2006 int len = UGETW(ur->ucr_request.wLength);
2007 struct iovec iov;
2008 struct uio uio;
2009 void *ptr = 0;
2010 usbd_status xerr;
2011
2012 error = 0;
2013
2014 if (!(flag & FWRITE))
2015 return EPERM;
2016 /* Avoid requests that would damage the bus integrity. */
2017 if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
2018 ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
2019 (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
2020 ur->ucr_request.bRequest == UR_SET_CONFIG) ||
2021 (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
2022 ur->ucr_request.bRequest == UR_SET_INTERFACE))
2023 return EINVAL;
2024
2025 if (len < 0 || len > 32767)
2026 return EINVAL;
2027 if (len != 0) {
2028 iov.iov_base = (void *)ur->ucr_data;
2029 iov.iov_len = len;
2030 uio.uio_iov = &iov;
2031 uio.uio_iovcnt = 1;
2032 uio.uio_resid = len;
2033 uio.uio_offset = 0;
2034 uio.uio_rw =
2035 ur->ucr_request.bmRequestType & UT_READ ?
2036 UIO_READ : UIO_WRITE;
2037 uio.uio_vmspace = l->l_proc->p_vmspace;
2038 ptr = kmem_alloc(len, KM_SLEEP);
2039 if (uio.uio_rw == UIO_WRITE) {
2040 error = uiomove(ptr, len, &uio);
2041 if (error)
2042 goto ret;
2043 }
2044 }
2045 sce = &sc->sc_endpoints[endpt][IN];
2046 xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
2047 ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
2048 if (xerr) {
2049 error = EIO;
2050 goto ret;
2051 }
2052 if (len != 0) {
2053 if (uio.uio_rw == UIO_READ) {
2054 size_t alen = uimin(len, ur->ucr_actlen);
2055 error = uiomove(ptr, alen, &uio);
2056 if (error)
2057 goto ret;
2058 }
2059 }
2060 ret:
2061 if (ptr)
2062 kmem_free(ptr, len);
2063 return error;
2064 }
2065 case USB_GET_DEVICEINFO:
2066 usbd_fill_deviceinfo(sc->sc_udev,
2067 (struct usb_device_info *)addr, 0);
2068 break;
2069 case USB_GET_DEVICEINFO_OLD:
2070 {
2071 int ret;
2072 MODULE_HOOK_CALL(usb_subr_fill_30_hook,
2073 (sc->sc_udev, (struct usb_device_info_old *)addr, 0,
2074 usbd_devinfo_vp, usbd_printBCD),
2075 enosys(), ret);
2076 if (ret == 0)
2077 return 0;
2078 return EINVAL;
2079 }
2080 default:
2081 return EINVAL;
2082 }
2083 return 0;
2084 }
2085
2086 static int
2087 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
2088 {
2089 int endpt = UGENENDPOINT(dev);
2090 struct ugen_softc *sc;
2091 int error;
2092
2093 if ((sc = ugenif_acquire(UGENUNIT(dev))) == 0)
2094 return ENXIO;
2095 error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
2096 ugenif_release(sc);
2097
2098 return error;
2099 }
2100
/*
 * cdevsw poll entry point.  Reports readability/writability per
 * transfer type; for plain bulk endpoints (no RA/WB) we cannot tell
 * whether I/O would block, so we always report ready.
 */
static int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return POLLHUP;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) {
		revents |= POLLERR;
		goto out;
	}

	/*
	 * sce_in/sce_out point into the endpoint array, so they are
	 * never NULL; the checks below are defensive.
	 */
	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	KASSERT(sce_in->edesc || sce_out->edesc);
	KASSERT(sce_in->pipeh || sce_out->pipeh);

	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Readable when the clist queue has data. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Readable when the ring buffer is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Read-ahead: readable when buffered. */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Write-behind: writable when not full. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

out:	ugenif_release(sc);
	return revents;
}
2187
2188 static void
2189 filt_ugenrdetach(struct knote *kn)
2190 {
2191 struct ugen_endpoint *sce = kn->kn_hook;
2192 struct ugen_softc *sc = sce->sc;
2193
2194 mutex_enter(&sc->sc_lock);
2195 selremove_knote(&sce->rsel, kn);
2196 mutex_exit(&sc->sc_lock);
2197 }
2198
2199 static int
2200 filt_ugenread_intr(struct knote *kn, long hint)
2201 {
2202 struct ugen_endpoint *sce = kn->kn_hook;
2203 struct ugen_softc *sc = sce->sc;
2204 int ret;
2205
2206 mutex_enter(&sc->sc_lock);
2207 if (sc->sc_dying) {
2208 ret = 0;
2209 } else {
2210 kn->kn_data = sce->q.c_cc;
2211 ret = kn->kn_data > 0;
2212 }
2213 mutex_exit(&sc->sc_lock);
2214
2215 return ret;
2216 }
2217
2218 static int
2219 filt_ugenread_isoc(struct knote *kn, long hint)
2220 {
2221 struct ugen_endpoint *sce = kn->kn_hook;
2222 struct ugen_softc *sc = sce->sc;
2223 int ret;
2224
2225 mutex_enter(&sc->sc_lock);
2226 if (sc->sc_dying) {
2227 ret = 0;
2228 } else if (sce->cur == sce->fill) {
2229 ret = 0;
2230 } else if (sce->cur < sce->fill) {
2231 kn->kn_data = sce->fill - sce->cur;
2232 ret = 1;
2233 } else {
2234 kn->kn_data = (sce->limit - sce->cur) +
2235 (sce->fill - sce->ibuf);
2236 ret = 1;
2237 }
2238 mutex_exit(&sc->sc_lock);
2239
2240 return ret;
2241 }
2242
2243 static int
2244 filt_ugenread_bulk(struct knote *kn, long hint)
2245 {
2246 struct ugen_endpoint *sce = kn->kn_hook;
2247 struct ugen_softc *sc = sce->sc;
2248 int ret;
2249
2250 mutex_enter(&sc->sc_lock);
2251 if (sc->sc_dying) {
2252 ret = 0;
2253 } else if (!(sce->state & UGEN_BULK_RA)) {
2254 /*
2255 * We have no easy way of determining if a read will
2256 * yield any data or a write will happen.
2257 * So, emulate "seltrue".
2258 */
2259 ret = filt_seltrue(kn, hint);
2260 } else if (sce->ra_wb_used == 0) {
2261 ret = 0;
2262 } else {
2263 kn->kn_data = sce->ra_wb_used;
2264 ret = 1;
2265 }
2266 mutex_exit(&sc->sc_lock);
2267
2268 return ret;
2269 }
2270
2271 static int
2272 filt_ugenwrite_bulk(struct knote *kn, long hint)
2273 {
2274 struct ugen_endpoint *sce = kn->kn_hook;
2275 struct ugen_softc *sc = sce->sc;
2276 int ret;
2277
2278 mutex_enter(&sc->sc_lock);
2279 if (sc->sc_dying) {
2280 ret = 0;
2281 } else if (!(sce->state & UGEN_BULK_WB)) {
2282 /*
2283 * We have no easy way of determining if a read will
2284 * yield any data or a write will happen.
2285 * So, emulate "seltrue".
2286 */
2287 ret = filt_seltrue(kn, hint);
2288 } else if (sce->ra_wb_used == sce->limit - sce->ibuf) {
2289 ret = 0;
2290 } else {
2291 kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2292 ret = 1;
2293 }
2294 mutex_exit(&sc->sc_lock);
2295
2296 return ret;
2297 }
2298
/*
 * kqueue filter operation tables, selected per transfer type in
 * ugenkqfilter.  All four share filt_ugenrdetach: both read and write
 * knotes are hung off the endpoint's rsel selinfo.
 */
static const struct filterops ugenread_intr_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_intr,
};

static const struct filterops ugenread_isoc_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_isoc,
};

static const struct filterops ugenread_bulk_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_bulk,
};

static const struct filterops ugenwrite_bulk_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenwrite_bulk,
};
2326
2327 static int
2328 ugenkqfilter(dev_t dev, struct knote *kn)
2329 {
2330 struct ugen_softc *sc;
2331 struct ugen_endpoint *sce;
2332 struct selinfo *sip;
2333 int error;
2334
2335 if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
2336 return ENXIO;
2337
2338 if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) {
2339 error = ENODEV;
2340 goto out;
2341 }
2342
2343 switch (kn->kn_filter) {
2344 case EVFILT_READ:
2345 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
2346 if (sce == NULL) {
2347 error = EINVAL;
2348 goto out;
2349 }
2350
2351 sip = &sce->rsel;
2352 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2353 case UE_INTERRUPT:
2354 kn->kn_fop = &ugenread_intr_filtops;
2355 break;
2356 case UE_ISOCHRONOUS:
2357 kn->kn_fop = &ugenread_isoc_filtops;
2358 break;
2359 case UE_BULK:
2360 kn->kn_fop = &ugenread_bulk_filtops;
2361 break;
2362 default:
2363 error = EINVAL;
2364 goto out;
2365 }
2366 break;
2367
2368 case EVFILT_WRITE:
2369 sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
2370 if (sce == NULL) {
2371 error = EINVAL;
2372 goto out;
2373 }
2374
2375 sip = &sce->rsel;
2376 switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2377 case UE_INTERRUPT:
2378 case UE_ISOCHRONOUS:
2379 /* XXX poll doesn't support this */
2380 error = EINVAL;
2381 goto out;
2382
2383 case UE_BULK:
2384 kn->kn_fop = &ugenwrite_bulk_filtops;
2385 break;
2386 default:
2387 error = EINVAL;
2388 goto out;
2389 }
2390 break;
2391
2392 default:
2393 error = EINVAL;
2394 goto out;
2395 }
2396
2397 kn->kn_hook = sce;
2398
2399 mutex_enter(&sc->sc_lock);
2400 selrecord_knote(sip, kn);
2401 mutex_exit(&sc->sc_lock);
2402
2403 error = 0;
2404
2405 out: ugenif_release(sc);
2406 return error;
2407 }
2408
/* Module glue: "ugen" is a driver-class module with no dependencies. */
MODULE(MODULE_CLASS_DRIVER, ugen, NULL);

/*
 * Handle module(9) commands.  Only MODULE_CMD_INIT is implemented:
 * it sets up the global ugenif lock and unit tree.  All other
 * commands, including MODULE_CMD_FINI, fall through to ENOTTY, so
 * the module cannot be unloaded once initialized.
 */
static int
ugen_modcmd(modcmd_t cmd, void *aux)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		/* Presumably ugenif.lock guards ugenif.tree -- both are
		 * initialized together here; confirm against users of
		 * ugenif elsewhere in the file. */
		mutex_init(&ugenif.lock, MUTEX_DEFAULT, IPL_NONE);
		rb_tree_init(&ugenif.tree, &ugenif_tree_ops);
		return 0;
	default:
		return ENOTTY;
	}
}
2424