/*	$NetBSD: ugen.c,v 1.162 2021/09/07 10:42:59 riastradh Exp $	*/

/*
 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Lennart Augustsson (lennart@augustsson.net) at
 * Carlstedt Research & Technology.
 *
 * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and the Department of the Interior National Business
 * Center under agreement number NBCHC050166.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.162 2021/09/07 10:42:59 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
#include "opt_usb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/conf.h>
#include <sys/tty.h>
#include <sys/file.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/poll.h>
#include <sys/compat_stub.h>
#include <sys/module.h>
#include <sys/rbtree.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#include "ioconf.h"

#ifdef UGEN_DEBUG
#define DPRINTF(x)	if (ugendebug) printf x
#define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
int	ugendebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define	UGEN_CHUNK	128	/* chunk size for read */
#define	UGEN_IBSIZE	1020	/* buffer size */
#define	UGEN_BBSIZE	1024

#define UGEN_NISOREQS	4	/* number of outstanding xfer requests */
#define UGEN_NISORFRMS	8	/* number of transactions per req */
#define UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)
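/*
 * With UGEN_NISOREQS transfers of UGEN_NISORFRMS frames each, up to 32
 * isochronous frames are in flight at once; the per-endpoint ring
 * buffer allocated in ugenopen() holds UGEN_NISOFRAMES maximum-size
 * packets.  If the reader falls behind, ugen_isoc_rintr() discards the
 * oldest data rather than stalling the pipe.
 */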

#define UGEN_BULK_RA_WB_BUFSIZE	16384	/* default buffer size */
#define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
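/*
 * Both sizes may be tuned per endpoint with the USB_SET_BULK_RA_OPT and
 * USB_SET_BULK_WB_OPT ioctls handled in ugen_do_ioctl() below; requested
 * buffers are bounded by UGEN_BULK_RA_WB_BUFMAX.
 */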

struct isoreq {
	struct ugen_endpoint *sce;
	struct usbd_xfer *xfer;
	void *dmabuf;
	uint16_t sizes[UGEN_NISORFRMS];
};

struct ugen_endpoint {
	struct ugen_softc *sc;
	usb_endpoint_descriptor_t *edesc;
	struct usbd_interface *iface;
	int state;
#define UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;
	struct clist q;
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	uint32_t timeout;
	uint32_t ra_wb_bufsize;	/* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize;	/* requested xfer length for RA/WB */
	uint32_t ra_wb_used;	/* how much is in buffer */
	uint32_t ra_wb_xferlen;	/* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;
	struct isoreq isoreqs[UGEN_NISOREQS];
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;
	kcondvar_t cv;
};
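/*
 * For isochronous and bulk RA/WB endpoints, ibuf/fill/limit/cur form a
 * ring: data is produced at fill and consumed at cur, each wrapping
 * from limit back to ibuf.  The isochronous ring is empty when
 * cur == fill; RA/WB mode instead tracks occupancy in ra_wb_used so a
 * full ring can be told apart from an empty one.
 */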

struct ugen_softc {
	device_t sc_dev;		/* base device */
	struct usbd_device *sc_udev;
	struct rb_node sc_node;
	unsigned sc_unit;

	kmutex_t sc_lock;
	kcondvar_t sc_detach_cv;

	char sc_is_open[USB_MAX_ENDPOINTS];
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN 1

	int sc_refcnt;
	char sc_buffer[UGEN_BBSIZE];
	u_char sc_dying;
	u_char sc_attached;
};

static struct {
	kmutex_t lock;
	rb_tree_t tree;
} ugenif __cacheline_aligned;
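/*
 * Global table of attached ugen instances, keyed by unit number and
 * guarded by ugenif.lock; lookups from the cdevsw entry points go
 * through ugenif_acquire()/ugenif_release() below.
 */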

static int
compare_ugen(void *cookie, const void *vsca, const void *vscb)
{
	const struct ugen_softc *sca = vsca;
	const struct ugen_softc *scb = vscb;

	if (sca->sc_unit < scb->sc_unit)
		return -1;
	if (sca->sc_unit > scb->sc_unit)
		return +1;
	return 0;
}

static int
compare_ugen_key(void *cookie, const void *vsc, const void *vk)
{
	const struct ugen_softc *sc = vsc;
	const unsigned *k = vk;

	if (sc->sc_unit < *k)
		return -1;
	if (sc->sc_unit > *k)
		return +1;
	return 0;
}

static const rb_tree_ops_t ugenif_tree_ops = {
	.rbto_compare_nodes = compare_ugen,
	.rbto_compare_key = compare_ugen_key,
	.rbto_node_offset = offsetof(struct ugen_softc, sc_node),
};

static void
ugenif_get_unit(struct ugen_softc *sc)
{
	struct ugen_softc *sc0;
	unsigned i;

	mutex_enter(&ugenif.lock);
	for (i = 0, sc0 = RB_TREE_MIN(&ugenif.tree);
	     sc0 != NULL && i == sc0->sc_unit;
	     i++, sc0 = RB_TREE_NEXT(&ugenif.tree, sc0))
		KASSERT(i < UINT_MAX);
	KASSERT(rb_tree_find_node(&ugenif.tree, &i) == NULL);
	sc->sc_unit = i;
	sc0 = rb_tree_insert_node(&ugenif.tree, sc);
	KASSERT(sc0 == sc);
	KASSERT(rb_tree_find_node(&ugenif.tree, &i) == sc);
	mutex_exit(&ugenif.lock);
}
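/*
 * The scan above walks the tree in unit order and stops at the first
 * gap in the numbering, so the lowest unused unit is always recycled
 * for a newly attached instance.
 */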

static void
ugenif_put_unit(struct ugen_softc *sc)
{

	mutex_enter(&ugenif.lock);
	KASSERT(rb_tree_find_node(&ugenif.tree, &sc->sc_unit) == sc);
	rb_tree_remove_node(&ugenif.tree, sc);
	sc->sc_unit = -1;
	mutex_exit(&ugenif.lock);
}

static struct ugen_softc *
ugenif_acquire(unsigned unit)
{
	struct ugen_softc *sc;

	mutex_enter(&ugenif.lock);
	sc = rb_tree_find_node(&ugenif.tree, &unit);
	if (sc == NULL)
		goto out;
	mutex_enter(&sc->sc_lock);
	if (sc->sc_dying) {
		mutex_exit(&sc->sc_lock);
		sc = NULL;
		goto out;
	}
	KASSERT(sc->sc_refcnt < INT_MAX);
	sc->sc_refcnt++;
	mutex_exit(&sc->sc_lock);
out:	mutex_exit(&ugenif.lock);

	return sc;
}

static void
ugenif_release(struct ugen_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt < 0)
		cv_broadcast(&sc->sc_detach_cv);
	mutex_exit(&sc->sc_lock);
}
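/*
 * sc_refcnt only drops below zero once ugen_detach() has taken its own
 * reference away; the final release then wakes the detaching thread
 * sleeping on sc_detach_cv.
 */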

static dev_type_open(ugenopen);
static dev_type_close(ugenclose);
static dev_type_read(ugenread);
static dev_type_write(ugenwrite);
static dev_type_ioctl(ugenioctl);
static dev_type_poll(ugenpoll);
static dev_type_kqfilter(ugenkqfilter);

const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
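/*
 * Userland reaches these entry points through the character device
 * nodes described in ugen(4), e.g. /dev/ugen0.00 for the control
 * endpoint of unit 0 and /dev/ugen0.01 for its endpoint 1.
 */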

Static void ugenintr(struct usbd_xfer *, void *,
		     usbd_status);
Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
			    usbd_status);
Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
			     usbd_status);
Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
			     usbd_status);
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
			 void *, int, struct lwp *);
Static int ugen_set_config(struct ugen_softc *, int, int);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
					       int, int *);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int ugen_get_alt_index(struct ugen_softc *, int);
Static void ugen_clear_endpoints(struct ugen_softc *);

#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
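/*
 * Example: minor number 0x23 selects unit 2, endpoint 3.  The 4-bit
 * fields limit the encoding to 16 units of 16 endpoint addresses each.
 */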

static int ugenif_match(device_t, cfdata_t, void *);
static void ugenif_attach(device_t, device_t, void *);
static int ugen_match(device_t, cfdata_t, void *);
static void ugen_attach(device_t, device_t, void *);
static int ugen_detach(device_t, int);
static int ugen_activate(device_t, enum devact);

CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match,
    ugen_attach, ugen_detach, ugen_activate);
CFATTACH_DECL_NEW(ugenif, sizeof(struct ugen_softc), ugenif_match,
    ugenif_attach, ugen_detach, ugen_activate);

/* toggle to control attach priority. -1 means "let autoconf decide" */
int ugen_override = -1;

static int
ugen_match(device_t parent, cfdata_t match, void *aux)
{
	struct usb_attach_arg *uaa = aux;
	int override;

	if (ugen_override != -1)
		override = ugen_override;
	else
		override = match->cf_flags & 1;

	if (override)
		return UMATCH_HIGHEST;
	else if (uaa->uaa_usegeneric)
		return UMATCH_GENERIC;
	else
		return UMATCH_NONE;
}

static int
ugenif_match(device_t parent, cfdata_t match, void *aux)
{
	/* Assume that they knew what they configured! (see ugenif(4)) */
	return UMATCH_HIGHEST;
}

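/*
 * ugen(4) attaches to a whole device; ugenif(4) attaches to a single
 * interface.  ugen_attach() merely synthesizes an interface attach
 * argument with configno/ifaceno of -1 and funnels into
 * ugenif_attach(), so both styles share one attachment path.
 */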
static void
ugen_attach(device_t parent, device_t self, void *aux)
{
	struct usb_attach_arg *uaa = aux;
	struct usbif_attach_arg uiaa;

	memset(&uiaa, 0, sizeof(uiaa));
	uiaa.uiaa_port = uaa->uaa_port;
	uiaa.uiaa_vendor = uaa->uaa_vendor;
	uiaa.uiaa_product = uaa->uaa_product;
	uiaa.uiaa_release = uaa->uaa_release;
	uiaa.uiaa_device = uaa->uaa_device;
	uiaa.uiaa_configno = -1;
	uiaa.uiaa_ifaceno = -1;

	ugenif_attach(parent, self, &uiaa);
}

static void
ugenif_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uiaa->uiaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uiaa->uiaa_device;

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	if (uiaa->uiaa_ifaceno < 0) {
		/*
		 * If we attach the whole device,
		 * set configuration index 0, the default one.
		 */
		err = usbd_set_config_index(udev, 0, 0);
		if (err) {
			aprint_error_dev(self,
			    "setting configuration index 0 failed\n");
			return;
		}
	}

	/* Get current configuration */
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf, uiaa->uiaa_ifaceno < 0);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		return;
	}

	ugenif_get_unit(sc);
	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);
	sc->sc_attached = 1;
}

Static void
ugen_clear_endpoints(struct ugen_softc *sc)
{

	/* Clear out the old info, but leave the selinfo and cv initialised. */
	for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (int dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
			memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
		}
	}
}

Static int
ugen_set_config(struct ugen_softc *sc, int configno, int chkopen)
{
	struct usbd_device *dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
	    device_xname(sc->sc_dev), configno, sc));

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	if (chkopen) {
		/*
		 * We start at 1, not 0, because we don't care whether the
		 * control endpoint is open or not.  It is always present.
		 */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
			if (sc->sc_is_open[endptno]) {
				DPRINTFN(1,
				    ("ugen_set_config: %s - endpoint %d is open\n",
				    device_xname(sc->sc_dev), endptno));
				return USBD_IN_USE;
			}

		/* Prevent opening while we're setting the config. */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++) {
			KASSERT(!sc->sc_is_open[endptno]);
			sc->sc_is_open[endptno] = 1;
		}
	}

	/* Avoid setting the current value. */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			goto out;
	}

	ugen_clear_endpoints(sc);

	err = usbd_interface_count(dev, &niface);
	if (err)
		goto out;

	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			goto out;
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			goto out;
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
			    "(%d,%d), sce=%p\n",
			    endptno, endpt, UE_GET_ADDR(endpt),
			    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	err = USBD_NORMAL_COMPLETION;

out:	if (chkopen) {
		/*
		 * Allow open again now that we're done trying to set
		 * the config.
		 */
		for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++) {
			KASSERT(sc->sc_is_open[endptno]);
			sc->sc_is_open[endptno] = 0;
		}
	}
	return err;
}

static int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	struct usbd_xfer *xfer;
	int i, j;
	int error;
	int opened;

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	if ((sc = ugenif_acquire(unit)) == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
	    flag, mode, unit, endpt));

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		opened = sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		error = 0;
		goto out;
	}

	if (sc->sc_is_open[endpt]) {
		error = EBUSY;
		goto out;
	}
	opened = sc->sc_is_open[endpt] = 1;

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce->edesc == NULL) {
				error = ENXIO;
				goto out;
			}
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
		    sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err) {
					error = EIO;
					goto out;
				}
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0) {	/* shouldn't happen */
				error = EINVAL;
				goto out;
			}
			sce->ibuf = kmem_alloc(isize, KM_SLEEP);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
			    endpt, isize));
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				error = ENOMEM;
				goto out;
			}
			err = usbd_open_pipe_intr(sce->iface,
			    edesc->bEndpointAddress,
			    USBD_SHORT_XFER_OK, &sce->pipeh, sce,
			    sce->ibuf, isize, ugenintr,
			    USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				error = EIO;
				goto out;
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				error = EIO;
				goto out;
			}
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT) {
				error = EINVAL;
				goto out;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0) {	/* shouldn't happen */
				error = EINVAL;
				goto out;
			}
			sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
			    KM_SLEEP);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
			    endpt, isize));
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
				sce->ibuf = NULL;
				error = EIO;
				goto out;
			}
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				err = usbd_create_xfer(sce->pipeh,
				    isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
				    &xfer);
				if (err)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
				for (j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
				    sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
				    ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			while (--i >= 0) /* implicit buffer free */
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
			sce->ibuf = NULL;
			error = ENOMEM;
			goto out;
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			error = EINVAL;
			goto out;
		}
	}
	error = 0;
out:	if (error && opened)
		sc->sc_is_open[endpt] = 0;
	ugenif_release(sc);
	return error;
}

static int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;
	int error;

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
	    flag, mode, UGENUNIT(dev), endpt));

	KASSERT(sc->sc_is_open[endpt]);

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		error = 0;
		goto out;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
		    endpt, dir, sce));

		usbd_abort_pipe(sce->pipeh);

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		int msize = 0;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;
	error = 0;

out:	ugenif_release(sc);
	return error;
}

Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	uint32_t n, tn;
	struct usbd_xfer *xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

	KASSERT(sce->edesc);
	KASSERT(sce->pipeh);

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurs. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error)
				break;
		}
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = uimin(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
			    uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used == 0) {
					DPRINTFN(5,
					    ("ugenread: sleep on %p\n",
					    sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
					    ("ugenread: woke, error=%d\n",
					    error));
					if (sc->sc_dying)
						error = EIO;
					if (error)
						break;
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				    && sce->ra_wb_used > 0) {
					n = uimin(uio->uio_resid,
					    sce->ra_wb_used);
					n = uimin(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer, sce, NULL,
					    uimin(n, sce->ra_wb_xferlen),
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n", n));
			tn = n;
			err = usbd_bulk_transfer(xfer, sce->pipeh,
			    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
			    sce->timeout, sc->sc_buffer, &tn);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			if (error || tn < n)
				break;
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error)
				break;
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			if (sce->fill > sce->cur)
				n = uimin(sce->fill - sce->cur, uio->uio_resid);
			else
				n = uimin(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;

	default:
		return ENXIO;
	}
	return error;
}

static int
ugenread(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return ENXIO;
	error = ugen_do_read(sc, endpt, uio, flag);
	ugenif_release(sc);

	return error;
}

Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	uint32_t n;
	int error = 0;
	uint32_t tn;
	char *dbuf;
	struct usbd_xfer *xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

	KASSERT(sce->edesc);
	KASSERT(sce->pipeh);

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
			    uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				while (sce->ra_wb_used ==
				    sce->limit - sce->ibuf) {
					DPRINTFN(5,
					    ("ugenwrite: sleep on %p\n",
					    sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
					    ("ugenwrite: woke, error=%d\n",
					    error));
					if (sc->sc_dying)
						error = EIO;
					if (error)
						break;
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = uimin(uio->uio_resid,
					    (sce->limit - sce->ibuf)
					    - sce->ra_wb_used);
					n = uimin(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					dbuf = (char *)usbd_get_buffer(xfer);
					n = uimin(sce->ra_wb_used,
					    sce->ra_wb_xferlen);
					tn = uimin(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						    n - tn);
					usbd_setup_xfer(xfer, sce, NULL, n,
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next write.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0, sce->timeout,
			    sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_INTERRUPT:
		error = usbd_create_xfer(sce->pipeh,
		    UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
		if (error)
			return error;
		while ((n = uimin(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	default:
		return ENXIO;
	}
	return error;
}

static int
ugenwrite(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return ENXIO;
	error = ugen_do_write(sc, endpt, uio, flag);
	ugenif_release(sc);

	return error;
}

static int
ugen_activate(device_t self, enum devact act)
{
	struct ugen_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		sc->sc_dying = 1;
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

static int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));

	KASSERT(KERNEL_LOCKED_P()); /* sc_is_open */

	/*
	 * Fail if we're not forced to detach and userland has any
	 * endpoints open.
	 */
	if ((flags & DETACH_FORCE) == 0) {
		for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
			if (sc->sc_is_open[i])
				return EBUSY;
		}
	}

	/* Prevent new users.  Prevent suspend/resume. */
	sc->sc_dying = 1;
	pmf_device_deregister(self);

	/*
	 * If we never finished attaching, skip nixing endpoints and
	 * users because there aren't any.
	 */
	if (!sc->sc_attached)
		goto out;

	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/* Wake everyone */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			cv_signal(&sc->sc_endpoints[i][IN].cv);
		/* Wait for processes to go away. */
		if (cv_timedwait(&sc->sc_detach_cv, &sc->sc_lock, hz * 60))
			aprint_error_dev(self, "didn't detach\n");
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = sc->sc_unit * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);
	ugenif_put_unit(sc);

out:	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}

Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
	    xfer, status, count));
	DPRINTFN(5, ("          data = %02x %02x %02x\n",
	    ibuf[0], ibuf[1], ibuf[2]));

	mutex_enter(&sc->sc_lock);
	(void)b_to_q(ibuf, count, &sce->q);
	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
    usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	int i, isize;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
	    (long)(req - sce->isoreqs), count));

	mutex_enter(&sc->sc_lock);

	/* throw away oldest input if the buffer is full */
	if (sce->fill < sce->cur && sce->cur <= sce->fill + count) {
		sce->cur += count;
		if (sce->cur >= sce->limit)
			sce->cur = sce->ibuf + (sce->cur - sce->limit);
		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
		    count));
	}

	isize = UGETW(sce->edesc->wMaxPacketSize);
	for (i = 0; i < UGEN_NISORFRMS; i++) {
		uint32_t actlen = req->sizes[i];
		char const *tbuf = (char const *)req->dmabuf + isize * i;

		/* copy data to buffer */
		while (actlen > 0) {
			n = uimin(actlen, sce->limit - sce->fill);
			memcpy(sce->fill, tbuf, n);

			tbuf += n;
			actlen -= n;
			sce->fill += n;
			if (sce->fill == sce->limit)
				sce->fill = sce->ibuf;
		}

		/* setup size for next transfer */
		req->sizes[i] = isize;
	}

	usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0,
	    ugen_isoc_rintr);
	(void)usbd_transfer(xfer);

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr,
    usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char const *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = uimin(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary. */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce, NULL, uimin(n, sce->ra_wb_xferlen), 0,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}

Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
    usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting. */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	mutex_enter(&sc->sc_lock);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers. */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = uimin(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = uimin(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
		    ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	cv_signal(&sce->cv);
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}
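/*
 * RA/WB flow control: the completion handlers above set
 * UGEN_RA_WB_STOP and stop issuing transfers when the ring is full
 * (read-ahead) or empty (write-behind); ugen_do_read() and
 * ugen_do_write() clear the flag and restart the pipeline once space
 * or data is available again.
 */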

Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt, endptno, endpt;
	int dir;

	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return err;
	if (ifaceidx < 0 || ifaceidx >= niface)
		return USBD_INVAL;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return err;
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return err;

	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	ugen_clear_endpoints(sc);

	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return 0;
}

/* Retrieve a complete descriptor for a certain device and index. */
Static usb_config_descriptor_t *
ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
{
	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
	int len;
	usbd_status err;

	if (index == USB_CURRENT_CONFIG_INDEX) {
		tdesc = usbd_get_config_descriptor(sc->sc_udev);
		if (tdesc == NULL)
			return NULL;
		len = UGETW(tdesc->wTotalLength);
		if (lenp)
			*lenp = len;
		cdesc = kmem_alloc(len, KM_SLEEP);
		memcpy(cdesc, tdesc, len);
		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
	} else {
		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
		if (err)
			return 0;
		len = UGETW(cdescr.wTotalLength);
		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
		if (lenp)
			*lenp = len;
		cdesc = kmem_alloc(len, KM_SLEEP);
		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
		if (err) {
			kmem_free(cdesc, len);
			return 0;
		}
	}
	return cdesc;
}

Static int
ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
{
	struct usbd_interface *iface;
	usbd_status err;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return -1;
	return usbd_get_interface_altindex(iface);
}

Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
    void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	struct usbd_interface *iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	uint8_t conf, alt;
	int cdesclen;
	int error;
	int dir;

	KASSERT(KERNEL_LOCKED_P()); /* ugen_set_config */

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return 0;
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return 0;
	case USB_SET_TIMEOUT:
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce == NULL)
				return EINVAL;

			sce->timeout = *(int *)addr;
		}
		return 0;
	case USB_SET_BULK_RA:
		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return EINVAL;

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return 0;

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return EINVAL;
			error = usbd_create_xfer(sce->pipeh,
			    sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
			if (error)
				return error;
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading. */
			usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL,
			    uimin(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
			    0, USBD_NO_TIMEOUT, ugen_bulkra_intr);
			err = usbd_transfer(sce->ra_wb_xfer);
			if (err != USBD_IN_PROGRESS) {
				sce->state &= ~UGEN_BULK_RA;
				kmem_free(sce->ibuf, sce->ra_wb_bufsize);
				sce->ibuf = NULL;
				usbd_destroy_xfer(sce->ra_wb_xfer);
				return EIO;
			}
		} else {
			/* Only turn RA off if it's currently on. */
			if (!(sce->state & UGEN_BULK_RA))
				return 0;

			sce->state &= ~UGEN_BULK_RA;
			usbd_abort_pipe(sce->pipeh);
			usbd_destroy_xfer(sce->ra_wb_xfer);
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and drain the buffer
			 * instead.
			 */
			kmem_free(sce->ibuf, sce->ra_wb_bufsize);
			sce->ibuf = NULL;
		}
		return 0;
	case USB_SET_BULK_WB:
		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		edesc = sce->edesc;
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return EINVAL;

		if (*(int *)addr) {
			/* Only turn WB on if it's currently off. */
			if (sce->state & UGEN_BULK_WB)
				return 0;

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return EINVAL;
			error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize,
			    0, 0, &sce->ra_wb_xfer);
			if (error)
				return error;
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
		} else {
			/* Only turn WB off if it's currently on. */
			if (!(sce->state & UGEN_BULK_WB))
				return 0;

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_destroy_xfer(sce->ra_wb_xfer);
			kmem_free(sce->ibuf, sce->ra_wb_bufsize);
			sce->ibuf = NULL;
		}
		return 0;
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return EINVAL;
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return 0;
	}
	default:
		break;
	}

	if (endpt != USB_CONTROL_ENDPOINT)
		return EINVAL;

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return EIO;
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return EPERM;
		err = ugen_set_config(sc, *(int *)addr, 1);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return EBUSY;
		default:
			return EIO;
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
		    ai->uai_interface_index, &iface);
		if (err)
			return EINVAL;
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return EIO;
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return EPERM;
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
		    ai->uai_interface_index, &iface);
		if (err)
			return EINVAL;
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return EINVAL;
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			kmem_free(cdesc, cdesclen);
			return EINVAL;
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
		    *usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		cd->ucd_desc = *cdesc;
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			kmem_free(cdesc, cdesclen);
			return EINVAL;
		}
		id->uid_desc = *idesc;
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
		    alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			kmem_free(cdesc, cdesclen);
			return EINVAL;
		}
		ed->ued_desc = *edesc;
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		len = cdesclen;
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		kmem_free(cdesc, cdesclen);
		return error;
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
		    si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return EINVAL;
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;

		error = 0;

		if (!(flag & FWRITE))
			return EPERM;
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return EINVAL;

		if (len < 0 || len > 32767)
			return EINVAL;
		if (len != 0) {
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = kmem_alloc(len, KM_SLEEP);
			if (uio.uio_rw == UIO_WRITE) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
		    ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				size_t alen = uimin(len, ur->ucr_actlen);
				error = uiomove(ptr, alen, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			kmem_free(ptr, len);
		return error;
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
		    (struct usb_device_info *)addr, 0);
		break;
	case USB_GET_DEVICEINFO_OLD:
	{
		int ret;
		MODULE_HOOK_CALL(usb_subr_fill_30_hook,
		    (sc->sc_udev, (struct usb_device_info_old *)addr, 0,
		     usbd_devinfo_vp, usbd_printBCD),
		    enosys(), ret);
		if (ret == 0)
			return 0;
		return EINVAL;
	}
	default:
		return EINVAL;
	}
	return 0;
}

static int
ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return ENXIO;
	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
	ugenif_release(sc);

	return error;
}

static int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return POLLHUP;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) {
		revents |= POLLERR;
		goto out;
	}

	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	KASSERT(sce_in->edesc || sce_out->edesc);
	KASSERT(sce_in->pipeh || sce_out->pipeh);

	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

out:	ugenif_release(sc);
	return revents;
}

static void
filt_ugenrdetach(struct knote *kn)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	struct ugen_softc *sc = sce->sc;

	mutex_enter(&sc->sc_lock);
	selremove_knote(&sce->rsel, kn);
	mutex_exit(&sc->sc_lock);
}

static int
filt_ugenread_intr(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	struct ugen_softc *sc = sce->sc;
	int ret;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_dying) {
		ret = 0;
	} else {
		kn->kn_data = sce->q.c_cc;
		ret = kn->kn_data > 0;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

static int
filt_ugenread_isoc(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	struct ugen_softc *sc = sce->sc;
	int ret;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_dying) {
		ret = 0;
	} else if (sce->cur == sce->fill) {
		ret = 0;
	} else if (sce->cur < sce->fill) {
		kn->kn_data = sce->fill - sce->cur;
		ret = 1;
	} else {
		kn->kn_data = (sce->limit - sce->cur) +
		    (sce->fill - sce->ibuf);
		ret = 1;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

static int
filt_ugenread_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	struct ugen_softc *sc = sce->sc;
	int ret;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_dying) {
		ret = 0;
	} else if (!(sce->state & UGEN_BULK_RA)) {
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		ret = filt_seltrue(kn, hint);
	} else if (sce->ra_wb_used == 0) {
		ret = 0;
	} else {
		kn->kn_data = sce->ra_wb_used;
		ret = 1;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

static int
filt_ugenwrite_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	struct ugen_softc *sc = sce->sc;
	int ret;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_dying) {
		ret = 0;
	} else if (!(sce->state & UGEN_BULK_WB)) {
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		ret = filt_seltrue(kn, hint);
	} else if (sce->ra_wb_used == sce->limit - sce->ibuf) {
		ret = 0;
	} else {
		kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
		ret = 1;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

static const struct filterops ugenread_intr_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_intr,
};

static const struct filterops ugenread_isoc_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_isoc,
};

static const struct filterops ugenread_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_bulk,
};

static const struct filterops ugenwrite_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenwrite_bulk,
};

static int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct selinfo *sip;
	int error;

	if ((sc = ugenif_acquire(UGENUNIT(dev))) == NULL)
		return ENXIO;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT) {
		error = ENODEV;
		goto out;
	}

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL) {
			error = EINVAL;
			goto out;
		}

		sip = &sce->rsel;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL) {
			error = EINVAL;
			goto out;
		}

		sip = &sce->rsel;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			error = EINVAL;
			goto out;

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		break;

	default:
		error = EINVAL;
		goto out;
	}

	kn->kn_hook = sce;

	mutex_enter(&sc->sc_lock);
	selrecord_knote(sip, kn);
	mutex_exit(&sc->sc_lock);

	error = 0;

out:	ugenif_release(sc);
	return error;
}

MODULE(MODULE_CLASS_DRIVER, ugen, NULL);

static int
ugen_modcmd(modcmd_t cmd, void *aux)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		mutex_init(&ugenif.lock, MUTEX_DEFAULT, IPL_NONE);
		rb_tree_init(&ugenif.tree, &ugenif_tree_ops);
		return 0;
	default:
		return ENOTTY;
	}
}