1 /* $NetBSD: uhci.c,v 1.295 2020/03/13 18:17:40 christos Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2004, 2011, 2012 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology, Jared D. McNeill (jmcneill (at) invisible.ca)
10 * and Matthew R. Green (mrg (at) eterna.com.au).
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * USB Universal Host Controller driver.
36 * Handles e.g. PIIX3 and PIIX4.
37 *
38 * UHCI spec: http://www.intel.com/technology/usb/spec.htm
39 * USB spec: http://www.usb.org/developers/docs/
40 * PIIXn spec: ftp://download.intel.com/design/intarch/datashts/29055002.pdf
41 * ftp://download.intel.com/design/intarch/datashts/29056201.pdf
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: uhci.c,v 1.295 2020/03/13 18:17:40 christos Exp $");
46
47 #ifdef _KERNEL_OPT
48 #include "opt_usb.h"
49 #endif
50
51 #include <sys/param.h>
52
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/device.h>
56 #include <sys/kernel.h>
57 #include <sys/kmem.h>
58 #include <sys/mutex.h>
59 #include <sys/proc.h>
60 #include <sys/queue.h>
61 #include <sys/select.h>
62 #include <sys/sysctl.h>
63 #include <sys/systm.h>
64
65 #include <machine/endian.h>
66
67 #include <dev/usb/usb.h>
68 #include <dev/usb/usbdi.h>
69 #include <dev/usb/usbdivar.h>
70 #include <dev/usb/usb_mem.h>
71
72 #include <dev/usb/uhcireg.h>
73 #include <dev/usb/uhcivar.h>
74 #include <dev/usb/usbroothub.h>
75 #include <dev/usb/usbhist.h>
76
77 /* Use bandwidth reclamation for control transfers. Some devices choke on it. */
78 /*#define UHCI_CTL_LOOP */
79
80 #ifdef UHCI_DEBUG
81 uhci_softc_t *thesc;
82 int uhcinoloop = 0;
83 #endif
84
85 #ifdef USB_DEBUG
86 #ifndef UHCI_DEBUG
87 #define uhcidebug 0
88 #else
89 static int uhcidebug = 0;
90
91 SYSCTL_SETUP(sysctl_hw_uhci_setup, "sysctl hw.uhci setup")
92 {
93 int err;
94 const struct sysctlnode *rnode;
95 const struct sysctlnode *cnode;
96
97 err = sysctl_createv(clog, 0, NULL, &rnode,
98 CTLFLAG_PERMANENT, CTLTYPE_NODE, "uhci",
99 SYSCTL_DESCR("uhci global controls"),
100 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
101
102 if (err)
103 goto fail;
104
105 /* control debugging printfs */
106 err = sysctl_createv(clog, 0, &rnode, &cnode,
107 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
108 "debug", SYSCTL_DESCR("Enable debugging output"),
109 NULL, 0, &uhcidebug, sizeof(uhcidebug), CTL_CREATE, CTL_EOL);
110 if (err)
111 goto fail;
112
113 return;
114 fail:
115 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
116 }
117
118 #endif /* UHCI_DEBUG */
119 #endif /* USB_DEBUG */
120
121 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOGN(uhcidebug,1,FMT,A,B,C,D)
122 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(uhcidebug,N,FMT,A,B,C,D)
123 #define UHCIHIST_FUNC() USBHIST_FUNC()
124 #define UHCIHIST_CALLED(name) USBHIST_CALLED(uhcidebug)
125
126 /*
127 * The UHCI controller is little endian, so on big endian machines
128 * the data stored in memory needs to be swapped.
129 */
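/*
 * Accordingly, every TD/QH field kept in shared memory goes through
 * htole32()/le32toh(), and accesses the controller may observe are
 * bracketed with usb_syncmem() BUS_DMASYNC_* operations.
 */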
130
131 struct uhci_pipe {
132 struct usbd_pipe pipe;
133 int nexttoggle;
134
135 u_char aborting;
136 struct usbd_xfer *abortstart, *abortend;
137
138 /* Info needed for different pipe kinds. */
139 union {
140 /* Control pipe */
141 struct {
142 uhci_soft_qh_t *sqh;
143 usb_dma_t reqdma;
144 uhci_soft_td_t *setup;
145 uhci_soft_td_t *stat;
146 } ctrl;
147 /* Interrupt pipe */
148 struct {
149 int npoll;
150 uhci_soft_qh_t **qhs;
151 } intr;
152 /* Bulk pipe */
153 struct {
154 uhci_soft_qh_t *sqh;
155 } bulk;
156 /* Isochronous pipe */
157 struct isoc {
158 uhci_soft_td_t **stds;
159 int next, inuse;
160 } isoc;
161 };
162 };
163
164 typedef TAILQ_HEAD(ux_completeq, uhci_xfer) ux_completeq_t;
165
166 Static void uhci_globalreset(uhci_softc_t *);
167 Static usbd_status uhci_portreset(uhci_softc_t*, int);
168 Static void uhci_reset(uhci_softc_t *);
169 Static usbd_status uhci_run(uhci_softc_t *, int, int);
170 Static uhci_soft_td_t *uhci_alloc_std(uhci_softc_t *);
171 Static void uhci_free_std(uhci_softc_t *, uhci_soft_td_t *);
172 Static void uhci_free_std_locked(uhci_softc_t *, uhci_soft_td_t *);
173 Static uhci_soft_qh_t *uhci_alloc_sqh(uhci_softc_t *);
174 Static void uhci_free_sqh(uhci_softc_t *, uhci_soft_qh_t *);
175 #if 0
176 Static void uhci_enter_ctl_q(uhci_softc_t *, uhci_soft_qh_t *,
177 uhci_intr_info_t *);
178 Static void uhci_exit_ctl_q(uhci_softc_t *, uhci_soft_qh_t *);
179 #endif
180
181 #if 0
182 Static void uhci_free_std_chain(uhci_softc_t *, uhci_soft_td_t *,
183 uhci_soft_td_t *);
184 #endif
185 Static int uhci_alloc_std_chain(uhci_softc_t *, struct usbd_xfer *,
186 int, int, uhci_soft_td_t **);
187 Static void uhci_free_stds(uhci_softc_t *, struct uhci_xfer *);
188
189 Static void uhci_reset_std_chain(uhci_softc_t *, struct usbd_xfer *,
190 int, int, int *, uhci_soft_td_t **);
191
192 Static void uhci_poll_hub(void *);
193 Static void uhci_check_intr(uhci_softc_t *, struct uhci_xfer *,
194 ux_completeq_t *);
195 Static void uhci_idone(struct uhci_xfer *, ux_completeq_t *);
196
197 Static void uhci_abortx(struct usbd_xfer *);
198
199 Static void uhci_add_ls_ctrl(uhci_softc_t *, uhci_soft_qh_t *);
200 Static void uhci_add_hs_ctrl(uhci_softc_t *, uhci_soft_qh_t *);
201 Static void uhci_add_bulk(uhci_softc_t *, uhci_soft_qh_t *);
202 Static void uhci_remove_ls_ctrl(uhci_softc_t *,uhci_soft_qh_t *);
203 Static void uhci_remove_hs_ctrl(uhci_softc_t *,uhci_soft_qh_t *);
204 Static void uhci_remove_bulk(uhci_softc_t *,uhci_soft_qh_t *);
205 Static void uhci_add_loop(uhci_softc_t *);
206 Static void uhci_rem_loop(uhci_softc_t *);
207
208 Static usbd_status uhci_setup_isoc(struct usbd_pipe *);
209
210 Static struct usbd_xfer *
211 uhci_allocx(struct usbd_bus *, unsigned int);
212 Static void uhci_freex(struct usbd_bus *, struct usbd_xfer *);
213 Static bool uhci_dying(struct usbd_bus *);
214 Static void uhci_get_lock(struct usbd_bus *, kmutex_t **);
215 Static int uhci_roothub_ctrl(struct usbd_bus *,
216 usb_device_request_t *, void *, int);
217
218 Static int uhci_device_ctrl_init(struct usbd_xfer *);
219 Static void uhci_device_ctrl_fini(struct usbd_xfer *);
220 Static usbd_status uhci_device_ctrl_transfer(struct usbd_xfer *);
221 Static usbd_status uhci_device_ctrl_start(struct usbd_xfer *);
222 Static void uhci_device_ctrl_abort(struct usbd_xfer *);
223 Static void uhci_device_ctrl_close(struct usbd_pipe *);
224 Static void uhci_device_ctrl_done(struct usbd_xfer *);
225
226 Static int uhci_device_intr_init(struct usbd_xfer *);
227 Static void uhci_device_intr_fini(struct usbd_xfer *);
228 Static usbd_status uhci_device_intr_transfer(struct usbd_xfer *);
229 Static usbd_status uhci_device_intr_start(struct usbd_xfer *);
230 Static void uhci_device_intr_abort(struct usbd_xfer *);
231 Static void uhci_device_intr_close(struct usbd_pipe *);
232 Static void uhci_device_intr_done(struct usbd_xfer *);
233
234 Static int uhci_device_bulk_init(struct usbd_xfer *);
235 Static void uhci_device_bulk_fini(struct usbd_xfer *);
236 Static usbd_status uhci_device_bulk_transfer(struct usbd_xfer *);
237 Static usbd_status uhci_device_bulk_start(struct usbd_xfer *);
238 Static void uhci_device_bulk_abort(struct usbd_xfer *);
239 Static void uhci_device_bulk_close(struct usbd_pipe *);
240 Static void uhci_device_bulk_done(struct usbd_xfer *);
241
242 Static int uhci_device_isoc_init(struct usbd_xfer *);
243 Static void uhci_device_isoc_fini(struct usbd_xfer *);
244 Static usbd_status uhci_device_isoc_transfer(struct usbd_xfer *);
245 Static void uhci_device_isoc_abort(struct usbd_xfer *);
246 Static void uhci_device_isoc_close(struct usbd_pipe *);
247 Static void uhci_device_isoc_done(struct usbd_xfer *);
248
249 Static usbd_status uhci_root_intr_transfer(struct usbd_xfer *);
250 Static usbd_status uhci_root_intr_start(struct usbd_xfer *);
251 Static void uhci_root_intr_abort(struct usbd_xfer *);
252 Static void uhci_root_intr_close(struct usbd_pipe *);
253 Static void uhci_root_intr_done(struct usbd_xfer *);
254
255 Static usbd_status uhci_open(struct usbd_pipe *);
256 Static void uhci_poll(struct usbd_bus *);
257 Static void uhci_softintr(void *);
258
259 Static void uhci_add_intr(uhci_softc_t *, uhci_soft_qh_t *);
260 Static void uhci_remove_intr(uhci_softc_t *, uhci_soft_qh_t *);
261 Static usbd_status uhci_device_setintr(uhci_softc_t *,
262 struct uhci_pipe *, int);
263
264 Static void uhci_device_clear_toggle(struct usbd_pipe *);
265 Static void uhci_noop(struct usbd_pipe *);
266
267 static inline uhci_soft_qh_t *
268 uhci_find_prev_qh(uhci_soft_qh_t *, uhci_soft_qh_t *);
269
270 #ifdef UHCI_DEBUG
271 Static void uhci_dump_all(uhci_softc_t *);
272 Static void uhci_dumpregs(uhci_softc_t *);
273 Static void uhci_dump_qhs(uhci_soft_qh_t *);
274 Static void uhci_dump_qh(uhci_soft_qh_t *);
275 Static void uhci_dump_tds(uhci_soft_td_t *);
276 Static void uhci_dump_td(uhci_soft_td_t *);
277 Static void uhci_dump_ii(struct uhci_xfer *);
278 void uhci_dump(void);
279 #endif
280
281 #define UBARR(sc) bus_space_barrier((sc)->iot, (sc)->ioh, 0, (sc)->sc_size, \
282 BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE)
283 #define UWRITE1(sc, r, x) \
284 do { UBARR(sc); bus_space_write_1((sc)->iot, (sc)->ioh, (r), (x)); \
285 } while (/*CONSTCOND*/0)
286 #define UWRITE2(sc, r, x) \
287 do { UBARR(sc); bus_space_write_2((sc)->iot, (sc)->ioh, (r), (x)); \
288 } while (/*CONSTCOND*/0)
289 #define UWRITE4(sc, r, x) \
290 do { UBARR(sc); bus_space_write_4((sc)->iot, (sc)->ioh, (r), (x)); \
291 } while (/*CONSTCOND*/0)
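
/*
 * Each register accessor issues a bus space barrier (UBARR) first, so a
 * new access is ordered after any earlier register access.
 */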
292
293 static __inline uint8_t
294 UREAD1(uhci_softc_t *sc, bus_size_t r)
295 {
296
297 UBARR(sc);
298 return bus_space_read_1(sc->iot, sc->ioh, r);
299 }
300
301 static __inline uint16_t
302 UREAD2(uhci_softc_t *sc, bus_size_t r)
303 {
304
305 UBARR(sc);
306 return bus_space_read_2(sc->iot, sc->ioh, r);
307 }
308
309 #ifdef UHCI_DEBUG
310 static __inline uint32_t
311 UREAD4(uhci_softc_t *sc, bus_size_t r)
312 {
313
314 UBARR(sc);
315 return bus_space_read_4(sc->iot, sc->ioh, r);
316 }
317 #endif
318
319 #define UHCICMD(sc, cmd) UWRITE2(sc, UHCI_CMD, cmd)
320 #define UHCISTS(sc) UREAD2(sc, UHCI_STS)
321
322 #define UHCI_RESET_TIMEOUT 100 /* ms, reset timeout */
323
324 #define UHCI_CURFRAME(sc) (UREAD2(sc, UHCI_FRNUM) & UHCI_FRNUM_MASK)
325
326 const struct usbd_bus_methods uhci_bus_methods = {
327 .ubm_open = uhci_open,
328 .ubm_softint = uhci_softintr,
329 .ubm_dopoll = uhci_poll,
330 .ubm_allocx = uhci_allocx,
331 .ubm_freex = uhci_freex,
332 .ubm_abortx = uhci_abortx,
333 .ubm_dying = uhci_dying,
334 .ubm_getlock = uhci_get_lock,
335 .ubm_rhctrl = uhci_roothub_ctrl,
336 };
337
338 const struct usbd_pipe_methods uhci_root_intr_methods = {
339 .upm_transfer = uhci_root_intr_transfer,
340 .upm_start = uhci_root_intr_start,
341 .upm_abort = uhci_root_intr_abort,
342 .upm_close = uhci_root_intr_close,
343 .upm_cleartoggle = uhci_noop,
344 .upm_done = uhci_root_intr_done,
345 };
346
347 const struct usbd_pipe_methods uhci_device_ctrl_methods = {
348 .upm_init = uhci_device_ctrl_init,
349 .upm_fini = uhci_device_ctrl_fini,
350 .upm_transfer = uhci_device_ctrl_transfer,
351 .upm_start = uhci_device_ctrl_start,
352 .upm_abort = uhci_device_ctrl_abort,
353 .upm_close = uhci_device_ctrl_close,
354 .upm_cleartoggle = uhci_noop,
355 .upm_done = uhci_device_ctrl_done,
356 };
357
358 const struct usbd_pipe_methods uhci_device_intr_methods = {
359 .upm_init = uhci_device_intr_init,
360 .upm_fini = uhci_device_intr_fini,
361 .upm_transfer = uhci_device_intr_transfer,
362 .upm_start = uhci_device_intr_start,
363 .upm_abort = uhci_device_intr_abort,
364 .upm_close = uhci_device_intr_close,
365 .upm_cleartoggle = uhci_device_clear_toggle,
366 .upm_done = uhci_device_intr_done,
367 };
368
369 const struct usbd_pipe_methods uhci_device_bulk_methods = {
370 .upm_init = uhci_device_bulk_init,
371 .upm_fini = uhci_device_bulk_fini,
372 .upm_transfer = uhci_device_bulk_transfer,
373 .upm_start = uhci_device_bulk_start,
374 .upm_abort = uhci_device_bulk_abort,
375 .upm_close = uhci_device_bulk_close,
376 .upm_cleartoggle = uhci_device_clear_toggle,
377 .upm_done = uhci_device_bulk_done,
378 };
379
380 const struct usbd_pipe_methods uhci_device_isoc_methods = {
381 .upm_init = uhci_device_isoc_init,
382 .upm_fini = uhci_device_isoc_fini,
383 .upm_transfer = uhci_device_isoc_transfer,
384 .upm_abort = uhci_device_isoc_abort,
385 .upm_close = uhci_device_isoc_close,
386 .upm_cleartoggle = uhci_noop,
387 .upm_done = uhci_device_isoc_done,
388 };
389
390 static inline void
391 uhci_add_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux)
392 {
393
394 TAILQ_INSERT_TAIL(&sc->sc_intrhead, ux, ux_list);
395 }
396
397 static inline void
398 uhci_del_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux)
399 {
400
401 TAILQ_REMOVE(&sc->sc_intrhead, ux, ux_list);
402 }
403
404 static inline uhci_soft_qh_t *
405 uhci_find_prev_qh(uhci_soft_qh_t *pqh, uhci_soft_qh_t *sqh)
406 {
407 UHCIHIST_FUNC(); UHCIHIST_CALLED();
408 DPRINTFN(15, "pqh=%#jx sqh=%#jx", (uintptr_t)pqh, (uintptr_t)sqh, 0, 0);
409
410 for (; pqh->hlink != sqh; pqh = pqh->hlink) {
411 #if defined(DIAGNOSTIC) || defined(UHCI_DEBUG)
412 usb_syncmem(&pqh->dma,
413 pqh->offs + offsetof(uhci_qh_t, qh_hlink),
414 sizeof(pqh->qh.qh_hlink),
415 BUS_DMASYNC_POSTWRITE);
416 if (le32toh(pqh->qh.qh_hlink) & UHCI_PTR_T) {
417 printf("%s: QH not found\n", __func__);
418 return NULL;
419 }
420 #endif
421 }
422 return pqh;
423 }
424
425 void
426 uhci_globalreset(uhci_softc_t *sc)
427 {
428 UHCICMD(sc, UHCI_CMD_GRESET); /* global reset */
429 usb_delay_ms(&sc->sc_bus, USB_BUS_RESET_DELAY); /* wait a little */
430 UHCICMD(sc, 0); /* do nothing */
431 }
432
433 int
434 uhci_init(uhci_softc_t *sc)
435 {
436 usbd_status err;
437 int i, j;
438 uhci_soft_qh_t *clsqh, *chsqh, *bsqh, *sqh, *lsqh;
439 uhci_soft_td_t *std;
440
441 UHCIHIST_FUNC(); UHCIHIST_CALLED();
442
443 #ifdef UHCI_DEBUG
444 thesc = sc;
445
446 if (uhcidebug >= 2)
447 uhci_dumpregs(sc);
448 #endif
449
450 sc->sc_suspend = PWR_RESUME;
451
452 UWRITE2(sc, UHCI_INTR, 0); /* disable interrupts */
453 uhci_globalreset(sc); /* reset the controller */
454 uhci_reset(sc);
455
456 /* Allocate and initialize real frame array. */
457 err = usb_allocmem(&sc->sc_bus,
458 UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t),
459 UHCI_FRAMELIST_ALIGN, &sc->sc_dma);
460 if (err)
461 return err;
462 sc->sc_pframes = KERNADDR(&sc->sc_dma, 0);
463 UWRITE2(sc, UHCI_FRNUM, 0); /* set frame number to 0 */
464 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0)); /* set frame list*/
465
466 /* Initialise mutex early for uhci_alloc_* */
467 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
468 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
469
470 /*
471 * Allocate a TD, inactive, that hangs from the last QH.
472 * This is to avoid a bug in the PIIX that makes it run berserk
473 * otherwise.
474 */
475 std = uhci_alloc_std(sc);
476 if (std == NULL)
477 return ENOMEM;
478 std->link.std = NULL;
479 std->td.td_link = htole32(UHCI_PTR_T);
480 std->td.td_status = htole32(0); /* inactive */
481 std->td.td_token = htole32(0);
482 std->td.td_buffer = htole32(0);
483 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
484 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
485
486 /* Allocate the dummy QH marking the end and used for looping the QHs.*/
487 lsqh = uhci_alloc_sqh(sc);
488 if (lsqh == NULL)
489 goto fail1;
490 lsqh->hlink = NULL;
491 lsqh->qh.qh_hlink = htole32(UHCI_PTR_T); /* end of QH chain */
492 lsqh->elink = std;
493 lsqh->qh.qh_elink = htole32(std->physaddr | UHCI_PTR_TD);
494 sc->sc_last_qh = lsqh;
495 usb_syncmem(&lsqh->dma, lsqh->offs, sizeof(lsqh->qh),
496 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
497
498 /* Allocate the dummy QH where bulk traffic will be queued. */
499 bsqh = uhci_alloc_sqh(sc);
500 if (bsqh == NULL)
501 goto fail2;
502 bsqh->hlink = lsqh;
503 bsqh->qh.qh_hlink = htole32(lsqh->physaddr | UHCI_PTR_QH);
504 bsqh->elink = NULL;
505 bsqh->qh.qh_elink = htole32(UHCI_PTR_T);
506 sc->sc_bulk_start = sc->sc_bulk_end = bsqh;
507 usb_syncmem(&bsqh->dma, bsqh->offs, sizeof(bsqh->qh),
508 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
509
510 /* Allocate dummy QH where high speed control traffic will be queued. */
511 chsqh = uhci_alloc_sqh(sc);
512 if (chsqh == NULL)
513 goto fail3;
514 chsqh->hlink = bsqh;
515 chsqh->qh.qh_hlink = htole32(bsqh->physaddr | UHCI_PTR_QH);
516 chsqh->elink = NULL;
517 chsqh->qh.qh_elink = htole32(UHCI_PTR_T);
518 sc->sc_hctl_start = sc->sc_hctl_end = chsqh;
519 usb_syncmem(&chsqh->dma, chsqh->offs, sizeof(chsqh->qh),
520 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
521
522 /* Allocate dummy QH where control traffic will be queued. */
523 clsqh = uhci_alloc_sqh(sc);
524 if (clsqh == NULL)
525 goto fail4;
526 clsqh->hlink = chsqh;
527 clsqh->qh.qh_hlink = htole32(chsqh->physaddr | UHCI_PTR_QH);
528 clsqh->elink = NULL;
529 clsqh->qh.qh_elink = htole32(UHCI_PTR_T);
530 sc->sc_lctl_start = sc->sc_lctl_end = clsqh;
531 usb_syncmem(&clsqh->dma, clsqh->offs, sizeof(clsqh->qh),
532 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
533
534 /*
535 * Make all (virtual) frame list pointers point to the interrupt
536 * queue heads and the interrupt queue heads at the control
537 * queue head and point the physical frame list to the virtual.
538 */
539 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
540 std = uhci_alloc_std(sc);
541 sqh = uhci_alloc_sqh(sc);
542 if (std == NULL || sqh == NULL)
543 return ENOMEM;
544 std->link.sqh = sqh;
545 std->td.td_link = htole32(sqh->physaddr | UHCI_PTR_QH);
546 std->td.td_status = htole32(UHCI_TD_IOS); /* iso, inactive */
547 std->td.td_token = htole32(0);
548 std->td.td_buffer = htole32(0);
549 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
550 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
551 sqh->hlink = clsqh;
552 sqh->qh.qh_hlink = htole32(clsqh->physaddr | UHCI_PTR_QH);
553 sqh->elink = NULL;
554 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
555 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
556 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
557 sc->sc_vframes[i].htd = std;
558 sc->sc_vframes[i].etd = std;
559 sc->sc_vframes[i].hqh = sqh;
560 sc->sc_vframes[i].eqh = sqh;
561 for (j = i;
562 j < UHCI_FRAMELIST_COUNT;
563 j += UHCI_VFRAMELIST_COUNT)
564 sc->sc_pframes[j] = htole32(std->physaddr);
565 }
566 usb_syncmem(&sc->sc_dma, 0,
567 UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t),
568 BUS_DMASYNC_PREWRITE);
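
	/*
	 * The schedule skeleton is now, for every physical frame j
	 * (virtual frame j % UHCI_VFRAMELIST_COUNT):
	 *
	 *   frame[j] -> isoc dummy TD -> interrupt QH -> low speed ctrl QH
	 *            -> high speed ctrl QH -> bulk QH -> last QH
	 *
	 * where the last QH either terminates the chain or, while bandwidth
	 * reclamation is active, loops back to the high speed control QH
	 * (see uhci_add_loop()).
	 */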
569
570
571 TAILQ_INIT(&sc->sc_intrhead);
572
573 sc->sc_xferpool = pool_cache_init(sizeof(struct uhci_xfer), 0, 0, 0,
574 "uhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
575
576 callout_init(&sc->sc_poll_handle, CALLOUT_MPSAFE);
577 callout_setfunc(&sc->sc_poll_handle, uhci_poll_hub, sc);
578
579 /* Set up the bus struct. */
580 sc->sc_bus.ub_methods = &uhci_bus_methods;
581 sc->sc_bus.ub_pipesize = sizeof(struct uhci_pipe);
582 sc->sc_bus.ub_usedma = true;
583
584 UHCICMD(sc, UHCI_CMD_MAXP); /* Assume 64 byte packets at frame end */
585
586 DPRINTF("Enabling...", 0, 0, 0, 0);
587
588 err = uhci_run(sc, 1, 0); /* and here we go... */
589 UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE | UHCI_INTR_RIE |
590 UHCI_INTR_IOCE | UHCI_INTR_SPIE); /* enable interrupts */
591 return err;
592
593 fail4:
594 uhci_free_sqh(sc, chsqh);
595 fail3:
596 uhci_free_sqh(sc, bsqh);
597 fail2:
598 uhci_free_sqh(sc, lsqh);
599 fail1:
600 uhci_free_std(sc, std);
601
602 return ENOMEM;
603 }
604
605 int
606 uhci_activate(device_t self, enum devact act)
607 {
608 struct uhci_softc *sc = device_private(self);
609
610 switch (act) {
611 case DVACT_DEACTIVATE:
612 sc->sc_dying = 1;
613 return 0;
614 default:
615 return EOPNOTSUPP;
616 }
617 }
618
619 void
620 uhci_childdet(device_t self, device_t child)
621 {
622 struct uhci_softc *sc = device_private(self);
623
624 KASSERT(sc->sc_child == child);
625 sc->sc_child = NULL;
626 }
627
628 int
629 uhci_detach(struct uhci_softc *sc, int flags)
630 {
631 int rv = 0;
632
633 if (sc->sc_child != NULL)
634 rv = config_detach(sc->sc_child, flags);
635
636 if (rv != 0)
637 return rv;
638
639 callout_halt(&sc->sc_poll_handle, NULL);
640 callout_destroy(&sc->sc_poll_handle);
641
642 mutex_destroy(&sc->sc_lock);
643 mutex_destroy(&sc->sc_intr_lock);
644
645 pool_cache_destroy(sc->sc_xferpool);
646
647 /* XXX free other data structures XXX */
648
649 return rv;
650 }
651
652 struct usbd_xfer *
653 uhci_allocx(struct usbd_bus *bus, unsigned int nframes)
654 {
655 struct uhci_softc *sc = UHCI_BUS2SC(bus);
656 struct usbd_xfer *xfer;
657
658 xfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
659 if (xfer != NULL) {
660 memset(xfer, 0, sizeof(struct uhci_xfer));
661
662 #ifdef DIAGNOSTIC
663 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
664 uxfer->ux_isdone = true;
665 xfer->ux_state = XFER_BUSY;
666 #endif
667 }
668 return xfer;
669 }
670
671 void
672 uhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
673 {
674 struct uhci_softc *sc = UHCI_BUS2SC(bus);
675 struct uhci_xfer *uxfer __diagused = UHCI_XFER2UXFER(xfer);
676
677 KASSERTMSG(xfer->ux_state == XFER_BUSY ||
678 xfer->ux_status == USBD_NOT_STARTED,
679 "xfer %p state %d\n", xfer, xfer->ux_state);
680 KASSERTMSG(uxfer->ux_isdone || xfer->ux_status == USBD_NOT_STARTED,
681 "xfer %p not done\n", xfer);
682 #ifdef DIAGNOSTIC
683 xfer->ux_state = XFER_FREE;
684 #endif
685 pool_cache_put(sc->sc_xferpool, xfer);
686 }
687
688 Static bool
689 uhci_dying(struct usbd_bus *bus)
690 {
691 struct uhci_softc *sc = UHCI_BUS2SC(bus);
692
693 return sc->sc_dying;
694 }
695
696 Static void
697 uhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
698 {
699 struct uhci_softc *sc = UHCI_BUS2SC(bus);
700
701 *lock = &sc->sc_lock;
702 }
703
704
705 /*
706 * Handle suspend/resume.
707 *
708 * We need to switch to polling mode here, because this routine is
709 * called from an interrupt context. This is all right since we
710 * are almost suspended anyway.
711 */
712 bool
713 uhci_resume(device_t dv, const pmf_qual_t *qual)
714 {
715 uhci_softc_t *sc = device_private(dv);
716 int cmd;
717
718 mutex_spin_enter(&sc->sc_intr_lock);
719
720 cmd = UREAD2(sc, UHCI_CMD);
721 sc->sc_bus.ub_usepolling++;
722 UWRITE2(sc, UHCI_INTR, 0);
723 uhci_globalreset(sc);
724 uhci_reset(sc);
725 if (cmd & UHCI_CMD_RS)
726 uhci_run(sc, 0, 1);
727
728 /* restore saved state */
729 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0));
730 UWRITE2(sc, UHCI_FRNUM, sc->sc_saved_frnum);
731 UWRITE1(sc, UHCI_SOF, sc->sc_saved_sof);
732
733 UHCICMD(sc, cmd | UHCI_CMD_FGR); /* force resume */
734 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_DELAY, &sc->sc_intr_lock);
735 UHCICMD(sc, cmd & ~UHCI_CMD_EGSM); /* back to normal */
736 UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE |
737 UHCI_INTR_RIE | UHCI_INTR_IOCE | UHCI_INTR_SPIE);
738 UHCICMD(sc, UHCI_CMD_MAXP);
739 uhci_run(sc, 1, 1); /* and start traffic again */
740 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_RECOVERY, &sc->sc_intr_lock);
741 sc->sc_bus.ub_usepolling--;
742 if (sc->sc_intr_xfer != NULL)
743 callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
744 #ifdef UHCI_DEBUG
745 if (uhcidebug >= 2)
746 uhci_dumpregs(sc);
747 #endif
748
749 sc->sc_suspend = PWR_RESUME;
750 mutex_spin_exit(&sc->sc_intr_lock);
751
752 return true;
753 }
754
755 bool
756 uhci_suspend(device_t dv, const pmf_qual_t *qual)
757 {
758 uhci_softc_t *sc = device_private(dv);
759 int cmd;
760
761 mutex_spin_enter(&sc->sc_intr_lock);
762
763 cmd = UREAD2(sc, UHCI_CMD);
764
765 #ifdef UHCI_DEBUG
766 if (uhcidebug >= 2)
767 uhci_dumpregs(sc);
768 #endif
769 sc->sc_suspend = PWR_SUSPEND;
770 if (sc->sc_intr_xfer != NULL)
771 callout_halt(&sc->sc_poll_handle, &sc->sc_intr_lock);
772 sc->sc_bus.ub_usepolling++;
773
774 uhci_run(sc, 0, 1); /* stop the controller */
775 cmd &= ~UHCI_CMD_RS;
776
777 /* save some state if BIOS doesn't */
778 sc->sc_saved_frnum = UREAD2(sc, UHCI_FRNUM);
779 sc->sc_saved_sof = UREAD1(sc, UHCI_SOF);
780
781 UWRITE2(sc, UHCI_INTR, 0); /* disable intrs */
782
783 UHCICMD(sc, cmd | UHCI_CMD_EGSM); /* enter suspend */
784 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_WAIT, &sc->sc_intr_lock);
785 sc->sc_bus.ub_usepolling--;
786
787 mutex_spin_exit(&sc->sc_intr_lock);
788
789 return true;
790 }
791
792 #ifdef UHCI_DEBUG
793 Static void
794 uhci_dumpregs(uhci_softc_t *sc)
795 {
796 UHCIHIST_FUNC(); UHCIHIST_CALLED();
797 DPRINTF("cmd =%04jx sts =%04jx intr =%04jx frnum =%04jx",
798 UREAD2(sc, UHCI_CMD), UREAD2(sc, UHCI_STS),
799 UREAD2(sc, UHCI_INTR), UREAD2(sc, UHCI_FRNUM));
800 DPRINTF("sof =%04jx portsc1=%04jx portsc2=%04jx flbase=%08jx",
801 UREAD1(sc, UHCI_SOF), UREAD2(sc, UHCI_PORTSC1),
802 UREAD2(sc, UHCI_PORTSC2), UREAD4(sc, UHCI_FLBASEADDR));
803 }
804
805 void
806 uhci_dump_td(uhci_soft_td_t *p)
807 {
808 UHCIHIST_FUNC(); UHCIHIST_CALLED();
809
810 usb_syncmem(&p->dma, p->offs, sizeof(p->td),
811 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
812
813 DPRINTF("TD(%#jx) at %#08jx", (uintptr_t)p, p->physaddr, 0, 0);
814 DPRINTF(" link=%#08jx status=%#08jx "
815 "token=%#08x buffer=%#08x",
816 le32toh(p->td.td_link),
817 le32toh(p->td.td_status),
818 le32toh(p->td.td_token),
819 le32toh(p->td.td_buffer));
820
821 DPRINTF("bitstuff=%jd crcto =%jd nak =%jd babble =%jd",
822 !!(le32toh(p->td.td_status) & UHCI_TD_BITSTUFF),
823 !!(le32toh(p->td.td_status) & UHCI_TD_CRCTO),
824 !!(le32toh(p->td.td_status) & UHCI_TD_NAK),
825 !!(le32toh(p->td.td_status) & UHCI_TD_BABBLE));
826 DPRINTF("dbuffer =%jd stalled =%jd active =%jd ioc =%jd",
827 !!(le32toh(p->td.td_status) & UHCI_TD_DBUFFER),
828 !!(le32toh(p->td.td_status) & UHCI_TD_STALLED),
829 !!(le32toh(p->td.td_status) & UHCI_TD_ACTIVE),
830 !!(le32toh(p->td.td_status) & UHCI_TD_IOC));
831 DPRINTF("ios =%jd ls =%jd spd =%jd",
832 !!(le32toh(p->td.td_status) & UHCI_TD_IOS),
833 !!(le32toh(p->td.td_status) & UHCI_TD_LS),
834 !!(le32toh(p->td.td_status) & UHCI_TD_SPD), 0);
835 DPRINTF("errcnt =%d actlen =%d pid=%02x",
836 UHCI_TD_GET_ERRCNT(le32toh(p->td.td_status)),
837 UHCI_TD_GET_ACTLEN(le32toh(p->td.td_status)),
838 UHCI_TD_GET_PID(le32toh(p->td.td_token)), 0);
839 DPRINTF("addr=%jd endpt=%jd D=%jd maxlen=%jd,",
840 UHCI_TD_GET_DEVADDR(le32toh(p->td.td_token)),
841 UHCI_TD_GET_ENDPT(le32toh(p->td.td_token)),
842 UHCI_TD_GET_DT(le32toh(p->td.td_token)),
843 UHCI_TD_GET_MAXLEN(le32toh(p->td.td_token)));
844 }
845
846 void
847 uhci_dump_qh(uhci_soft_qh_t *sqh)
848 {
849 UHCIHIST_FUNC(); UHCIHIST_CALLED();
850
851 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
852 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
853
854 DPRINTF("QH(%#jx) at %#08jx: hlink=%08jx elink=%08jx", (uintptr_t)sqh,
855 (int)sqh->physaddr, le32toh(sqh->qh.qh_hlink),
856 le32toh(sqh->qh.qh_elink));
857
858 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD);
859 }
860
861
862 #if 1
863 void
864 uhci_dump(void)
865 {
866 uhci_dump_all(thesc);
867 }
868 #endif
869
870 void
871 uhci_dump_all(uhci_softc_t *sc)
872 {
873 uhci_dumpregs(sc);
874 /*printf("framelist[i].link = %08x\n", sc->sc_framelist[0].link);*/
875 uhci_dump_qhs(sc->sc_lctl_start);
876 }
877
878
879 void
880 uhci_dump_qhs(uhci_soft_qh_t *sqh)
881 {
882 UHCIHIST_FUNC(); UHCIHIST_CALLED();
883
884 uhci_dump_qh(sqh);
885
886 /*
887 * uhci_dump_qhs displays all the QHs and TDs from the given QH onwards
888 * Traverses sideways first, then down.
889 *
890 * QH1
891 * QH2
892 * No QH
893 * TD2.1
894 * TD2.2
895 * TD1.1
896 * etc.
897 *
898 * TD2.x being the TDs queued at QH2 and QH2 being referenced from QH1.
899 */
900
901 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
902 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
903 if (sqh->hlink != NULL && !(le32toh(sqh->qh.qh_hlink) & UHCI_PTR_T))
904 uhci_dump_qhs(sqh->hlink);
905 else
906 DPRINTF("No QH", 0, 0, 0, 0);
907 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD);
908
909 if (sqh->elink != NULL && !(le32toh(sqh->qh.qh_elink) & UHCI_PTR_T))
910 uhci_dump_tds(sqh->elink);
911 else
912 DPRINTF("No QH", 0, 0, 0, 0);
913 }
914
915 void
916 uhci_dump_tds(uhci_soft_td_t *std)
917 {
918 uhci_soft_td_t *td;
919 int stop;
920
921 for (td = std; td != NULL; td = td->link.std) {
922 uhci_dump_td(td);
923
924 /*
925 * Check whether the link pointer in this TD marks
926 * the link pointer as end of queue. This avoids
927 * printing the free list in case the queue/TD has
928 * already been moved there (seatbelt).
929 */
930 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link),
931 sizeof(td->td.td_link),
932 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
933 stop = (le32toh(td->td.td_link) & UHCI_PTR_T ||
934 le32toh(td->td.td_link) == 0);
935 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link),
936 sizeof(td->td.td_link), BUS_DMASYNC_PREREAD);
937 if (stop)
938 break;
939 }
940 }
941
942 Static void
943 uhci_dump_ii(struct uhci_xfer *ux)
944 {
945 struct usbd_pipe *pipe;
946 usb_endpoint_descriptor_t *ed;
947 struct usbd_device *dev;
948
949 if (ux == NULL) {
950 printf("ux NULL\n");
951 return;
952 }
953 pipe = ux->ux_xfer.ux_pipe;
954 if (pipe == NULL) {
955 printf("ux %p: done=%d pipe=NULL\n", ux, ux->ux_isdone);
956 return;
957 }
958 if (pipe->up_endpoint == NULL) {
959 printf("ux %p: done=%d pipe=%p pipe->up_endpoint=NULL\n",
960 ux, ux->ux_isdone, pipe);
961 return;
962 }
963 if (pipe->up_dev == NULL) {
964 printf("ux %p: done=%d pipe=%p pipe->up_dev=NULL\n",
965 ux, ux->ux_isdone, pipe);
966 return;
967 }
968 ed = pipe->up_endpoint->ue_edesc;
969 dev = pipe->up_dev;
970 printf("ux %p: done=%d dev=%p vid=%#04x pid=%#04x addr=%d pipe=%p ep=%#02x attr=%#02x\n",
971 ux, ux->ux_isdone, dev,
972 UGETW(dev->ud_ddesc.idVendor),
973 UGETW(dev->ud_ddesc.idProduct),
974 dev->ud_addr, pipe,
975 ed->bEndpointAddress, ed->bmAttributes);
976 }
977
978 void uhci_dump_iis(struct uhci_softc *sc);
979 void
980 uhci_dump_iis(struct uhci_softc *sc)
981 {
982 struct uhci_xfer *ux;
983
984 printf("interrupt list:\n");
985 TAILQ_FOREACH(ux, &sc->sc_intrhead, ux_list)
986 uhci_dump_ii(ux);
987 }
988
989 void iidump(void);
990 void iidump(void) { uhci_dump_iis(thesc); }
991
992 #endif
993
994 /*
995 * This routine is executed periodically and simulates interrupts
996 * from the root controller interrupt pipe for port status change.
997 */
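/*
 * It is driven by sc_poll_handle every sc_ival ticks and reads the PORTSC
 * registers directly; bits 1 and 2 of the single status byte correspond to
 * root hub ports 1 and 2, as a real hub's interrupt endpoint would report
 * them.
 */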
998 void
999 uhci_poll_hub(void *addr)
1000 {
1001 struct uhci_softc *sc = addr;
1002 struct usbd_xfer *xfer;
1003 u_char *p;
1004
1005 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1006
1007 mutex_enter(&sc->sc_lock);
1008
1009 /*
1010 * If the intr xfer has completed or been synchronously
1011 * aborted, we have nothing to do.
1012 */
1013 xfer = sc->sc_intr_xfer;
1014 if (xfer == NULL)
1015 goto out;
1016 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
1017
1018 /*
1019 * If the intr xfer for which we were scheduled is done, and
1020 * another intr xfer has been submitted, let that one be dealt
1021 * with when the callout fires again.
1022 *
1023 * The call to callout_pending is racy, but the transition
1024 * from pending to invoking happens atomically. The
1025 * callout_ack ensures callout_invoking does not return true
1026 * due to this invocation of the callout; the lock ensures the
1027 * next invocation of the callout cannot callout_ack (unless it
1028 * had already run to completion and nulled sc->sc_intr_xfer,
1029 * in which case we would have bailed out already).
1030 */
1031 callout_ack(&sc->sc_poll_handle);
1032 if (callout_pending(&sc->sc_poll_handle) ||
1033 callout_invoking(&sc->sc_poll_handle))
1034 goto out;
1035
1036 /*
1037 * Check flags for the two interrupt ports, and set them in the
1038 * buffer if an interrupt arrived; otherwise arrange to poll again.
1039 */
1040 p = xfer->ux_buf;
1041 p[0] = 0;
1042 if (UREAD2(sc, UHCI_PORTSC1) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC))
1043 p[0] |= 1<<1;
1044 if (UREAD2(sc, UHCI_PORTSC2) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC))
1045 p[0] |= 1<<2;
1046 if (p[0] == 0) {
1047 /*
1048 * No change -- try again in a while, unless we're
1049 * suspending, in which case we'll try again after
1050 * resume.
1051 */
1052 if (sc->sc_suspend != PWR_SUSPEND)
1053 callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
1054 goto out;
1055 }
1056
1057 /*
1058 * Interrupt completed, and the xfer has not been completed or
1059 * synchronously aborted. Complete the xfer now.
1060 */
1061 xfer->ux_actlen = 1;
1062 xfer->ux_status = USBD_NORMAL_COMPLETION;
1063 #ifdef DIAGNOSTIC
1064 UHCI_XFER2UXFER(xfer)->ux_isdone = true;
1065 #endif
1066 usb_transfer_complete(xfer);
1067
1068 out: mutex_exit(&sc->sc_lock);
1069 }
1070
1071 void
1072 uhci_root_intr_done(struct usbd_xfer *xfer)
1073 {
1074 struct uhci_softc *sc = UHCI_XFER2SC(xfer);
1075
1076 KASSERT(mutex_owned(&sc->sc_lock));
1077
1078 /* Claim the xfer so it doesn't get completed again. */
1079 KASSERT(sc->sc_intr_xfer == xfer);
1080 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
1081 sc->sc_intr_xfer = NULL;
1082 }
1083
1084 /*
1085 * Let the last QH loop back to the high speed control transfer QH.
1086 * This is what Intel calls "bandwidth reclamation" and improves
1087 * USB performance a lot for some devices.
1088 * If we are already looping, just count it.
1089 */
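/*
 * sc_loops is a reference count: bulk QHs always take a reference
 * (uhci_add_bulk()), control QHs only when UHCI_CTL_LOOP is defined.
 * The loop is broken again by writing UHCI_PTR_T back into the last QH's
 * horizontal link once the count drops to zero.
 */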
1090 void
1091 uhci_add_loop(uhci_softc_t *sc)
1092 {
1093 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1094
1095 #ifdef UHCI_DEBUG
1096 if (uhcinoloop)
1097 return;
1098 #endif
1099 if (++sc->sc_loops == 1) {
1100 DPRINTFN(5, "add loop", 0, 0, 0, 0);
1101 /* Note, we don't loop back the soft pointer. */
1102 sc->sc_last_qh->qh.qh_hlink =
1103 htole32(sc->sc_hctl_start->physaddr | UHCI_PTR_QH);
1104 usb_syncmem(&sc->sc_last_qh->dma,
1105 sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink),
1106 sizeof(sc->sc_last_qh->qh.qh_hlink),
1107 BUS_DMASYNC_PREWRITE);
1108 }
1109 }
1110
1111 void
1112 uhci_rem_loop(uhci_softc_t *sc)
1113 {
1114 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1115
1116 #ifdef UHCI_DEBUG
1117 if (uhcinoloop)
1118 return;
1119 #endif
1120 if (--sc->sc_loops == 0) {
1121 DPRINTFN(5, "remove loop", 0, 0, 0, 0);
1122 sc->sc_last_qh->qh.qh_hlink = htole32(UHCI_PTR_T);
1123 usb_syncmem(&sc->sc_last_qh->dma,
1124 sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink),
1125 sizeof(sc->sc_last_qh->qh.qh_hlink),
1126 BUS_DMASYNC_PREWRITE);
1127 }
1128 }
1129
1130 /* Add high speed control QH, called with lock held. */
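/*
 * The QH is appended at the tail: its own links are set up first (copying
 * the old tail's horizontal link) and only then is the old tail pointed at
 * it, so the controller never sees a half-built chain.
 */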
1131 void
1132 uhci_add_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1133 {
1134 uhci_soft_qh_t *eqh;
1135
1136 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1137
1138 KASSERT(mutex_owned(&sc->sc_lock));
1139
1140 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1141 eqh = sc->sc_hctl_end;
1142 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1143 sizeof(eqh->qh.qh_hlink),
1144 BUS_DMASYNC_POSTWRITE);
1145 sqh->hlink = eqh->hlink;
1146 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1147 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1148 BUS_DMASYNC_PREWRITE);
1149 eqh->hlink = sqh;
1150 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1151 sc->sc_hctl_end = sqh;
1152 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1153 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1154 #ifdef UHCI_CTL_LOOP
1155 uhci_add_loop(sc);
1156 #endif
1157 }
1158
1159 /* Remove high speed control QH, called with lock held. */
1160 void
1161 uhci_remove_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1162 {
1163 uhci_soft_qh_t *pqh;
1164 uint32_t elink;
1165
1166 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1167
1168 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1169 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1170 #ifdef UHCI_CTL_LOOP
1171 uhci_rem_loop(sc);
1172 #endif
1173 /*
1174 * The T bit should be set in the elink of the QH so that the HC
1175 * doesn't follow the pointer. This condition may fail if the
1176 * transferred packet was short so that the QH still points
1177 * at the last used TD.
1178 * In this case we set the T bit and wait a little for the HC
1179 * to stop looking at the TD.
1180 * Note that if the TD chain is large enough, the controller
1181 * may still be looking at the chain at the end of this function.
1182 * uhci_free_std_chain() will make sure the controller stops
1183 * looking at it quickly, but until then we should not change
1184 * sqh->hlink.
1185 */
1186 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1187 sizeof(sqh->qh.qh_elink),
1188 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1189 elink = le32toh(sqh->qh.qh_elink);
1190 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1191 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD);
1192 if (!(elink & UHCI_PTR_T)) {
1193 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1194 usb_syncmem(&sqh->dma,
1195 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1196 sizeof(sqh->qh.qh_elink),
1197 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1198 delay(UHCI_QH_REMOVE_DELAY);
1199 }
1200
1201 pqh = uhci_find_prev_qh(sc->sc_hctl_start, sqh);
1202 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1203 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1204 pqh->hlink = sqh->hlink;
1205 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1206 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1207 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1208 delay(UHCI_QH_REMOVE_DELAY);
1209 if (sc->sc_hctl_end == sqh)
1210 sc->sc_hctl_end = pqh;
1211 }
1212
1213 /* Add low speed control QH, called with lock held. */
1214 void
1215 uhci_add_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1216 {
1217 uhci_soft_qh_t *eqh;
1218
1219 KASSERT(mutex_owned(&sc->sc_lock));
1220
1221 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1222 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1223
1224 eqh = sc->sc_lctl_end;
1225 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1226 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1227 sqh->hlink = eqh->hlink;
1228 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1229 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1230 BUS_DMASYNC_PREWRITE);
1231 eqh->hlink = sqh;
1232 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1233 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1234 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1235 sc->sc_lctl_end = sqh;
1236 }
1237
1238 /* Remove low speed control QH, called with lock held. */
1239 void
1240 uhci_remove_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1241 {
1242 uhci_soft_qh_t *pqh;
1243 uint32_t elink;
1244
1245 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1246
1247 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1248 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1249
1250 /* See comment in uhci_remove_hs_ctrl() */
1251 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1252 sizeof(sqh->qh.qh_elink),
1253 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1254 elink = le32toh(sqh->qh.qh_elink);
1255 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1256 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD);
1257 if (!(elink & UHCI_PTR_T)) {
1258 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1259 usb_syncmem(&sqh->dma,
1260 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1261 sizeof(sqh->qh.qh_elink),
1262 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1263 delay(UHCI_QH_REMOVE_DELAY);
1264 }
1265 pqh = uhci_find_prev_qh(sc->sc_lctl_start, sqh);
1266 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1267 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1268 pqh->hlink = sqh->hlink;
1269 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1270 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1271 sizeof(pqh->qh.qh_hlink),
1272 BUS_DMASYNC_PREWRITE);
1273 delay(UHCI_QH_REMOVE_DELAY);
1274 if (sc->sc_lctl_end == sqh)
1275 sc->sc_lctl_end = pqh;
1276 }
1277
1278 /* Add bulk QH, called with lock held. */
1279 void
1280 uhci_add_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1281 {
1282 uhci_soft_qh_t *eqh;
1283
1284 KASSERT(mutex_owned(&sc->sc_lock));
1285
1286 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1287 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1288
1289 eqh = sc->sc_bulk_end;
1290 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1291 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1292 sqh->hlink = eqh->hlink;
1293 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1294 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1295 BUS_DMASYNC_PREWRITE);
1296 eqh->hlink = sqh;
1297 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1298 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1299 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1300 sc->sc_bulk_end = sqh;
1301 uhci_add_loop(sc);
1302 }
1303
1304 /* Remove bulk QH, called with lock held. */
1305 void
1306 uhci_remove_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1307 {
1308 uhci_soft_qh_t *pqh;
1309
1310 KASSERT(mutex_owned(&sc->sc_lock));
1311
1312 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1313 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1314
1315 uhci_rem_loop(sc);
1316 /* See comment in uhci_remove_hs_ctrl() */
1317 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1318 sizeof(sqh->qh.qh_elink),
1319 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1320 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) {
1321 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1322 usb_syncmem(&sqh->dma,
1323 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1324 sizeof(sqh->qh.qh_elink),
1325 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1326 delay(UHCI_QH_REMOVE_DELAY);
1327 }
1328 pqh = uhci_find_prev_qh(sc->sc_bulk_start, sqh);
1329 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1330 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1331 pqh->hlink = sqh->hlink;
1332 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1333 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1334 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1335 delay(UHCI_QH_REMOVE_DELAY);
1336 if (sc->sc_bulk_end == sqh)
1337 sc->sc_bulk_end = pqh;
1338 }
1339
1340 Static int uhci_intr1(uhci_softc_t *);
1341
1342 int
1343 uhci_intr(void *arg)
1344 {
1345 uhci_softc_t *sc = arg;
1346 int ret = 0;
1347
1348 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1349
1350 mutex_spin_enter(&sc->sc_intr_lock);
1351
1352 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1353 goto done;
1354
1355 if (sc->sc_bus.ub_usepolling || UREAD2(sc, UHCI_INTR) == 0) {
1356 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1357 goto done;
1358 }
1359
1360 ret = uhci_intr1(sc);
1361
1362 done:
1363 mutex_spin_exit(&sc->sc_intr_lock);
1364 return ret;
1365 }
1366
1367 int
1368 uhci_intr1(uhci_softc_t *sc)
1369 {
1370 int status;
1371 int ack;
1372
1373 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1374
1375 #ifdef UHCI_DEBUG
1376 if (uhcidebug >= 15) {
1377 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0);
1378 uhci_dumpregs(sc);
1379 }
1380 #endif
1381
1382 KASSERT(mutex_owned(&sc->sc_intr_lock));
1383
1384 status = UREAD2(sc, UHCI_STS) & UHCI_STS_ALLINTRS;
1385 /* Check if the interrupt was for us. */
1386 if (status == 0)
1387 return 0;
1388
1389 if (sc->sc_suspend != PWR_RESUME) {
1390 #ifdef DIAGNOSTIC
1391 printf("%s: interrupt while not operating ignored\n",
1392 device_xname(sc->sc_dev));
1393 #endif
1394 UWRITE2(sc, UHCI_STS, status); /* acknowledge the ints */
1395 return 0;
1396 }
1397
1398 ack = 0;
1399 if (status & UHCI_STS_USBINT)
1400 ack |= UHCI_STS_USBINT;
1401 if (status & UHCI_STS_USBEI)
1402 ack |= UHCI_STS_USBEI;
1403 if (status & UHCI_STS_RD) {
1404 ack |= UHCI_STS_RD;
1405 #ifdef UHCI_DEBUG
1406 printf("%s: resume detect\n", device_xname(sc->sc_dev));
1407 #endif
1408 }
1409 if (status & UHCI_STS_HSE) {
1410 ack |= UHCI_STS_HSE;
1411 printf("%s: host system error\n", device_xname(sc->sc_dev));
1412 }
1413 if (status & UHCI_STS_HCPE) {
1414 ack |= UHCI_STS_HCPE;
1415 printf("%s: host controller process error\n",
1416 device_xname(sc->sc_dev));
1417 }
1418
1419 /* When HCHalted=1 and Run/Stop=0, it is normal */
1420 if ((status & UHCI_STS_HCH) && (UREAD2(sc, UHCI_CMD) & UHCI_CMD_RS)) {
1421 /* no acknowledge needed */
1422 if (!sc->sc_dying) {
1423 printf("%s: host controller halted\n",
1424 device_xname(sc->sc_dev));
1425 #ifdef UHCI_DEBUG
1426 uhci_dump_all(sc);
1427 #endif
1428 }
1429 sc->sc_dying = 1;
1430 }
1431
1432 if (!ack)
1433 return 0; /* nothing to acknowledge */
1434 UWRITE2(sc, UHCI_STS, ack); /* acknowledge the ints */
1435
1436 usb_schedsoftintr(&sc->sc_bus);
1437
1438 DPRINTFN(15, "sc %#jx done", (uintptr_t)sc, 0, 0, 0);
1439
1440 return 1;
1441 }
1442
1443 void
1444 uhci_softintr(void *v)
1445 {
1446 struct usbd_bus *bus = v;
1447 uhci_softc_t *sc = UHCI_BUS2SC(bus);
1448 struct uhci_xfer *ux, *nextux;
1449 ux_completeq_t cq;
1450
1451 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1452 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0);
1453
1454 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1455
1456 TAILQ_INIT(&cq);
1457 /*
1458 * Interrupts on UHCI really suck. When the host controller
1459 * interrupts because a transfer is completed there is no
1460 * way of knowing which transfer it was. You can scan down
1461 * the TDs and QHs of the previous frame to limit the search,
1462 * but that assumes that the interrupt was not delayed by more
1463 * than 1 ms, which may not always be true (e.g. after debug
1464 * output on a slow console).
1465 * We scan all interrupt descriptors to see if any have
1466 * completed.
1467 */
1468 TAILQ_FOREACH_SAFE(ux, &sc->sc_intrhead, ux_list, nextux) {
1469 uhci_check_intr(sc, ux, &cq);
1470 }
1471
1472 /*
1473 * We abuse ux_list for the interrupt and complete lists and
1474 * interrupt transfers will get re-added here so use
1475 * the _SAFE version of TAILQ_FOREACH.
1476 */
1477 TAILQ_FOREACH_SAFE(ux, &cq, ux_list, nextux) {
1478 DPRINTF("ux %#jx", (uintptr_t)ux, 0, 0, 0);
1479 usb_transfer_complete(&ux->ux_xfer);
1480 }
1481
1482 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1483 }
1484
1485 /* Check for an interrupt. */
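/*
 * A transfer is finished when its last TD is no longer active, or when an
 * intermediate TD stalled or ended in an acceptable short packet; in the
 * short-data case of a control transfer the QH is pointed at the status TD
 * so that the status stage can still complete.
 */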
1486 void
1487 uhci_check_intr(uhci_softc_t *sc, struct uhci_xfer *ux, ux_completeq_t *cqp)
1488 {
1489 uhci_soft_td_t *std, *fstd = NULL, *lstd = NULL;
1490 uint32_t status;
1491
1492 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1493 DPRINTFN(15, "ux %#jx", (uintptr_t)ux, 0, 0, 0);
1494
1495 KASSERT(ux != NULL);
1496
1497 struct usbd_xfer *xfer = &ux->ux_xfer;
1498 if (xfer->ux_status == USBD_CANCELLED ||
1499 xfer->ux_status == USBD_TIMEOUT) {
1500 DPRINTF("aborted xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
1501 return;
1502 }
1503
1504 switch (ux->ux_type) {
1505 case UX_CTRL:
1506 fstd = ux->ux_setup;
1507 lstd = ux->ux_stat;
1508 break;
1509 case UX_BULK:
1510 case UX_INTR:
1511 case UX_ISOC:
1512 fstd = ux->ux_stdstart;
1513 lstd = ux->ux_stdend;
1514 break;
1515 default:
1516 KASSERT(false);
1517 break;
1518 }
1519 if (fstd == NULL)
1520 return;
1521
1522 KASSERT(lstd != NULL);
1523
1524 usb_syncmem(&lstd->dma,
1525 lstd->offs + offsetof(uhci_td_t, td_status),
1526 sizeof(lstd->td.td_status),
1527 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1528 status = le32toh(lstd->td.td_status);
1529 usb_syncmem(&lstd->dma,
1530 lstd->offs + offsetof(uhci_td_t, td_status),
1531 sizeof(lstd->td.td_status),
1532 BUS_DMASYNC_PREREAD);
1533
1534 /* If the last TD is not marked active we can complete */
1535 if (!(status & UHCI_TD_ACTIVE)) {
1536 done:
1537 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0);
1538 uhci_idone(ux, cqp);
1539 return;
1540 }
1541
1542 /*
1543 * If the last TD is still active we need to check whether there
1544 * is an error somewhere in the middle, or whether there was a
1545 * short packet (SPD and not ACTIVE).
1546 */
1547 DPRINTFN(12, "active ux=%#jx", (uintptr_t)ux, 0, 0, 0);
1548 for (std = fstd; std != lstd; std = std->link.std) {
1549 usb_syncmem(&std->dma,
1550 std->offs + offsetof(uhci_td_t, td_status),
1551 sizeof(std->td.td_status),
1552 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1553 status = le32toh(std->td.td_status);
1554 usb_syncmem(&std->dma,
1555 std->offs + offsetof(uhci_td_t, td_status),
1556 sizeof(std->td.td_status), BUS_DMASYNC_PREREAD);
1557
1558 /* If there's an active TD the xfer isn't done. */
1559 if (status & UHCI_TD_ACTIVE) {
1560 DPRINTFN(12, "ux=%#jx std=%#jx still active",
1561 (uintptr_t)ux, (uintptr_t)std, 0, 0);
1562 return;
1563 }
1564
1565 /* Any kind of error makes the xfer done. */
1566 if (status & UHCI_TD_STALLED)
1567 goto done;
1568
1569 /*
1570 * If the data phase of a control transfer is short, we need
1571 * to complete the status stage
1572 */
1573
1574 if ((status & UHCI_TD_SPD) && ux->ux_type == UX_CTRL) {
1575 struct uhci_pipe *upipe =
1576 UHCI_PIPE2UPIPE(xfer->ux_pipe);
1577 uhci_soft_qh_t *sqh = upipe->ctrl.sqh;
1578 uhci_soft_td_t *stat = upipe->ctrl.stat;
1579
1580 DPRINTFN(12, "ux=%#jx std=%#jx control status "
1581 "phase needs completion", (uintptr_t)ux,
1582 (uintptr_t)ux->ux_stdstart, 0, 0);
1583
1584 sqh->qh.qh_elink =
1585 htole32(stat->physaddr | UHCI_PTR_TD);
1586 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1587 BUS_DMASYNC_PREWRITE);
1588 break;
1589 }
1590
1591 /* We want short packets, and it is short: it's done */
1592 usb_syncmem(&std->dma,
1593 std->offs + offsetof(uhci_td_t, td_token),
1594 sizeof(std->td.td_token),
1595 BUS_DMASYNC_POSTWRITE);
1596
1597 if ((status & UHCI_TD_SPD) &&
1598 UHCI_TD_GET_ACTLEN(status) <
1599 UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token))) {
1600 goto done;
1601 }
1602 }
1603 }
1604
1605 /* Called with USB lock held. */
1606 void
1607 uhci_idone(struct uhci_xfer *ux, ux_completeq_t *cqp)
1608 {
1609 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1610 struct usbd_xfer *xfer = &ux->ux_xfer;
1611 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
1612 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
1613 uhci_soft_td_t *std;
1614 uint32_t status = 0, nstatus;
1615 const bool polling __diagused = sc->sc_bus.ub_usepolling;
1616 int actlen;
1617
1618 KASSERT(polling || mutex_owned(&sc->sc_lock));
1619
1620 DPRINTFN(12, "ux=%#jx", (uintptr_t)ux, 0, 0, 0);
1621
1622 /*
1623 * Try to claim this xfer for completion. If it has already
1624 * completed or aborted, drop it on the floor.
1625 */
1626 if (!usbd_xfer_trycomplete(xfer))
1627 return;
1628
1629 #ifdef DIAGNOSTIC
1630 #ifdef UHCI_DEBUG
1631 if (ux->ux_isdone) {
1632 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1633 uhci_dump_ii(ux);
1634 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1635 }
1636 #endif
1637 KASSERT(!ux->ux_isdone);
1638 KASSERTMSG(!ux->ux_isdone, "xfer %p type %d status %d", xfer,
1639 ux->ux_type, xfer->ux_status);
1640 ux->ux_isdone = true;
1641 #endif
1642
1643 if (xfer->ux_nframes != 0) {
1644 /* Isoc transfer, do things differently. */
1645 uhci_soft_td_t **stds = upipe->isoc.stds;
1646 int i, n, nframes, len;
1647
1648 DPRINTFN(5, "ux=%#jx isoc ready", (uintptr_t)ux, 0, 0, 0);
1649
1650 nframes = xfer->ux_nframes;
1651 actlen = 0;
1652 n = ux->ux_curframe;
1653 for (i = 0; i < nframes; i++) {
1654 std = stds[n];
1655 #ifdef UHCI_DEBUG
1656 if (uhcidebug >= 5) {
1657 DPRINTF("isoc TD %jd", i, 0, 0, 0);
1658 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1659 uhci_dump_td(std);
1660 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1661 }
1662 #endif
1663 if (++n >= UHCI_VFRAMELIST_COUNT)
1664 n = 0;
1665 usb_syncmem(&std->dma,
1666 std->offs + offsetof(uhci_td_t, td_status),
1667 sizeof(std->td.td_status),
1668 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1669 status = le32toh(std->td.td_status);
1670 len = UHCI_TD_GET_ACTLEN(status);
1671 xfer->ux_frlengths[i] = len;
1672 actlen += len;
1673 }
1674 upipe->isoc.inuse -= nframes;
1675 xfer->ux_actlen = actlen;
1676 xfer->ux_status = USBD_NORMAL_COMPLETION;
1677 goto end;
1678 }
1679
1680 #ifdef UHCI_DEBUG
1681 DPRINTFN(10, "ux=%#jx, xfer=%#jx, pipe=%#jx ready", (uintptr_t)ux,
1682 (uintptr_t)xfer, (uintptr_t)upipe, 0);
1683 if (uhcidebug >= 10) {
1684 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1685 uhci_dump_tds(ux->ux_stdstart);
1686 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1687 }
1688 #endif
1689
1690 /* The transfer is done, compute actual length and status. */
1691 actlen = 0;
1692 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) {
1693 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
1694 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1695 nstatus = le32toh(std->td.td_status);
1696 if (nstatus & UHCI_TD_ACTIVE)
1697 break;
1698
1699 status = nstatus;
1700 if (UHCI_TD_GET_PID(le32toh(std->td.td_token)) !=
1701 UHCI_TD_PID_SETUP)
1702 actlen += UHCI_TD_GET_ACTLEN(status);
1703 else {
1704 /*
1705 * UHCI will report CRCTO in addition to a STALL or NAK
1706 * for a SETUP transaction. See section 3.2.2, "TD
1707 * CONTROL AND STATUS".
1708 */
1709 if (status & (UHCI_TD_STALLED | UHCI_TD_NAK))
1710 status &= ~UHCI_TD_CRCTO;
1711 }
1712 }
1713 /* If there are left over TDs we need to update the toggle. */
1714 if (std != NULL)
1715 upipe->nexttoggle = UHCI_TD_GET_DT(le32toh(std->td.td_token));
1716
1717 status &= UHCI_TD_ERROR;
1718 DPRINTFN(10, "actlen=%jd, status=%#jx", actlen, status, 0, 0);
1719 xfer->ux_actlen = actlen;
1720 if (status != 0) {
1721
1722 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1723 "error, addr=%jd, endpt=%#02jx",
1724 xfer->ux_pipe->up_dev->ud_addr,
1725 xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress,
1726 0, 0);
1727 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1728 "bitstuff=%jd crcto =%jd nak =%jd babble =%jd",
1729 !!(status & UHCI_TD_BITSTUFF),
1730 !!(status & UHCI_TD_CRCTO),
1731 !!(status & UHCI_TD_NAK),
1732 !!(status & UHCI_TD_BABBLE));
1733 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1734 "dbuffer =%jd stalled =%jd active =%jd",
1735 !!(status & UHCI_TD_DBUFFER),
1736 !!(status & UHCI_TD_STALLED),
1737 !!(status & UHCI_TD_ACTIVE),
1738 0);
1739
1740 if (status == UHCI_TD_STALLED)
1741 xfer->ux_status = USBD_STALLED;
1742 else
1743 xfer->ux_status = USBD_IOERROR; /* more info XXX */
1744 } else {
1745 xfer->ux_status = USBD_NORMAL_COMPLETION;
1746 }
1747
1748 end:
1749 uhci_del_intr_list(sc, ux);
1750 if (cqp)
1751 TAILQ_INSERT_TAIL(cqp, ux, ux_list);
1752
1753 KASSERT(polling || mutex_owned(&sc->sc_lock));
1754 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0);
1755 }
1756
1757 void
1758 uhci_poll(struct usbd_bus *bus)
1759 {
1760 uhci_softc_t *sc = UHCI_BUS2SC(bus);
1761
1762 if (UREAD2(sc, UHCI_STS) & UHCI_STS_USBINT) {
1763 mutex_spin_enter(&sc->sc_intr_lock);
1764 uhci_intr1(sc);
1765 mutex_spin_exit(&sc->sc_intr_lock);
1766 }
1767 }
1768
1769 void
1770 uhci_reset(uhci_softc_t *sc)
1771 {
1772 int n;
1773
1774 UHCICMD(sc, UHCI_CMD_HCRESET);
1775 /* The reset bit goes low when the controller is done. */
1776 for (n = 0; n < UHCI_RESET_TIMEOUT &&
1777 (UREAD2(sc, UHCI_CMD) & UHCI_CMD_HCRESET); n++)
1778 usb_delay_ms(&sc->sc_bus, 1);
1779 if (n >= UHCI_RESET_TIMEOUT)
1780 printf("%s: controller did not reset\n",
1781 device_xname(sc->sc_dev));
1782 }
1783
1784 usbd_status
1785 uhci_run(uhci_softc_t *sc, int run, int locked)
1786 {
1787 int n, running;
1788 uint16_t cmd;
1789
1790 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1791
1792 run = run != 0;
1793 if (!locked)
1794 mutex_spin_enter(&sc->sc_intr_lock);
1795
1796 DPRINTF("setting run=%jd", run, 0, 0, 0);
1797 cmd = UREAD2(sc, UHCI_CMD);
1798 if (run)
1799 cmd |= UHCI_CMD_RS;
1800 else
1801 cmd &= ~UHCI_CMD_RS;
1802 UHCICMD(sc, cmd);
1803 for (n = 0; n < 10; n++) {
1804 running = !(UREAD2(sc, UHCI_STS) & UHCI_STS_HCH);
1805 /* return when we've entered the state we want */
1806 if (run == running) {
1807 if (!locked)
1808 mutex_spin_exit(&sc->sc_intr_lock);
1809 DPRINTF("done cmd=%#jx sts=%#jx",
1810 UREAD2(sc, UHCI_CMD), UREAD2(sc, UHCI_STS), 0, 0);
1811 return USBD_NORMAL_COMPLETION;
1812 }
1813 usb_delay_ms_locked(&sc->sc_bus, 1, &sc->sc_intr_lock);
1814 }
1815 if (!locked)
1816 mutex_spin_exit(&sc->sc_intr_lock);
1817 printf("%s: cannot %s\n", device_xname(sc->sc_dev),
1818 run ? "start" : "stop");
1819 return USBD_IOERROR;
1820 }
1821
1822 /*
1823 * Memory management routines.
1824 * uhci_alloc_std allocates TDs
1825 * uhci_alloc_sqh allocates QHs
1826 * These two routines do their own free list management,
1827 * partly for speed and partly because allocating DMAable memory
1828 * has page-size granularity, so much memory would be wasted if
1829 * only one TD/QH (32 bytes) were placed in each allocated chunk.
1830 */
1831
1832 uhci_soft_td_t *
1833 uhci_alloc_std(uhci_softc_t *sc)
1834 {
1835 uhci_soft_td_t *std;
1836 usbd_status err;
1837 int i, offs;
1838 usb_dma_t dma;
1839
1840 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1841
1842 mutex_enter(&sc->sc_lock);
1843 if (sc->sc_freetds == NULL) {
1844 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0);
1845 mutex_exit(&sc->sc_lock);
1846
1847 err = usb_allocmem(&sc->sc_bus, UHCI_STD_SIZE * UHCI_STD_CHUNK,
1848 UHCI_TD_ALIGN, &dma);
1849 if (err)
1850 return NULL;
1851
1852 mutex_enter(&sc->sc_lock);
1853 for (i = 0; i < UHCI_STD_CHUNK; i++) {
1854 offs = i * UHCI_STD_SIZE;
1855 std = KERNADDR(&dma, offs);
1856 std->physaddr = DMAADDR(&dma, offs);
1857 std->dma = dma;
1858 std->offs = offs;
1859 std->link.std = sc->sc_freetds;
1860 sc->sc_freetds = std;
1861 }
1862 }
1863 std = sc->sc_freetds;
1864 sc->sc_freetds = std->link.std;
1865 mutex_exit(&sc->sc_lock);
1866
1867 memset(&std->td, 0, sizeof(uhci_td_t));
1868
1869 return std;
1870 }
1871
1872 #define TD_IS_FREE 0x12345678
1873
1874 void
1875 uhci_free_std_locked(uhci_softc_t *sc, uhci_soft_td_t *std)
1876 {
1877 KASSERT(mutex_owned(&sc->sc_lock));
1878
1879 #ifdef DIAGNOSTIC
1880 if (le32toh(std->td.td_token) == TD_IS_FREE) {
1881 printf("%s: freeing free TD %p\n", __func__, std);
1882 return;
1883 }
1884 std->td.td_token = htole32(TD_IS_FREE);
1885 #endif
1886
1887 std->link.std = sc->sc_freetds;
1888 sc->sc_freetds = std;
1889 }
1890
1891 void
1892 uhci_free_std(uhci_softc_t *sc, uhci_soft_td_t *std)
1893 {
1894 mutex_enter(&sc->sc_lock);
1895 uhci_free_std_locked(sc, std);
1896 mutex_exit(&sc->sc_lock);
1897 }
1898
1899 uhci_soft_qh_t *
1900 uhci_alloc_sqh(uhci_softc_t *sc)
1901 {
1902 uhci_soft_qh_t *sqh;
1903 usbd_status err;
1904 int i, offs;
1905 usb_dma_t dma;
1906
1907 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1908
1909 mutex_enter(&sc->sc_lock);
1910 if (sc->sc_freeqhs == NULL) {
1911 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0);
1912 mutex_exit(&sc->sc_lock);
1913
1914 err = usb_allocmem(&sc->sc_bus, UHCI_SQH_SIZE * UHCI_SQH_CHUNK,
1915 UHCI_QH_ALIGN, &dma);
1916 if (err)
1917 return NULL;
1918
1919 mutex_enter(&sc->sc_lock);
1920 for (i = 0; i < UHCI_SQH_CHUNK; i++) {
1921 offs = i * UHCI_SQH_SIZE;
1922 sqh = KERNADDR(&dma, offs);
1923 sqh->physaddr = DMAADDR(&dma, offs);
1924 sqh->dma = dma;
1925 sqh->offs = offs;
1926 sqh->hlink = sc->sc_freeqhs;
1927 sc->sc_freeqhs = sqh;
1928 }
1929 }
1930 sqh = sc->sc_freeqhs;
1931 sc->sc_freeqhs = sqh->hlink;
1932 mutex_exit(&sc->sc_lock);
1933
1934 memset(&sqh->qh, 0, sizeof(uhci_qh_t));
1935
1936 return sqh;
1937 }
1938
1939 void
1940 uhci_free_sqh(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1941 {
1942 KASSERT(mutex_owned(&sc->sc_lock));
1943
1944 sqh->hlink = sc->sc_freeqhs;
1945 sc->sc_freeqhs = sqh;
1946 }
1947
1948 #if 0
1949 void
1950 uhci_free_std_chain(uhci_softc_t *sc, uhci_soft_td_t *std,
1951 uhci_soft_td_t *stdend)
1952 {
1953 uhci_soft_td_t *p;
1954 uint32_t td_link;
1955
1956 /*
1957 * To avoid a race with the controller, which may still be looking
1958 * at this chain, we need to first invalidate all links and
1959 * then wait for the controller to move on to another queue.
1960 */
1961 for (p = std; p != stdend; p = p->link.std) {
1962 usb_syncmem(&p->dma,
1963 p->offs + offsetof(uhci_td_t, td_link),
1964 sizeof(p->td.td_link),
1965 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1966 td_link = le32toh(p->td.td_link);
1967 usb_syncmem(&p->dma,
1968 p->offs + offsetof(uhci_td_t, td_link),
1969 sizeof(p->td.td_link),
1970 BUS_DMASYNC_PREREAD);
1971 if ((td_link & UHCI_PTR_T) == 0) {
1972 p->td.td_link = htole32(UHCI_PTR_T);
1973 usb_syncmem(&p->dma,
1974 p->offs + offsetof(uhci_td_t, td_link),
1975 sizeof(p->td.td_link),
1976 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1977 }
1978 }
1979 delay(UHCI_QH_REMOVE_DELAY);
1980
1981 for (; std != stdend; std = p) {
1982 p = std->link.std;
1983 uhci_free_std(sc, std);
1984 }
1985 }
1986 #endif
1987
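/*
 * Allocate the TDs needed for a transfer of 'len' bytes given the
 * endpoint's maximum packet size, e.g. len=1000 with maxp=64 needs
 * ntd = (1000 + 63) / 64 = 16 TDs.  An OUT transfer with
 * USBD_FORCE_SHORT_XFER gets one extra TD for the trailing
 * zero-length packet.  The TDs are recorded in ux_stds and the first
 * one is returned through 'sp' (NULL if no TDs are needed).
 */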
1988 int
1989 uhci_alloc_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer, int len,
1990 int rd, uhci_soft_td_t **sp)
1991 {
1992 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
1993 uint16_t flags = xfer->ux_flags;
1994 uhci_soft_td_t *p;
1995
1996 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1997
1998 DPRINTFN(8, "xfer=%#jx pipe=%#jx", (uintptr_t)xfer,
1999 (uintptr_t)xfer->ux_pipe, 0, 0);
2000
2001 ASSERT_SLEEPABLE();
2002 KASSERT(sp);
2003
2004 int maxp = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
2005 if (maxp == 0) {
2006 printf("%s: maxp=0\n", __func__);
2007 return EINVAL;
2008 }
2009 size_t ntd = (len + maxp - 1) / maxp;
2010 if (!rd && (flags & USBD_FORCE_SHORT_XFER)) {
2011 ntd++;
2012 }
2013 DPRINTFN(10, "maxp=%jd ntd=%jd", maxp, ntd, 0, 0);
2014
2015 uxfer->ux_stds = NULL;
2016 uxfer->ux_nstd = ntd;
2017 if (ntd == 0) {
2018 *sp = NULL;
2019 DPRINTF("ntd=0", 0, 0, 0, 0);
2020 return 0;
2021 }
2022 uxfer->ux_stds = kmem_alloc(sizeof(uhci_soft_td_t *) * ntd,
2023 KM_SLEEP);
2024
2025 for (int i = 0; i < ntd; i++) {
2026 p = uhci_alloc_std(sc);
2027 if (p == NULL) {
2028 if (i != 0) {
2029 uxfer->ux_nstd = i;
2030 uhci_free_stds(sc, uxfer);
2031 }
2032 kmem_free(uxfer->ux_stds,
2033 sizeof(uhci_soft_td_t *) * ntd);
2034 return ENOMEM;
2035 }
2036 uxfer->ux_stds[i] = p;
2037 }
2038
2039 *sp = uxfer->ux_stds[0];
2040
2041 return 0;
2042 }
2043
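/*
 * Return all the TDs recorded in ux_stds to the softc's free list.
 */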
2044 Static void
2045 uhci_free_stds(uhci_softc_t *sc, struct uhci_xfer *ux)
2046 {
2047 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2048
2049 DPRINTFN(8, "ux=%#jx", (uintptr_t)ux, 0, 0, 0);
2050
2051 mutex_enter(&sc->sc_lock);
2052 for (size_t i = 0; i < ux->ux_nstd; i++) {
2053 uhci_soft_td_t *std = ux->ux_stds[i];
2054 #ifdef DIAGNOSTIC
2055 if (le32toh(std->td.td_token) == TD_IS_FREE) {
2056 printf("%s: freeing free TD %p\n", __func__, std);
2057 return;
2058 }
2059 std->td.td_token = htole32(TD_IS_FREE);
2060 #endif
2061 ux->ux_stds[i]->link.std = sc->sc_freetds;
2062 sc->sc_freetds = std;
2063 }
2064 mutex_exit(&sc->sc_lock);
2065 }
2066
2067
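/*
 * (Re)initialise a previously allocated TD chain for a transfer:
 * fill in the status, token (PID, device address, endpoint, data
 * toggle, length) and buffer pointer of each TD and link them
 * together, appending a zero-length OUT TD when USBD_FORCE_SHORT_XFER
 * requires it.  The last TD is returned through 'lstd' and the
 * updated data toggle through 'toggle'.
 */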
2068 Static void
2069 uhci_reset_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer,
2070 int length, int isread, int *toggle, uhci_soft_td_t **lstd)
2071 {
2072 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2073 struct usbd_pipe *pipe = xfer->ux_pipe;
2074 usb_dma_t *dma = &xfer->ux_dmabuf;
2075 uint16_t flags = xfer->ux_flags;
2076 uhci_soft_td_t *std, *prev;
2077 int len = length;
2078 int tog = *toggle;
2079 int maxp;
2080 uint32_t status;
2081 size_t i;
2082
2083 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2084 DPRINTFN(8, "xfer=%#jx len %jd isread %jd toggle %jd", (uintptr_t)xfer,
2085 len, isread, *toggle);
2086
2087 KASSERT(len != 0 || (!isread && (flags & USBD_FORCE_SHORT_XFER)));
2088
2089 maxp = UGETW(pipe->up_endpoint->ue_edesc->wMaxPacketSize);
2090 KASSERT(maxp != 0);
2091
2092 int addr = xfer->ux_pipe->up_dev->ud_addr;
2093 int endpt = xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress;
2094
2095 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(3) | UHCI_TD_ACTIVE);
2096 if (pipe->up_dev->ud_speed == USB_SPEED_LOW)
2097 status |= UHCI_TD_LS;
2098 if (flags & USBD_SHORT_XFER_OK)
2099 status |= UHCI_TD_SPD;
2100 usb_syncmem(dma, 0, len,
2101 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2102 std = prev = NULL;
2103 for (i = 0; len != 0 && i < uxfer->ux_nstd; i++, prev = std) {
2104 int l = len;
2105 std = uxfer->ux_stds[i];
2106 if (l > maxp)
2107 l = maxp;
2108
2109 if (prev) {
2110 prev->link.std = std;
2111 prev->td.td_link = htole32(
2112 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD
2113 );
2114 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td),
2115 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2116 }
2117
2118 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2119 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2120
2121 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD);
2122 std->td.td_status = htole32(status);
2123 std->td.td_token = htole32(
2124 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) |
2125 UHCI_TD_SET_DEVADDR(addr) |
2126 UHCI_TD_SET_PID(isread ? UHCI_TD_PID_IN : UHCI_TD_PID_OUT) |
2127 UHCI_TD_SET_DT(tog) |
2128 UHCI_TD_SET_MAXLEN(l)
2129 );
2130 std->td.td_buffer = htole32(DMAADDR(dma, i * maxp));
2131
2132 std->link.std = NULL;
2133
2134 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2135 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2136 tog ^= 1;
2137
2138 len -= l;
2139 }
2140 KASSERTMSG(len == 0, "xfer %p alen %d len %d mps %d ux_nstd %zu i %zu",
2141 xfer, length, len, maxp, uxfer->ux_nstd, i);
2142
2143 if (!isread &&
2144 (flags & USBD_FORCE_SHORT_XFER) &&
2145 length % maxp == 0) {
2146 /* Force a 0 length transfer at the end. */
2147 KASSERTMSG(i < uxfer->ux_nstd, "i=%zu nstd=%zu", i,
2148 uxfer->ux_nstd);
2149 std = uxfer->ux_stds[i++];
2150
2151 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD);
2152 std->td.td_status = htole32(status);
2153 std->td.td_token = htole32(
2154 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) |
2155 UHCI_TD_SET_DEVADDR(addr) |
2156 UHCI_TD_SET_PID(UHCI_TD_PID_OUT) |
2157 UHCI_TD_SET_DT(tog) |
2158 UHCI_TD_SET_MAXLEN(0)
2159 );
2160 std->td.td_buffer = 0;
2161 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2162 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2163
2164 std->link.std = NULL;
2165 if (prev) {
2166 prev->link.std = std;
2167 prev->td.td_link = htole32(
2168 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD
2169 );
2170 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td),
2171 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2172 }
2173 tog ^= 1;
2174 }
2175 *lstd = std;
2176 *toggle = tog;
2177 }
2178
2179 void
2180 uhci_device_clear_toggle(struct usbd_pipe *pipe)
2181 {
2182 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2183 upipe->nexttoggle = 0;
2184 }
2185
2186 void
2187 uhci_noop(struct usbd_pipe *pipe)
2188 {
2189 }
2190
2191 int
2192 uhci_device_bulk_init(struct usbd_xfer *xfer)
2193 {
2194 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2195 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2196 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
2197 int endpt = ed->bEndpointAddress;
2198 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2199 int len = xfer->ux_bufsize;
2200 int err = 0;
2201
2202
2203 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2204 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, len,
2205 xfer->ux_flags, 0);
2206
2207 if (sc->sc_dying)
2208 return USBD_IOERROR;
2209
2210 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2211
2212 uxfer->ux_type = UX_BULK;
2213 err = uhci_alloc_std_chain(sc, xfer, len, isread, &uxfer->ux_stdstart);
2214 if (err)
2215 return err;
2216
2217 #ifdef UHCI_DEBUG
2218 if (uhcidebug >= 10) {
2219 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2220 uhci_dump_tds(uxfer->ux_stdstart);
2221 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2222 }
2223 #endif
2224
2225 return 0;
2226 }
2227
2228 Static void
2229 uhci_device_bulk_fini(struct usbd_xfer *xfer)
2230 {
2231 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2232 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2233
2234 KASSERT(ux->ux_type == UX_BULK);
2235
2236 if (ux->ux_nstd) {
2237 uhci_free_stds(sc, ux);
2238 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2239 }
2240 }
2241
2242 usbd_status
2243 uhci_device_bulk_transfer(struct usbd_xfer *xfer)
2244 {
2245 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2246 usbd_status err;
2247
2248 /* Insert last in queue. */
2249 mutex_enter(&sc->sc_lock);
2250 err = usb_insert_transfer(xfer);
2251 mutex_exit(&sc->sc_lock);
2252 if (err)
2253 return err;
2254
2255 /*
2256 * Pipe isn't running (otherwise err would be USBD_INPROG),
2257 * so start it first.
2258 */
2259 return uhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2260 }
2261
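/*
 * Start a bulk transfer: rebuild the TD chain for this xfer, request
 * an interrupt on the last TD, hook the chain onto the pipe's bulk QH
 * and add the QH to the bulk schedule.
 */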
2262 usbd_status
2263 uhci_device_bulk_start(struct usbd_xfer *xfer)
2264 {
2265 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2266 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2267 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2268 uhci_soft_td_t *data, *dataend;
2269 uhci_soft_qh_t *sqh;
2270 const bool polling = sc->sc_bus.ub_usepolling;
2271 int len;
2272 int endpt;
2273 int isread;
2274
2275 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2276 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2277 xfer->ux_length, xfer->ux_flags, 0);
2278
2279 if (sc->sc_dying)
2280 return USBD_IOERROR;
2281
2282 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2283 KASSERT(xfer->ux_length <= xfer->ux_bufsize);
2284
2285 len = xfer->ux_length;
2286 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2287 isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2288 sqh = upipe->bulk.sqh;
2289
2290 /* Take lock here to protect nexttoggle */
2291 if (!polling)
2292 mutex_enter(&sc->sc_lock);
2293
2294 uhci_reset_std_chain(sc, xfer, len, isread, &upipe->nexttoggle,
2295 &dataend);
2296
2297 data = ux->ux_stdstart;
2298 ux->ux_stdend = dataend;
2299 dataend->td.td_status |= htole32(UHCI_TD_IOC);
2300 usb_syncmem(&dataend->dma,
2301 dataend->offs + offsetof(uhci_td_t, td_status),
2302 sizeof(dataend->td.td_status),
2303 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2304
2305 #ifdef UHCI_DEBUG
2306 if (uhcidebug >= 10) {
2307 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2308 DPRINTFN(10, "before transfer", 0, 0, 0, 0);
2309 uhci_dump_tds(data);
2310 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2311 }
2312 #endif
2313
2314 KASSERT(ux->ux_isdone);
2315 #ifdef DIAGNOSTIC
2316 ux->ux_isdone = false;
2317 #endif
2318
2319 sqh->elink = data;
2320 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD);
2321 /* uhci_add_bulk() will do usb_syncmem(sqh) */
2322
2323 uhci_add_bulk(sc, sqh);
2324 uhci_add_intr_list(sc, ux);
2325 usbd_xfer_schedule_timeout(xfer);
2326 xfer->ux_status = USBD_IN_PROGRESS;
2327 if (!polling)
2328 mutex_exit(&sc->sc_lock);
2329
2330 return USBD_IN_PROGRESS;
2331 }
2332
2333 /* Abort a device bulk request. */
2334 void
2335 uhci_device_bulk_abort(struct usbd_xfer *xfer)
2336 {
2337 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2338
2339 KASSERT(mutex_owned(&sc->sc_lock));
2340
2341 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2342
2343 usbd_xfer_abort(xfer);
2344 }
2345
2346 /*
2347 * Abort an xfer: deactivate its TDs, then simply wait for the hardware to notice.
2348 */
2349 Static void
2350 uhci_abortx(struct usbd_xfer *xfer)
2351 {
2352 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2353 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2354 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2355 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2356 uhci_soft_td_t *std;
2357
2358 DPRINTFN(1,"xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2359
2360 KASSERT(mutex_owned(&sc->sc_lock));
2361 ASSERT_SLEEPABLE();
2362
2363 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2364 xfer->ux_status == USBD_TIMEOUT),
2365 "bad abort status: %d", xfer->ux_status);
2366
2367 /*
2368 * If we're dying, skip the hardware action and just notify the
2369 * software that we're done.
2370 */
2371 if (sc->sc_dying) {
2372 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer,
2373 xfer->ux_status, 0, 0);
2374 goto dying;
2375 }
2376
2377 /*
2378 * HC Step 1: Make interrupt routine and hardware ignore xfer.
2379 */
2380 uhci_del_intr_list(sc, ux);
2381
2382 DPRINTF("stop ux=%#jx", (uintptr_t)ux, 0, 0, 0);
2383 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) {
2384 usb_syncmem(&std->dma,
2385 std->offs + offsetof(uhci_td_t, td_status),
2386 sizeof(std->td.td_status),
2387 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2388 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC));
2389 usb_syncmem(&std->dma,
2390 std->offs + offsetof(uhci_td_t, td_status),
2391 sizeof(std->td.td_status),
2392 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2393 }
2394
2395 /*
2396 * HC Step 2: Wait until we know hardware has finished any possible
2397 * use of the xfer.
2398 */
2399 /* The hardware finishes within 1ms; wait 2ms to be sure. */
2400 usb_delay_ms_locked(upipe->pipe.up_dev->ud_bus, 2, &sc->sc_lock);
2401
2402 /*
2403 * HC Step 3: Notify completion to waiting xfers.
2404 */
2405 dying:
2406 #ifdef DIAGNOSTIC
2407 ux->ux_isdone = true;
2408 #endif
2409 usb_transfer_complete(xfer);
2410 DPRINTFN(14, "end", 0, 0, 0, 0);
2411
2412 KASSERT(mutex_owned(&sc->sc_lock));
2413 }
2414
2415 /* Close a device bulk pipe. */
2416 void
2417 uhci_device_bulk_close(struct usbd_pipe *pipe)
2418 {
2419 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2420 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2421
2422 KASSERT(mutex_owned(&sc->sc_lock));
2423
2424 uhci_free_sqh(sc, upipe->bulk.sqh);
2425
2426 pipe->up_endpoint->ue_toggle = upipe->nexttoggle;
2427 }
2428
2429 int
2430 uhci_device_ctrl_init(struct usbd_xfer *xfer)
2431 {
2432 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2433 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2434 usb_device_request_t *req = &xfer->ux_request;
2435 struct usbd_device *dev = upipe->pipe.up_dev;
2436 uhci_softc_t *sc = dev->ud_bus->ub_hcpriv;
2437 uhci_soft_td_t *data = NULL;
2438 int len;
2439 usbd_status err;
2440 int isread;
2441
2442 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2443 DPRINTFN(3, "xfer=%#jx len=%jd, addr=%jd, endpt=%jd",
2444 (uintptr_t)xfer, xfer->ux_bufsize, dev->ud_addr,
2445 upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress);
2446
2447 isread = req->bmRequestType & UT_READ;
2448 len = xfer->ux_bufsize;
2449
2450 uxfer->ux_type = UX_CTRL;
2451 /* Set up data transaction */
2452 if (len != 0) {
2453 err = uhci_alloc_std_chain(sc, xfer, len, isread, &data);
2454 if (err)
2455 return err;
2456 }
2457 /* Set up interrupt info. */
2458 uxfer->ux_setup = upipe->ctrl.setup;
2459 uxfer->ux_stat = upipe->ctrl.stat;
2460 uxfer->ux_data = data;
2461
2462 return 0;
2463 }
2464
2465 Static void
2466 uhci_device_ctrl_fini(struct usbd_xfer *xfer)
2467 {
2468 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2469 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2470
2471 KASSERT(ux->ux_type == UX_CTRL);
2472
2473 if (ux->ux_nstd) {
2474 uhci_free_stds(sc, ux);
2475 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2476 }
2477 }
2478
2479 usbd_status
2480 uhci_device_ctrl_transfer(struct usbd_xfer *xfer)
2481 {
2482 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2483 usbd_status err;
2484
2485 /* Insert last in queue. */
2486 mutex_enter(&sc->sc_lock);
2487 err = usb_insert_transfer(xfer);
2488 mutex_exit(&sc->sc_lock);
2489 if (err)
2490 return err;
2491
2492 /*
2493 * Pipe isn't running (otherwise err would be USBD_INPROG),
2494 * so start it first.
2495 */
2496 return uhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2497 }
2498
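/*
 * Start a control transfer: build the SETUP TD pointing at the
 * request in reqdma, an optional chain of data TDs, and a STATUS TD
 * in the opposite direction of the data stage, then hang them off the
 * pipe's control QH and add it to the control schedule appropriate
 * for the device's speed.
 */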
2499 usbd_status
2500 uhci_device_ctrl_start(struct usbd_xfer *xfer)
2501 {
2502 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2503 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2504 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2505 usb_device_request_t *req = &xfer->ux_request;
2506 struct usbd_device *dev = upipe->pipe.up_dev;
2507 int addr = dev->ud_addr;
2508 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2509 uhci_soft_td_t *setup, *stat, *next, *dataend;
2510 uhci_soft_qh_t *sqh;
2511 const bool polling = sc->sc_bus.ub_usepolling;
2512 int len;
2513 int isread;
2514
2515 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2516
2517 if (sc->sc_dying)
2518 return USBD_IOERROR;
2519
2520 KASSERT(xfer->ux_rqflags & URQ_REQUEST);
2521
2522 DPRINTFN(3, "type=%#02jx, request=%#02jx, "
2523 "wValue=%#04jx, wIndex=%#04jx",
2524 req->bmRequestType, req->bRequest, UGETW(req->wValue),
2525 UGETW(req->wIndex));
2526 DPRINTFN(3, "len=%jd, addr=%jd, endpt=%jd",
2527 UGETW(req->wLength), dev->ud_addr, endpt, 0);
2528
2529 isread = req->bmRequestType & UT_READ;
2530 len = UGETW(req->wLength);
2531
2532 setup = upipe->ctrl.setup;
2533 stat = upipe->ctrl.stat;
2534 sqh = upipe->ctrl.sqh;
2535
2536 memcpy(KERNADDR(&upipe->ctrl.reqdma, 0), req, sizeof(*req));
2537 usb_syncmem(&upipe->ctrl.reqdma, 0, sizeof(*req), BUS_DMASYNC_PREWRITE);
2538
2539 if (!polling)
2540 mutex_enter(&sc->sc_lock);
2541
2542 /* Set up data transaction */
2543 if (len != 0) {
2544 upipe->nexttoggle = 1;
2545 next = uxfer->ux_data;
2546 uhci_reset_std_chain(sc, xfer, len, isread,
2547 &upipe->nexttoggle, &dataend);
2548 dataend->link.std = stat;
2549 dataend->td.td_link = htole32(stat->physaddr | UHCI_PTR_TD);
2550 usb_syncmem(&dataend->dma,
2551 dataend->offs + offsetof(uhci_td_t, td_link),
2552 sizeof(dataend->td.td_link),
2553 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2554 } else {
2555 next = stat;
2556 }
2557
2558 const uint32_t status = UHCI_TD_ZERO_ACTLEN(
2559 UHCI_TD_SET_ERRCNT(3) |
2560 UHCI_TD_ACTIVE |
2561 (dev->ud_speed == USB_SPEED_LOW ? UHCI_TD_LS : 0)
2562 );
2563 setup->link.std = next;
2564 setup->td.td_link = htole32(next->physaddr | UHCI_PTR_TD);
2565 setup->td.td_status = htole32(status);
2566 setup->td.td_token = htole32(UHCI_TD_SETUP(sizeof(*req), endpt, addr));
2567 setup->td.td_buffer = htole32(DMAADDR(&upipe->ctrl.reqdma, 0));
2568
2569 usb_syncmem(&setup->dma, setup->offs, sizeof(setup->td),
2570 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2571
2572 stat->link.std = NULL;
2573 stat->td.td_link = htole32(UHCI_PTR_T);
2574 stat->td.td_status = htole32(status | UHCI_TD_IOC);
2575 stat->td.td_token =
2576 htole32(isread ? UHCI_TD_OUT(0, endpt, addr, 1) :
2577 UHCI_TD_IN (0, endpt, addr, 1));
2578 stat->td.td_buffer = htole32(0);
2579 usb_syncmem(&stat->dma, stat->offs, sizeof(stat->td),
2580 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2581
2582 #ifdef UHCI_DEBUG
2583 if (uhcidebug >= 10) {
2584 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2585 DPRINTF("before transfer", 0, 0, 0, 0);
2586 uhci_dump_tds(setup);
2587 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2588 }
2589 #endif
2590
2591 /* Set up interrupt info. */
2592 uxfer->ux_setup = setup;
2593 uxfer->ux_stat = stat;
2594 KASSERT(uxfer->ux_isdone);
2595 #ifdef DIAGNOSTIC
2596 uxfer->ux_isdone = false;
2597 #endif
2598
2599 sqh->elink = setup;
2600 sqh->qh.qh_elink = htole32(setup->physaddr | UHCI_PTR_TD);
2601 /* uhci_add_?s_ctrl() will do usb_syncmem(sqh) */
2602
2603 if (dev->ud_speed == USB_SPEED_LOW)
2604 uhci_add_ls_ctrl(sc, sqh);
2605 else
2606 uhci_add_hs_ctrl(sc, sqh);
2607 uhci_add_intr_list(sc, uxfer);
2608 #ifdef UHCI_DEBUG
2609 if (uhcidebug >= 12) {
2610 uhci_soft_td_t *std;
2611 uhci_soft_qh_t *xqh;
2612 uhci_soft_qh_t *sxqh;
2613 int maxqh = 0;
2614 uhci_physaddr_t link;
2615 DPRINTFN(12, "--- dump start ---", 0, 0, 0, 0);
2616 DPRINTFN(12, "follow from [0]", 0, 0, 0, 0);
2617 for (std = sc->sc_vframes[0].htd, link = 0;
2618 (link & UHCI_PTR_QH) == 0;
2619 std = std->link.std) {
2620 link = le32toh(std->td.td_link);
2621 uhci_dump_td(std);
2622 }
2623 sxqh = (uhci_soft_qh_t *)std;
2624 uhci_dump_qh(sxqh);
2625 for (xqh = sxqh;
2626 xqh != NULL;
2627 xqh = (maxqh++ == 5 || xqh->hlink == sxqh ||
2628 xqh->hlink == xqh ? NULL : xqh->hlink)) {
2629 uhci_dump_qh(xqh);
2630 }
2631 DPRINTFN(12, "Enqueued QH:", 0, 0, 0, 0);
2632 uhci_dump_qh(sqh);
2633 uhci_dump_tds(sqh->elink);
2634 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2635 }
2636 #endif
2637 usbd_xfer_schedule_timeout(xfer);
2638 xfer->ux_status = USBD_IN_PROGRESS;
2639 if (!polling)
2640 mutex_exit(&sc->sc_lock);
2641
2642 return USBD_IN_PROGRESS;
2643 }
2644
2645 int
2646 uhci_device_intr_init(struct usbd_xfer *xfer)
2647 {
2648 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2649 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2650 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
2651 int endpt = ed->bEndpointAddress;
2652 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2653 int len = xfer->ux_bufsize;
2654 int err;
2655
2656 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2657
2658 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2659 xfer->ux_length, xfer->ux_flags, 0);
2660
2661 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2662 KASSERT(len != 0);
2663
2664 ux->ux_type = UX_INTR;
2665 ux->ux_nstd = 0;
2666 err = uhci_alloc_std_chain(sc, xfer, len, isread, &ux->ux_stdstart);
2667
2668 return err;
2669 }
2670
2671 Static void
2672 uhci_device_intr_fini(struct usbd_xfer *xfer)
2673 {
2674 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2675 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2676
2677 KASSERT(ux->ux_type == UX_INTR);
2678
2679 if (ux->ux_nstd) {
2680 uhci_free_stds(sc, ux);
2681 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2682 }
2683 }
2684
2685 usbd_status
2686 uhci_device_intr_transfer(struct usbd_xfer *xfer)
2687 {
2688 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2689 usbd_status err;
2690
2691 /* Insert last in queue. */
2692 mutex_enter(&sc->sc_lock);
2693 err = usb_insert_transfer(xfer);
2694 mutex_exit(&sc->sc_lock);
2695 if (err)
2696 return err;
2697
2698 /*
2699 * Pipe isn't running (otherwise err would be USBD_INPROG),
2700 * so start it first.
2701 */
2702 return uhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2703 }
2704
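/*
 * Start an interrupt transfer: rebuild the TD chain, request an
 * interrupt on the last TD, and link the chain onto each of the
 * pipe's npoll interrupt QHs in the schedule.
 */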
2705 usbd_status
2706 uhci_device_intr_start(struct usbd_xfer *xfer)
2707 {
2708 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2709 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2710 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2711 uhci_soft_td_t *data, *dataend;
2712 uhci_soft_qh_t *sqh;
2713 const bool polling = sc->sc_bus.ub_usepolling;
2714 int isread, endpt;
2715 int i;
2716
2717 if (sc->sc_dying)
2718 return USBD_IOERROR;
2719
2720 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2721
2722 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2723 xfer->ux_length, xfer->ux_flags, 0);
2724
2725 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2726 KASSERT(xfer->ux_length <= xfer->ux_bufsize);
2727
2728 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2729 isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2730
2731 data = ux->ux_stdstart;
2732
2733 KASSERT(ux->ux_isdone);
2734 #ifdef DIAGNOSTIC
2735 ux->ux_isdone = false;
2736 #endif
2737
2738 /* Take lock to protect nexttoggle */
2739 if (!polling)
2740 mutex_enter(&sc->sc_lock);
2741 uhci_reset_std_chain(sc, xfer, xfer->ux_length, isread,
2742 &upipe->nexttoggle, &dataend);
2743
2744 dataend->td.td_status |= htole32(UHCI_TD_IOC);
2745 usb_syncmem(&dataend->dma,
2746 dataend->offs + offsetof(uhci_td_t, td_status),
2747 sizeof(dataend->td.td_status),
2748 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2749 ux->ux_stdend = dataend;
2750
2751 #ifdef UHCI_DEBUG
2752 if (uhcidebug >= 10) {
2753 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2754 uhci_dump_tds(data);
2755 uhci_dump_qh(upipe->intr.qhs[0]);
2756 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2757 }
2758 #endif
2759
2760 DPRINTFN(10, "qhs[0]=%#jx", (uintptr_t)upipe->intr.qhs[0], 0, 0, 0);
2761 for (i = 0; i < upipe->intr.npoll; i++) {
2762 sqh = upipe->intr.qhs[i];
2763 sqh->elink = data;
2764 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD);
2765 usb_syncmem(&sqh->dma,
2766 sqh->offs + offsetof(uhci_qh_t, qh_elink),
2767 sizeof(sqh->qh.qh_elink),
2768 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2769 }
2770 uhci_add_intr_list(sc, ux);
2771 xfer->ux_status = USBD_IN_PROGRESS;
2772 if (!polling)
2773 mutex_exit(&sc->sc_lock);
2774
2775 #ifdef UHCI_DEBUG
2776 if (uhcidebug >= 10) {
2777 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2778 uhci_dump_tds(data);
2779 uhci_dump_qh(upipe->intr.qhs[0]);
2780 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2781 }
2782 #endif
2783
2784 return USBD_IN_PROGRESS;
2785 }
2786
2787 /* Abort a device control request. */
2788 void
2789 uhci_device_ctrl_abort(struct usbd_xfer *xfer)
2790 {
2791 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2792
2793 KASSERT(mutex_owned(&sc->sc_lock));
2794
2795 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2796 usbd_xfer_abort(xfer);
2797 }
2798
2799 /* Close a device control pipe. */
2800 void
2801 uhci_device_ctrl_close(struct usbd_pipe *pipe)
2802 {
2803 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2804 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2805
2806 uhci_free_sqh(sc, upipe->ctrl.sqh);
2807 uhci_free_std_locked(sc, upipe->ctrl.setup);
2808 uhci_free_std_locked(sc, upipe->ctrl.stat);
2809
2810 usb_freemem(&sc->sc_bus, &upipe->ctrl.reqdma);
2811 }
2812
2813 /* Abort a device interrupt request. */
2814 void
2815 uhci_device_intr_abort(struct usbd_xfer *xfer)
2816 {
2817 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2818
2819 KASSERT(mutex_owned(&sc->sc_lock));
2820 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
2821
2822 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2823 DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2824
2825 usbd_xfer_abort(xfer);
2826 }
2827
2828 /* Close a device interrupt pipe. */
2829 void
2830 uhci_device_intr_close(struct usbd_pipe *pipe)
2831 {
2832 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2833 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2834 int i, npoll;
2835
2836 KASSERT(mutex_owned(&sc->sc_lock));
2837
2838 /* Unlink descriptors from controller data structures. */
2839 npoll = upipe->intr.npoll;
2840 for (i = 0; i < npoll; i++)
2841 uhci_remove_intr(sc, upipe->intr.qhs[i]);
2842
2843 /*
2844 * We now have to wait for any activity on the physical
2845 * descriptors to stop.
2846 */
2847 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock);
2848
2849 for (i = 0; i < npoll; i++)
2850 uhci_free_sqh(sc, upipe->intr.qhs[i]);
2851 kmem_free(upipe->intr.qhs, npoll * sizeof(uhci_soft_qh_t *));
2852 }
2853
2854 int
2855 uhci_device_isoc_init(struct usbd_xfer *xfer)
2856 {
2857 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2858
2859 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2860 KASSERT(xfer->ux_nframes != 0);
2861 KASSERT(ux->ux_isdone);
2862
2863 ux->ux_type = UX_ISOC;
2864 return 0;
2865 }
2866
2867 Static void
2868 uhci_device_isoc_fini(struct usbd_xfer *xfer)
2869 {
2870 struct uhci_xfer *ux __diagused = UHCI_XFER2UXFER(xfer);
2871
2872 KASSERT(ux->ux_type == UX_ISOC);
2873 }
2874
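/*
 * Queue an isochronous transfer: map each frame of the xfer onto one
 * of the pipe's per-virtual-frame TDs, starting at isoc->next (or a
 * few frames ahead of the current frame number when the pipe is
 * idle), and request an interrupt on the last frame's TD.
 */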
2875 usbd_status
2876 uhci_device_isoc_transfer(struct usbd_xfer *xfer)
2877 {
2878 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2879 usbd_status err __diagused;
2880
2881 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2882 DPRINTFN(5, "xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2883
2884 /* Put it on our queue, */
2885 mutex_enter(&sc->sc_lock);
2886 err = usb_insert_transfer(xfer);
2887 mutex_exit(&sc->sc_lock);
2888
2889 KASSERT(err == USBD_NORMAL_COMPLETION);
2890
2891 /* insert into schedule, */
2892
2893 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2894 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2895 struct isoc *isoc = &upipe->isoc;
2896 uhci_soft_td_t *std = NULL;
2897 uint32_t buf, len, status, offs;
2898 int i, next, nframes;
2899 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN;
2900
2901 DPRINTFN(5, "used=%jd next=%jd xfer=%#jx nframes=%jd",
2902 isoc->inuse, isoc->next, (uintptr_t)xfer, xfer->ux_nframes);
2903
2904 if (sc->sc_dying)
2905 return USBD_IOERROR;
2906
2907 if (xfer->ux_status == USBD_IN_PROGRESS) {
2908 /* This request has already been entered into the frame list */
2909 printf("%s: xfer=%p in frame list\n", __func__, xfer);
2910 /* XXX */
2911 }
2912
2913 #ifdef DIAGNOSTIC
2914 if (isoc->inuse >= UHCI_VFRAMELIST_COUNT)
2915 printf("%s: overflow!\n", __func__);
2916 #endif
2917
2918 KASSERT(xfer->ux_nframes != 0);
2919
2920 mutex_enter(&sc->sc_lock);
2921 next = isoc->next;
2922 if (next == -1) {
2923 /* Not in use yet, schedule it a few frames ahead. */
2924 next = (UREAD2(sc, UHCI_FRNUM) + 3) % UHCI_VFRAMELIST_COUNT;
2925 DPRINTFN(2, "start next=%jd", next, 0, 0, 0);
2926 }
2927
2928 xfer->ux_status = USBD_IN_PROGRESS;
2929 ux->ux_curframe = next;
2930
2931 buf = DMAADDR(&xfer->ux_dmabuf, 0);
2932 offs = 0;
2933 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(0) |
2934 UHCI_TD_ACTIVE |
2935 UHCI_TD_IOS);
2936 nframes = xfer->ux_nframes;
2937 for (i = 0; i < nframes; i++) {
2938 std = isoc->stds[next];
2939 if (++next >= UHCI_VFRAMELIST_COUNT)
2940 next = 0;
2941 len = xfer->ux_frlengths[i];
2942 std->td.td_buffer = htole32(buf);
2943 usb_syncmem(&xfer->ux_dmabuf, offs, len,
2944 rd ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2945 if (i == nframes - 1)
2946 status |= UHCI_TD_IOC;
2947 std->td.td_status = htole32(status);
2948 std->td.td_token &= htole32(~UHCI_TD_MAXLEN_MASK);
2949 std->td.td_token |= htole32(UHCI_TD_SET_MAXLEN(len));
2950 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2951 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2952 #ifdef UHCI_DEBUG
2953 if (uhcidebug >= 5) {
2954 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2955 DPRINTF("TD %jd", i, 0, 0, 0);
2956 uhci_dump_td(std);
2957 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2958 }
2959 #endif
2960 buf += len;
2961 offs += len;
2962 }
2963 isoc->next = next;
2964 isoc->inuse += xfer->ux_nframes;
2965
2966 /* Set up interrupt info. */
2967 ux->ux_stdstart = std;
2968 ux->ux_stdend = std;
2969
2970 KASSERT(ux->ux_isdone);
2971 #ifdef DIAGNOSTIC
2972 ux->ux_isdone = false;
2973 #endif
2974 uhci_add_intr_list(sc, ux);
2975
2976 mutex_exit(&sc->sc_lock);
2977
2978 return USBD_IN_PROGRESS;
2979 }
2980
2981 void
2982 uhci_device_isoc_abort(struct usbd_xfer *xfer)
2983 {
2984 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2985 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2986 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2987 uhci_soft_td_t **stds = upipe->isoc.stds;
2988 uhci_soft_td_t *std;
2989 int i, n, nframes, maxlen, len;
2990
2991 KASSERT(mutex_owned(&sc->sc_lock));
2992
2993 /* Transfer is already done. */
2994 if (xfer->ux_status != USBD_NOT_STARTED &&
2995 xfer->ux_status != USBD_IN_PROGRESS) {
2996 return;
2997 }
2998
2999 /* Give xfer the requested abort code. */
3000 xfer->ux_status = USBD_CANCELLED;
3001
3002 /* make hardware ignore it, */
3003 nframes = xfer->ux_nframes;
3004 n = ux->ux_curframe;
3005 maxlen = 0;
3006 for (i = 0; i < nframes; i++) {
3007 std = stds[n];
3008 usb_syncmem(&std->dma,
3009 std->offs + offsetof(uhci_td_t, td_status),
3010 sizeof(std->td.td_status),
3011 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3012 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC));
3013 usb_syncmem(&std->dma,
3014 std->offs + offsetof(uhci_td_t, td_status),
3015 sizeof(std->td.td_status),
3016 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3017 usb_syncmem(&std->dma,
3018 std->offs + offsetof(uhci_td_t, td_token),
3019 sizeof(std->td.td_token),
3020 BUS_DMASYNC_POSTWRITE);
3021 len = UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token));
3022 if (len > maxlen)
3023 maxlen = len;
3024 if (++n >= UHCI_VFRAMELIST_COUNT)
3025 n = 0;
3026 }
3027
3028 /* and wait until we are sure the hardware has finished. */
3029 delay(maxlen);
3030
3031 #ifdef DIAGNOSTIC
3032 ux->ux_isdone = true;
3033 #endif
3034 /* Remove from interrupt list. */
3035 uhci_del_intr_list(sc, ux);
3036
3037 /* Run callback. */
3038 usb_transfer_complete(xfer);
3039
3040 KASSERT(mutex_owned(&sc->sc_lock));
3041 }
3042
3043 void
3044 uhci_device_isoc_close(struct usbd_pipe *pipe)
3045 {
3046 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3047 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3048 uhci_soft_td_t *std, *vstd;
3049 struct isoc *isoc;
3050 int i;
3051
3052 KASSERT(mutex_owned(&sc->sc_lock));
3053
3054 /*
3055 * Make sure all TDs are marked as inactive.
3056 * Wait for completion.
3057 * Unschedule.
3058 * Deallocate.
3059 */
3060 isoc = &upipe->isoc;
3061
3062 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3063 std = isoc->stds[i];
3064 usb_syncmem(&std->dma,
3065 std->offs + offsetof(uhci_td_t, td_status),
3066 sizeof(std->td.td_status),
3067 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3068 std->td.td_status &= htole32(~UHCI_TD_ACTIVE);
3069 usb_syncmem(&std->dma,
3070 std->offs + offsetof(uhci_td_t, td_status),
3071 sizeof(std->td.td_status),
3072 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3073 }
3074 /* wait for completion */
3075 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock);
3076
3077 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3078 std = isoc->stds[i];
3079 for (vstd = sc->sc_vframes[i].htd;
3080 vstd != NULL && vstd->link.std != std;
3081 vstd = vstd->link.std)
3082 ;
3083 if (vstd == NULL) {
3084 /*panic*/
3085 printf("%s: %p not found\n", __func__, std);
3086 mutex_exit(&sc->sc_lock);
3087 return;
3088 }
3089 vstd->link = std->link;
3090 usb_syncmem(&std->dma,
3091 std->offs + offsetof(uhci_td_t, td_link),
3092 sizeof(std->td.td_link),
3093 BUS_DMASYNC_POSTWRITE);
3094 vstd->td.td_link = std->td.td_link;
3095 usb_syncmem(&vstd->dma,
3096 vstd->offs + offsetof(uhci_td_t, td_link),
3097 sizeof(vstd->td.td_link),
3098 BUS_DMASYNC_PREWRITE);
3099 uhci_free_std_locked(sc, std);
3100 }
3101
3102 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *));
3103 }
3104
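/*
 * Set up an isochronous pipe: allocate one TD per virtual frame,
 * initialise each as an inactive isochronous TD carrying the
 * endpoint's token, and splice it into the corresponding slot of the
 * virtual frame list.
 */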
3105 usbd_status
3106 uhci_setup_isoc(struct usbd_pipe *pipe)
3107 {
3108 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3109 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3110 int addr = upipe->pipe.up_dev->ud_addr;
3111 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
3112 int rd = UE_GET_DIR(endpt) == UE_DIR_IN;
3113 uhci_soft_td_t *std, *vstd;
3114 uint32_t token;
3115 struct isoc *isoc;
3116 int i;
3117
3118 isoc = &upipe->isoc;
3119
3120 isoc->stds = kmem_alloc(
3121 UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *), KM_SLEEP);
3122 if (isoc->stds == NULL)
3123 return USBD_NOMEM;
3124
3125 token = rd ? UHCI_TD_IN (0, endpt, addr, 0) :
3126 UHCI_TD_OUT(0, endpt, addr, 0);
3127
3128 /* Allocate the TDs and mark them as inactive. */
3129 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3130 std = uhci_alloc_std(sc);
3131 if (std == NULL)
3132 goto bad;
3133 std->td.td_status = htole32(UHCI_TD_IOS); /* iso, inactive */
3134 std->td.td_token = htole32(token);
3135 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
3136 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3137 isoc->stds[i] = std;
3138 }
3139
3140 mutex_enter(&sc->sc_lock);
3141
3142 /* Insert TDs into schedule. */
3143 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3144 std = isoc->stds[i];
3145 vstd = sc->sc_vframes[i].htd;
3146 usb_syncmem(&vstd->dma,
3147 vstd->offs + offsetof(uhci_td_t, td_link),
3148 sizeof(vstd->td.td_link),
3149 BUS_DMASYNC_POSTWRITE);
3150 std->link = vstd->link;
3151 std->td.td_link = vstd->td.td_link;
3152 usb_syncmem(&std->dma,
3153 std->offs + offsetof(uhci_td_t, td_link),
3154 sizeof(std->td.td_link),
3155 BUS_DMASYNC_PREWRITE);
3156 vstd->link.std = std;
3157 vstd->td.td_link = htole32(std->physaddr | UHCI_PTR_TD);
3158 usb_syncmem(&vstd->dma,
3159 vstd->offs + offsetof(uhci_td_t, td_link),
3160 sizeof(vstd->td.td_link),
3161 BUS_DMASYNC_PREWRITE);
3162 }
3163 mutex_exit(&sc->sc_lock);
3164
3165 isoc->next = -1;
3166 isoc->inuse = 0;
3167
3168 return USBD_NORMAL_COMPLETION;
3169
3170 bad:
3171 while (--i >= 0)
3172 uhci_free_std(sc, isoc->stds[i]);
3173 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *));
3174 return USBD_NOMEM;
3175 }
3176
3177 void
3178 uhci_device_isoc_done(struct usbd_xfer *xfer)
3179 {
3180 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3181 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
3182 int i, offs;
3183 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN;
3184
3185 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3186 DPRINTFN(4, "length=%jd, ux_state=%#08jx",
3187 xfer->ux_actlen, xfer->ux_state, 0, 0);
3188
3189 #ifdef DIAGNOSTIC
3190 if (ux->ux_stdend == NULL) {
3191 printf("%s: xfer=%p stdend==NULL\n", __func__, xfer);
3192 #ifdef UHCI_DEBUG
3193 DPRINTF("--- dump start ---", 0, 0, 0, 0);
3194 uhci_dump_ii(ux);
3195 DPRINTF("--- dump end ---", 0, 0, 0, 0);
3196 #endif
3197 return;
3198 }
3199 #endif
3200
3201 /* Turn off the interrupt since it is active even if the TD is not. */
3202 usb_syncmem(&ux->ux_stdend->dma,
3203 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status),
3204 sizeof(ux->ux_stdend->td.td_status),
3205 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3206 ux->ux_stdend->td.td_status &= htole32(~UHCI_TD_IOC);
3207 usb_syncmem(&ux->ux_stdend->dma,
3208 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status),
3209 sizeof(ux->ux_stdend->td.td_status),
3210 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3211
3212 offs = 0;
3213 for (i = 0; i < xfer->ux_nframes; i++) {
3214 usb_syncmem(&xfer->ux_dmabuf, offs, xfer->ux_frlengths[i],
3215 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3216 offs += xfer->ux_frlengths[i];
3217 }
3218 }
3219
3220 void
3221 uhci_device_intr_done(struct usbd_xfer *xfer)
3222 {
3223 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
3224 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3225 uhci_soft_qh_t *sqh;
3226 int i, npoll;
3227
3228 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3229 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0);
3230
3231 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3232
3233 npoll = upipe->intr.npoll;
3234 for (i = 0; i < npoll; i++) {
3235 sqh = upipe->intr.qhs[i];
3236 sqh->elink = NULL;
3237 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3238 usb_syncmem(&sqh->dma,
3239 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3240 sizeof(sqh->qh.qh_elink),
3241 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3242 }
3243 const int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
3244 const bool isread = UE_GET_DIR(endpt) == UE_DIR_IN;
3245 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
3246 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3247 }
3248
3249 /* Deallocate request data structures */
3250 void
3251 uhci_device_ctrl_done(struct usbd_xfer *xfer)
3252 {
3253 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3254 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3255 int len = UGETW(xfer->ux_request.wLength);
3256 int isread = (xfer->ux_request.bmRequestType & UT_READ);
3257
3258 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3259
3260 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3261
3262 KASSERT(xfer->ux_rqflags & URQ_REQUEST);
3263
3264 /* XXXNH move to uhci_idone??? */
3265 if (upipe->pipe.up_dev->ud_speed == USB_SPEED_LOW)
3266 uhci_remove_ls_ctrl(sc, upipe->ctrl.sqh);
3267 else
3268 uhci_remove_hs_ctrl(sc, upipe->ctrl.sqh);
3269
3270 if (len) {
3271 usb_syncmem(&xfer->ux_dmabuf, 0, len,
3272 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3273 }
3274 usb_syncmem(&upipe->ctrl.reqdma, 0,
3275 sizeof(usb_device_request_t), BUS_DMASYNC_POSTWRITE);
3276
3277 DPRINTF("length=%jd", xfer->ux_actlen, 0, 0, 0);
3278 }
3279
3280 /* Deallocate request data structures */
3281 void
3282 uhci_device_bulk_done(struct usbd_xfer *xfer)
3283 {
3284 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3285 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3286 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
3287 int endpt = ed->bEndpointAddress;
3288 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
3289
3290 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3291 DPRINTFN(5, "xfer=%#jx sc=%#jx upipe=%#jx", (uintptr_t)xfer,
3292 (uintptr_t)sc, (uintptr_t)upipe, 0);
3293
3294 KASSERT(mutex_owned(&sc->sc_lock));
3295
3296 uhci_remove_bulk(sc, upipe->bulk.sqh);
3297
3298 if (xfer->ux_length) {
3299 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
3300 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3301 }
3302
3303 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0);
3304 }
3305
3306 /* Add an interrupt QH to the schedule.  Called with sc_lock held. */
3307 void
3308 uhci_add_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
3309 {
3310 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos];
3311 uhci_soft_qh_t *eqh;
3312
3313 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3314 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0);
3315
3316 eqh = vf->eqh;
3317 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
3318 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
3319 sqh->hlink = eqh->hlink;
3320 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
3321 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
3322 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
3323 eqh->hlink = sqh;
3324 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
3325 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
3326 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
3327 vf->eqh = sqh;
3328 vf->bandwidth++;
3329 }
3330
3331 /* Remove interrupt QH. */
3332 void
3333 uhci_remove_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
3334 {
3335 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos];
3336 uhci_soft_qh_t *pqh;
3337
3338 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3339 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0);
3340
3341 /* See comment in uhci_remove_ctrl() */
3342
3343 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
3344 sizeof(sqh->qh.qh_elink),
3345 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3346 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) {
3347 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3348 usb_syncmem(&sqh->dma,
3349 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3350 sizeof(sqh->qh.qh_elink),
3351 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3352 delay(UHCI_QH_REMOVE_DELAY);
3353 }
3354
3355 pqh = uhci_find_prev_qh(vf->hqh, sqh);
3356 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
3357 sizeof(sqh->qh.qh_hlink),
3358 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3359 pqh->hlink = sqh->hlink;
3360 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
3361 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
3362 sizeof(pqh->qh.qh_hlink),
3363 BUS_DMASYNC_PREWRITE);
3364 delay(UHCI_QH_REMOVE_DELAY);
3365 if (vf->eqh == sqh)
3366 vf->eqh = pqh;
3367 vf->bandwidth--;
3368 }
3369
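/*
 * Set up the interrupt schedule for a pipe polled every 'ival'
 * frames: allocate npoll = (UHCI_VFRAMELIST_COUNT + ival - 1) / ival
 * QHs and place them 'ival' frames apart at the offset with the least
 * bandwidth already committed; e.g. ival=32 yields 4 QHs for the
 * 128-entry virtual frame list, one every 32 frames.
 */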
3370 usbd_status
3371 uhci_device_setintr(uhci_softc_t *sc, struct uhci_pipe *upipe, int ival)
3372 {
3373 uhci_soft_qh_t *sqh;
3374 int i, npoll;
3375 u_int bestbw, bw, bestoffs, offs;
3376
3377 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3378 DPRINTFN(2, "pipe=%#jx", (uintptr_t)upipe, 0, 0, 0);
3379 if (ival == 0) {
3380 printf("%s: 0 interval\n", __func__);
3381 return USBD_INVAL;
3382 }
3383
3384 if (ival > UHCI_VFRAMELIST_COUNT)
3385 ival = UHCI_VFRAMELIST_COUNT;
3386 npoll = (UHCI_VFRAMELIST_COUNT + ival - 1) / ival;
3387 DPRINTF("ival=%jd npoll=%jd", ival, npoll, 0, 0);
3388
3389 upipe->intr.npoll = npoll;
3390 upipe->intr.qhs =
3391 kmem_alloc(npoll * sizeof(uhci_soft_qh_t *), KM_SLEEP);
3392
3393 /*
3394 * Figure out which offset in the schedule that has most
3395 * bandwidth left over.
3396 */
3397 #define MOD(i) ((i) & (UHCI_VFRAMELIST_COUNT-1))
3398 for (bestoffs = offs = 0, bestbw = ~0; offs < ival; offs++) {
3399 for (bw = i = 0; i < npoll; i++)
3400 bw += sc->sc_vframes[MOD(i * ival + offs)].bandwidth;
3401 if (bw < bestbw) {
3402 bestbw = bw;
3403 bestoffs = offs;
3404 }
3405 }
3406 DPRINTF("bw=%jd offs=%jd", bestbw, bestoffs, 0, 0);
3407 for (i = 0; i < npoll; i++) {
3408 upipe->intr.qhs[i] = sqh = uhci_alloc_sqh(sc);
3409 sqh->elink = NULL;
3410 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3411 usb_syncmem(&sqh->dma,
3412 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3413 sizeof(sqh->qh.qh_elink),
3414 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3415 sqh->pos = MOD(i * ival + bestoffs);
3416 }
3417 #undef MOD
3418
3419 mutex_enter(&sc->sc_lock);
3420 /* Enter QHs into the controller data structures. */
3421 for (i = 0; i < npoll; i++)
3422 uhci_add_intr(sc, upipe->intr.qhs[i]);
3423 mutex_exit(&sc->sc_lock);
3424
3425 DPRINTFN(5, "returns %#jx", (uintptr_t)upipe, 0, 0, 0);
3426
3427 return USBD_NORMAL_COMPLETION;
3428 }
3429
3430 /* Open a new pipe. */
3431 usbd_status
3432 uhci_open(struct usbd_pipe *pipe)
3433 {
3434 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3435 struct usbd_bus *bus = pipe->up_dev->ud_bus;
3436 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3437 usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc;
3438 usbd_status err = USBD_NOMEM;
3439 int ival;
3440
3441 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3442 DPRINTF("pipe=%#jx, addr=%jd, endpt=%jd (%jd)",
3443 (uintptr_t)pipe, pipe->up_dev->ud_addr, ed->bEndpointAddress,
3444 bus->ub_rhaddr);
3445
3446 if (sc->sc_dying)
3447 return USBD_IOERROR;
3448
3449 upipe->aborting = 0;
3450 /* toggle state needed for bulk endpoints */
3451 upipe->nexttoggle = pipe->up_endpoint->ue_toggle;
3452
3453 if (pipe->up_dev->ud_addr == bus->ub_rhaddr) {
3454 switch (ed->bEndpointAddress) {
3455 case USB_CONTROL_ENDPOINT:
3456 pipe->up_methods = &roothub_ctrl_methods;
3457 break;
3458 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
3459 pipe->up_methods = &uhci_root_intr_methods;
3460 break;
3461 default:
3462 return USBD_INVAL;
3463 }
3464 } else {
3465 switch (ed->bmAttributes & UE_XFERTYPE) {
3466 case UE_CONTROL:
3467 pipe->up_methods = &uhci_device_ctrl_methods;
3468 upipe->ctrl.sqh = uhci_alloc_sqh(sc);
3469 if (upipe->ctrl.sqh == NULL)
3470 goto bad;
3471 upipe->ctrl.setup = uhci_alloc_std(sc);
3472 if (upipe->ctrl.setup == NULL) {
3473 uhci_free_sqh(sc, upipe->ctrl.sqh);
3474 goto bad;
3475 }
3476 upipe->ctrl.stat = uhci_alloc_std(sc);
3477 if (upipe->ctrl.stat == NULL) {
3478 uhci_free_sqh(sc, upipe->ctrl.sqh);
3479 uhci_free_std(sc, upipe->ctrl.setup);
3480 goto bad;
3481 }
3482 err = usb_allocmem(&sc->sc_bus,
3483 sizeof(usb_device_request_t),
3484 0, &upipe->ctrl.reqdma);
3485 if (err) {
3486 uhci_free_sqh(sc, upipe->ctrl.sqh);
3487 uhci_free_std(sc, upipe->ctrl.setup);
3488 uhci_free_std(sc, upipe->ctrl.stat);
3489 goto bad;
3490 }
3491 break;
3492 case UE_INTERRUPT:
3493 pipe->up_methods = &uhci_device_intr_methods;
3494 ival = pipe->up_interval;
3495 if (ival == USBD_DEFAULT_INTERVAL)
3496 ival = ed->bInterval;
3497 return uhci_device_setintr(sc, upipe, ival);
3498 case UE_ISOCHRONOUS:
3499 pipe->up_serialise = false;
3500 pipe->up_methods = &uhci_device_isoc_methods;
3501 return uhci_setup_isoc(pipe);
3502 case UE_BULK:
3503 pipe->up_methods = &uhci_device_bulk_methods;
3504 upipe->bulk.sqh = uhci_alloc_sqh(sc);
3505 if (upipe->bulk.sqh == NULL)
3506 goto bad;
3507 break;
3508 }
3509 }
3510 return USBD_NORMAL_COMPLETION;
3511
3512 bad:
3513 return USBD_NOMEM;
3514 }
3515
3516 /*
3517 * Data structures and routines to emulate the root hub.
3518 */
3519 /*
3520 * The USB hub protocol requires that SET_FEATURE(PORT_RESET) also
3521 * enables the port, and also states that SET_FEATURE(PORT_ENABLE)
3522 * should not be used by the USB subsystem. As we cannot issue a
3523 * SET_FEATURE(PORT_ENABLE) externally, we must ensure that the port
3524 * will be enabled as part of the reset.
3525 *
3526 * On the VT83C572, the port cannot be successfully enabled until the
3527 * outstanding "port enable change" and "connection status change"
3528 * events have been reset.
3529 */
3530 Static usbd_status
3531 uhci_portreset(uhci_softc_t *sc, int index)
3532 {
3533 int lim, port, x;
3534 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3535
3536 if (index == 1)
3537 port = UHCI_PORTSC1;
3538 else if (index == 2)
3539 port = UHCI_PORTSC2;
3540 else
3541 return USBD_IOERROR;
3542
3543 x = URWMASK(UREAD2(sc, port));
3544 UWRITE2(sc, port, x | UHCI_PORTSC_PR);
3545
3546 usb_delay_ms(&sc->sc_bus, USB_PORT_ROOT_RESET_DELAY);
3547
3548 DPRINTF("uhci port %jd reset, status0 = %#04jx", index,
3549 UREAD2(sc, port), 0, 0);
3550
3551 x = URWMASK(UREAD2(sc, port));
3552 UWRITE2(sc, port, x & ~(UHCI_PORTSC_PR | UHCI_PORTSC_SUSP));
3553
3554 delay(100);
3555
3556 DPRINTF("uhci port %jd reset, status1 = %#04jx", index,
3557 UREAD2(sc, port), 0, 0);
3558
3559 x = URWMASK(UREAD2(sc, port));
3560 UWRITE2(sc, port, x | UHCI_PORTSC_PE);
3561
3562 for (lim = 10; --lim > 0;) {
3563 usb_delay_ms(&sc->sc_bus, USB_PORT_RESET_DELAY);
3564
3565 x = UREAD2(sc, port);
3566 DPRINTF("uhci port %jd iteration %ju, status = %#04jx", index,
3567 lim, x, 0);
3568
3569 if (!(x & UHCI_PORTSC_CCS)) {
3570 /*
3571 * No device is connected (or was disconnected
3572 * during reset). Consider the port reset.
3573 * The delay must be long enough to ensure on
3574 * the initial iteration that the device
3575 * connection will have been registered. 50ms
3576 * appears to be sufficient, but 20ms is not.
3577 */
3578 DPRINTFN(3, "uhci port %jd loop %ju, device detached",
3579 index, lim, 0, 0);
3580 break;
3581 }
3582
3583 if (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC)) {
3584 /*
3585 * The port-enable-change and/or connection-
3586 * status-change bits are set.  Clear whichever
3587 * flags are raised (by writing a 1 to that
3588 * bit), and wait again for the state to settle.
3589 */
3590 UWRITE2(sc, port, URWMASK(x) |
3591 (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC)));
3592 continue;
3593 }
3594
3595 if (x & UHCI_PORTSC_PE)
3596 /* Port is enabled */
3597 break;
3598
3599 UWRITE2(sc, port, URWMASK(x) | UHCI_PORTSC_PE);
3600 }
3601
3602 DPRINTFN(3, "uhci port %jd reset, status2 = %#04jx", index,
3603 UREAD2(sc, port), 0, 0);
3604
3605 if (lim <= 0) {
3606 DPRINTF("uhci port %jd reset timed out", index,
3607 0, 0, 0);
3608 return USBD_TIMEOUT;
3609 }
3610
3611 sc->sc_isreset = 1;
3612 return USBD_NORMAL_COMPLETION;
3613 }
3614
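/*
 * Handle hub-class and standard requests addressed to the emulated
 * root hub; requests not handled here fall through to the generic
 * usbroothub code by returning buflen.
 */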
3615 Static int
3616 uhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
3617 void *buf, int buflen)
3618 {
3619 uhci_softc_t *sc = UHCI_BUS2SC(bus);
3620 int port, x;
3621 int status, change, totlen = 0;
3622 uint16_t len, value, index;
3623 usb_port_status_t ps;
3624 usbd_status err;
3625
3626 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3627
3628 if (sc->sc_dying)
3629 return -1;
3630
3631 DPRINTF("type=%#02jx request=%02jx", req->bmRequestType,
3632 req->bRequest, 0, 0);
3633
3634 len = UGETW(req->wLength);
3635 value = UGETW(req->wValue);
3636 index = UGETW(req->wIndex);
3637
3638 #define C(x,y) ((x) | ((y) << 8))
3639 switch (C(req->bRequest, req->bmRequestType)) {
3640 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3641 DPRINTF("wValue=%#04jx", value, 0, 0, 0);
3642 if (len == 0)
3643 break;
3644 switch (value) {
3645 #define sd ((usb_string_descriptor_t *)buf)
3646 case C(2, UDESC_STRING):
3647 /* Product */
3648 totlen = usb_makestrdesc(sd, len, "UHCI root hub");
3649 break;
3650 #undef sd
3651 default:
3652 /* default from usbroothub */
3653 return buflen;
3654 }
3655 break;
3656
3657 /* Hub requests */
3658 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
3659 break;
3660 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
3661 DPRINTF("UR_CLEAR_PORT_FEATURE port=%jd feature=%jd", index,
3662 value, 0, 0);
3663 if (index == 1)
3664 port = UHCI_PORTSC1;
3665 else if (index == 2)
3666 port = UHCI_PORTSC2;
3667 else {
3668 return -1;
3669 }
3670 switch(value) {
3671 case UHF_PORT_ENABLE:
3672 x = URWMASK(UREAD2(sc, port));
3673 UWRITE2(sc, port, x & ~UHCI_PORTSC_PE);
3674 break;
3675 case UHF_PORT_SUSPEND:
3676 x = URWMASK(UREAD2(sc, port));
3677 if (!(x & UHCI_PORTSC_SUSP)) /* not suspended */
3678 break;
3679 UWRITE2(sc, port, x | UHCI_PORTSC_RD);
3680 /* see USB2 spec ch. 7.1.7.7 */
3681 usb_delay_ms(&sc->sc_bus, 20);
3682 UWRITE2(sc, port, x & ~UHCI_PORTSC_SUSP);
3683 /* 10ms resume delay must be provided by caller */
3684 break;
3685 case UHF_PORT_RESET:
3686 x = URWMASK(UREAD2(sc, port));
3687 UWRITE2(sc, port, x & ~UHCI_PORTSC_PR);
3688 break;
3689 case UHF_C_PORT_CONNECTION:
3690 x = URWMASK(UREAD2(sc, port));
3691 UWRITE2(sc, port, x | UHCI_PORTSC_CSC);
3692 break;
3693 case UHF_C_PORT_ENABLE:
3694 x = URWMASK(UREAD2(sc, port));
3695 UWRITE2(sc, port, x | UHCI_PORTSC_POEDC);
3696 break;
3697 case UHF_C_PORT_OVER_CURRENT:
3698 x = URWMASK(UREAD2(sc, port));
3699 UWRITE2(sc, port, x | UHCI_PORTSC_OCIC);
3700 break;
3701 case UHF_C_PORT_RESET:
3702 sc->sc_isreset = 0;
3703 break;
3704 case UHF_PORT_CONNECTION:
3705 case UHF_PORT_OVER_CURRENT:
3706 case UHF_PORT_POWER:
3707 case UHF_PORT_LOW_SPEED:
3708 case UHF_C_PORT_SUSPEND:
3709 default:
3710 return -1;
3711 }
3712 break;
3713 case C(UR_GET_BUS_STATE, UT_READ_CLASS_OTHER):
3714 if (index == 1)
3715 port = UHCI_PORTSC1;
3716 else if (index == 2)
3717 port = UHCI_PORTSC2;
3718 else {
3719 return -1;
3720 }
3721 if (len > 0) {
3722 *(uint8_t *)buf =
3723 UHCI_PORTSC_GET_LS(UREAD2(sc, port));
3724 totlen = 1;
3725 }
3726 break;
3727 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
3728 if (len == 0)
3729 break;
3730 if ((value & 0xff) != 0) {
3731 return -1;
3732 }
3733 usb_hub_descriptor_t hubd;
3734
3735 totlen = uimin(buflen, sizeof(hubd));
3736 memcpy(&hubd, buf, totlen);
3737 hubd.bNbrPorts = 2;
3738 memcpy(buf, &hubd, totlen);
3739 break;
3740 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
3741 if (len != 4) {
3742 return -1;
3743 }
3744 memset(buf, 0, len);
3745 totlen = len;
3746 break;
3747 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
3748 if (index == 1)
3749 port = UHCI_PORTSC1;
3750 else if (index == 2)
3751 port = UHCI_PORTSC2;
3752 else {
3753 return -1;
3754 }
3755 if (len != 4) {
3756 return -1;
3757 }
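		/*
		 * Translate the raw PORTSC bits into the generic
		 * wPortStatus/wPortChange format.
		 */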
3758 x = UREAD2(sc, port);
3759 status = change = 0;
3760 if (x & UHCI_PORTSC_CCS)
3761 status |= UPS_CURRENT_CONNECT_STATUS;
3762 if (x & UHCI_PORTSC_CSC)
3763 change |= UPS_C_CONNECT_STATUS;
3764 if (x & UHCI_PORTSC_PE)
3765 status |= UPS_PORT_ENABLED;
3766 if (x & UHCI_PORTSC_POEDC)
3767 change |= UPS_C_PORT_ENABLED;
3768 if (x & UHCI_PORTSC_OCI)
3769 status |= UPS_OVERCURRENT_INDICATOR;
3770 if (x & UHCI_PORTSC_OCIC)
3771 change |= UPS_C_OVERCURRENT_INDICATOR;
3772 if (x & UHCI_PORTSC_SUSP)
3773 status |= UPS_SUSPEND;
3774 if (x & UHCI_PORTSC_LSDA)
3775 status |= UPS_LOW_SPEED;
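		/* No per-port power switching on UHCI; always report power on. */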
3776 status |= UPS_PORT_POWER;
3777 if (sc->sc_isreset)
3778 change |= UPS_C_PORT_RESET;
3779 USETW(ps.wPortStatus, status);
3780 USETW(ps.wPortChange, change);
3781 totlen = uimin(len, sizeof(ps));
3782 memcpy(buf, &ps, totlen);
3783 break;
3784 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
3785 return -1;
3786 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
3787 break;
3788 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
3789 if (index == 1)
3790 port = UHCI_PORTSC1;
3791 else if (index == 2)
3792 port = UHCI_PORTSC2;
3793 else {
3794 return -1;
3795 }
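		/*
		 * Setting a feature either sets the corresponding PORTSC
		 * bit directly or, for PORT_RESET, runs the full reset
		 * and enable sequence in uhci_portreset().
		 */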
3796 switch(value) {
3797 case UHF_PORT_ENABLE:
3798 x = URWMASK(UREAD2(sc, port));
3799 UWRITE2(sc, port, x | UHCI_PORTSC_PE);
3800 break;
3801 case UHF_PORT_SUSPEND:
3802 x = URWMASK(UREAD2(sc, port));
3803 UWRITE2(sc, port, x | UHCI_PORTSC_SUSP);
3804 break;
3805 case UHF_PORT_RESET:
3806 err = uhci_portreset(sc, index);
3807 if (err != USBD_NORMAL_COMPLETION)
3808 return -1;
3809 return 0;
3810 case UHF_PORT_POWER:
3811 /* Pretend we turned on power */
3812 return 0;
3813 case UHF_C_PORT_CONNECTION:
3814 case UHF_C_PORT_ENABLE:
3815 case UHF_C_PORT_OVER_CURRENT:
3816 case UHF_PORT_CONNECTION:
3817 case UHF_PORT_OVER_CURRENT:
3818 case UHF_PORT_LOW_SPEED:
3819 case UHF_C_PORT_SUSPEND:
3820 case UHF_C_PORT_RESET:
3821 default:
3822 return -1;
3823 }
3824 break;
3825 default:
3826 /* default from usbroothub */
3827 DPRINTF("returning %jd (usbroothub default)",
3828 buflen, 0, 0, 0);
3829 return buflen;
3830 }
3831
3832 DPRINTF("returning %jd", totlen, 0, 0, 0);
3833
3834 return totlen;
3835 }
3836
3837 /* Abort a root interrupt request. */
3838 void
3839 uhci_root_intr_abort(struct usbd_xfer *xfer)
3840 {
3841 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3842
3843 KASSERT(mutex_owned(&sc->sc_lock));
3844 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
3845
3846 /*
3847 * Try to stop the callout before it starts. If we got in too
3848 * late, too bad; but if the callout had yet to run and time
3849 * out the xfer, cancel it ourselves.
3850 */
3851 callout_stop(&sc->sc_poll_handle);
3852 if (sc->sc_intr_xfer == NULL)
3853 return;
3854
3855 KASSERT(sc->sc_intr_xfer == xfer);
3856 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
3857 xfer->ux_status = USBD_CANCELLED;
3858 #ifdef DIAGNOSTIC
3859 UHCI_XFER2UXFER(xfer)->ux_isdone = true;
3860 #endif
3861 usb_transfer_complete(xfer);
3862 }
3863
3864 usbd_status
3865 uhci_root_intr_transfer(struct usbd_xfer *xfer)
3866 {
3867 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3868 usbd_status err;
3869
3870 /* Insert last in queue. */
3871 mutex_enter(&sc->sc_lock);
3872 err = usb_insert_transfer(xfer);
3873 mutex_exit(&sc->sc_lock);
3874 if (err)
3875 return err;
3876
3877 	/*
3878 	 * The pipe isn't running (otherwise err would be
3879 	 * USBD_IN_PROGRESS), so start the first xfer on the queue.
3880 	 */
3881 return uhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
3882 }
3883
3884 /* Start a transfer on the root interrupt pipe */
3885 usbd_status
3886 uhci_root_intr_start(struct usbd_xfer *xfer)
3887 {
3888 struct usbd_pipe *pipe = xfer->ux_pipe;
3889 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3890 unsigned int ival;
3891 const bool polling = sc->sc_bus.ub_usepolling;
3892
3893 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3894 DPRINTF("xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, xfer->ux_length,
3895 xfer->ux_flags, 0);
3896
3897 if (sc->sc_dying)
3898 return USBD_IOERROR;
3899
3900 if (!polling)
3901 mutex_enter(&sc->sc_lock);
3902
3903 KASSERT(sc->sc_intr_xfer == NULL);
3904
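	/*
	 * The root hub has no real interrupt endpoint; port changes are
	 * detected by polling the PORTSC registers from the
	 * sc_poll_handle callout at the interval requested by the hub
	 * driver.
	 */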
3905 /* XXX temporary variable needed to avoid gcc3 warning */
3906 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
3907 sc->sc_ival = mstohz(ival);
3908 callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
3909 sc->sc_intr_xfer = xfer;
3910 xfer->ux_status = USBD_IN_PROGRESS;
3911
3912 if (!polling)
3913 mutex_exit(&sc->sc_lock);
3914
3915 return USBD_IN_PROGRESS;
3916 }
3917
3918 /* Close the root interrupt pipe. */
3919 void
3920 uhci_root_intr_close(struct usbd_pipe *pipe)
3921 {
3922 uhci_softc_t *sc __diagused = UHCI_PIPE2SC(pipe);
3923 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3924
3925 KASSERT(mutex_owned(&sc->sc_lock));
3926
3927 /*
3928 * The caller must arrange to have aborted the pipe already, so
3929 * there can be no intr xfer in progress. The callout may
3930 * still be pending from a prior intr xfer -- if it has already
3931 * fired, it will see there is nothing to do, and do nothing.
3932 */
3933 KASSERT(sc->sc_intr_xfer == NULL);
3934 KASSERT(!callout_pending(&sc->sc_poll_handle));
3935 }
3936