1 /*	$NetBSD: uhci.c,v 1.319 2024/10/06 14:08:58 jakllsch Exp $	*/
2
3 /*
4 * Copyright (c) 1998, 2004, 2011, 2012, 2016, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology, Jared D. McNeill (jmcneill (at) invisible.ca),
10 * Matthew R. Green (mrg (at) eterna23.net) and Nick Hudson.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * USB Universal Host Controller driver.
36 * Handles e.g. PIIX3 and PIIX4.
37 *
38 * UHCI spec: http://www.intel.com/technology/usb/spec.htm
39 * USB spec: http://www.usb.org/developers/docs/
40 * PIIXn spec: ftp://download.intel.com/design/intarch/datashts/29055002.pdf
41 * ftp://download.intel.com/design/intarch/datashts/29056201.pdf
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: uhci.c,v 1.319 2024/10/06 14:08:58 jakllsch Exp $");
46
47 #ifdef _KERNEL_OPT
48 #include "opt_usb.h"
49 #endif
50
51 #include <sys/param.h>
52
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/device.h>
56 #include <sys/kernel.h>
57 #include <sys/kmem.h>
58 #include <sys/mutex.h>
59 #include <sys/proc.h>
60 #include <sys/queue.h>
61 #include <sys/select.h>
62 #include <sys/sysctl.h>
63 #include <sys/systm.h>
64
65 #include <machine/endian.h>
66
67 #include <dev/usb/usb.h>
68 #include <dev/usb/usbdi.h>
69 #include <dev/usb/usbdivar.h>
70 #include <dev/usb/usb_mem.h>
71
72 #include <dev/usb/uhcireg.h>
73 #include <dev/usb/uhcivar.h>
74 #include <dev/usb/usbroothub.h>
75 #include <dev/usb/usbhist.h>
76
77 /* Use bandwidth reclamation for control transfers. Some devices choke on it. */
78 /*#define UHCI_CTL_LOOP */
79
80 #ifdef UHCI_DEBUG
81 uhci_softc_t *thesc;
82 int uhcinoloop = 0;
83 #endif
84
85 #ifdef USB_DEBUG
86 #ifndef UHCI_DEBUG
87 #define uhcidebug 0
88 #else
89 static int uhcidebug = 0;
90
91 SYSCTL_SETUP(sysctl_hw_uhci_setup, "sysctl hw.uhci setup")
92 {
93 int err;
94 const struct sysctlnode *rnode;
95 const struct sysctlnode *cnode;
96
97 err = sysctl_createv(clog, 0, NULL, &rnode,
98 CTLFLAG_PERMANENT, CTLTYPE_NODE, "uhci",
99 SYSCTL_DESCR("uhci global controls"),
100 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
101
102 if (err)
103 goto fail;
104
105 /* control debugging printfs */
106 err = sysctl_createv(clog, 0, &rnode, &cnode,
107 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
108 "debug", SYSCTL_DESCR("Enable debugging output"),
109 NULL, 0, &uhcidebug, sizeof(uhcidebug), CTL_CREATE, CTL_EOL);
110 if (err)
111 goto fail;
112
113 return;
114 fail:
115 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
116 }
117
118 #endif /* UHCI_DEBUG */
119 #endif /* USB_DEBUG */
120
121 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOGN(uhcidebug,1,FMT,A,B,C,D)
122 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(uhcidebug,N,FMT,A,B,C,D)
123 #define UHCIHIST_FUNC() USBHIST_FUNC()
124 #define UHCIHIST_CALLED(name) USBHIST_CALLED(uhcidebug)
125
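/*
 * Convention for the history macros above: every call site passes
 * exactly four numeric arguments, uses %j conversions (the history
 * code stores the values in a fixed-width integer form), casts
 * pointers through uintptr_t and pads unused slots with 0, e.g.
 *
 *	DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
 */
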
126 /*
127 * The UHCI controller is little endian, so on big endian machines
128 * the data stored in memory needs to be swapped.
129 */
130
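/*
 * For example, a sketch of the pattern used throughout this file: a
 * TD link pointer is stored in little endian order and synced before
 * the controller may look at it ("next" here stands for whatever
 * element follows in the chain):
 *
 *	std->td.td_link = htole32(next->physaddr | UHCI_PTR_TD);
 *	usb_syncmem(&std->dma, std->offs, sizeof(std->td),
 *	    BUS_DMASYNC_PREWRITE);
 */
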
131 struct uhci_pipe {
132 struct usbd_pipe pipe;
133 int nexttoggle;
134
135 u_char aborting;
136 	struct usbd_xfer *abortstart, *abortend;
137
138 /* Info needed for different pipe kinds. */
139 union {
140 /* Control pipe */
141 struct {
142 uhci_soft_qh_t *sqh;
143 usb_dma_t reqdma;
144 uhci_soft_td_t *setup;
145 uhci_soft_td_t *stat;
146 } ctrl;
147 /* Interrupt pipe */
148 struct {
149 int npoll;
150 uhci_soft_qh_t **qhs;
151 } intr;
152 /* Bulk pipe */
153 struct {
154 uhci_soft_qh_t *sqh;
155 } bulk;
156 /* Isochronous pipe */
157 struct isoc {
158 uhci_soft_td_t **stds;
159 int next, inuse;
160 } isoc;
161 };
162 };
163
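/*
 * The pipe-private state above is reached from the generic pipe
 * pointer; a minimal sketch of the pattern used in uhci_check_intr()
 * and uhci_idone() below:
 *
 *	struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
 *	uhci_soft_td_t *stat = upipe->ctrl.stat;
 */
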
164 typedef TAILQ_HEAD(ux_completeq, uhci_xfer) ux_completeq_t;
165
166 Static void uhci_globalreset(uhci_softc_t *);
167 Static usbd_status uhci_portreset(uhci_softc_t*, int);
168 Static void uhci_reset(uhci_softc_t *);
169 Static usbd_status uhci_run(uhci_softc_t *, int);
170 Static uhci_soft_td_t *uhci_alloc_std(uhci_softc_t *);
171 Static void uhci_free_std(uhci_softc_t *, uhci_soft_td_t *);
172 Static void uhci_free_std_locked(uhci_softc_t *, uhci_soft_td_t *);
173 Static uhci_soft_qh_t *uhci_alloc_sqh(uhci_softc_t *);
174 Static void uhci_free_sqh(uhci_softc_t *, uhci_soft_qh_t *);
175 #if 0
176 Static void uhci_enter_ctl_q(uhci_softc_t *, uhci_soft_qh_t *,
177 uhci_intr_info_t *);
178 Static void uhci_exit_ctl_q(uhci_softc_t *, uhci_soft_qh_t *);
179 #endif
180
181 #if 0
182 Static void uhci_free_std_chain(uhci_softc_t *, uhci_soft_td_t *,
183 uhci_soft_td_t *);
184 #endif
185 Static int uhci_alloc_std_chain(uhci_softc_t *, struct usbd_xfer *,
186 int, int, uhci_soft_td_t **);
187 Static void uhci_free_stds(uhci_softc_t *, struct uhci_xfer *);
188
189 Static void uhci_reset_std_chain(uhci_softc_t *, struct usbd_xfer *,
190 int, int, int *, uhci_soft_td_t **);
191
192 Static void uhci_poll_hub(void *);
193 Static void uhci_check_intr(uhci_softc_t *, struct uhci_xfer *,
194 ux_completeq_t *);
195 Static void uhci_idone(struct uhci_xfer *, ux_completeq_t *);
196
197 Static void uhci_abortx(struct usbd_xfer *);
198
199 Static void uhci_add_ls_ctrl(uhci_softc_t *, uhci_soft_qh_t *);
200 Static void uhci_add_hs_ctrl(uhci_softc_t *, uhci_soft_qh_t *);
201 Static void uhci_add_bulk(uhci_softc_t *, uhci_soft_qh_t *);
202 Static void uhci_remove_ls_ctrl(uhci_softc_t *,uhci_soft_qh_t *);
203 Static void uhci_remove_hs_ctrl(uhci_softc_t *,uhci_soft_qh_t *);
204 Static void uhci_remove_bulk(uhci_softc_t *,uhci_soft_qh_t *);
205 Static void uhci_add_loop(uhci_softc_t *);
206 Static void uhci_rem_loop(uhci_softc_t *);
207
208 Static usbd_status uhci_setup_isoc(struct usbd_pipe *);
209
210 Static struct usbd_xfer *
211 uhci_allocx(struct usbd_bus *, unsigned int);
212 Static void uhci_freex(struct usbd_bus *, struct usbd_xfer *);
213 Static bool uhci_dying(struct usbd_bus *);
214 Static void uhci_get_lock(struct usbd_bus *, kmutex_t **);
215 Static int uhci_roothub_ctrl(struct usbd_bus *,
216 usb_device_request_t *, void *, int);
217
218 Static int uhci_device_ctrl_init(struct usbd_xfer *);
219 Static void uhci_device_ctrl_fini(struct usbd_xfer *);
220 Static usbd_status uhci_device_ctrl_transfer(struct usbd_xfer *);
221 Static usbd_status uhci_device_ctrl_start(struct usbd_xfer *);
222 Static void uhci_device_ctrl_abort(struct usbd_xfer *);
223 Static void uhci_device_ctrl_close(struct usbd_pipe *);
224 Static void uhci_device_ctrl_done(struct usbd_xfer *);
225
226 Static int uhci_device_intr_init(struct usbd_xfer *);
227 Static void uhci_device_intr_fini(struct usbd_xfer *);
228 Static usbd_status uhci_device_intr_transfer(struct usbd_xfer *);
229 Static usbd_status uhci_device_intr_start(struct usbd_xfer *);
230 Static void uhci_device_intr_abort(struct usbd_xfer *);
231 Static void uhci_device_intr_close(struct usbd_pipe *);
232 Static void uhci_device_intr_done(struct usbd_xfer *);
233
234 Static int uhci_device_bulk_init(struct usbd_xfer *);
235 Static void uhci_device_bulk_fini(struct usbd_xfer *);
236 Static usbd_status uhci_device_bulk_transfer(struct usbd_xfer *);
237 Static usbd_status uhci_device_bulk_start(struct usbd_xfer *);
238 Static void uhci_device_bulk_abort(struct usbd_xfer *);
239 Static void uhci_device_bulk_close(struct usbd_pipe *);
240 Static void uhci_device_bulk_done(struct usbd_xfer *);
241
242 Static int uhci_device_isoc_init(struct usbd_xfer *);
243 Static void uhci_device_isoc_fini(struct usbd_xfer *);
244 Static usbd_status uhci_device_isoc_transfer(struct usbd_xfer *);
245 Static void uhci_device_isoc_abort(struct usbd_xfer *);
246 Static void uhci_device_isoc_close(struct usbd_pipe *);
247 Static void uhci_device_isoc_done(struct usbd_xfer *);
248
249 Static usbd_status uhci_root_intr_transfer(struct usbd_xfer *);
250 Static usbd_status uhci_root_intr_start(struct usbd_xfer *);
251 Static void uhci_root_intr_abort(struct usbd_xfer *);
252 Static void uhci_root_intr_close(struct usbd_pipe *);
253 Static void uhci_root_intr_done(struct usbd_xfer *);
254
255 Static usbd_status uhci_open(struct usbd_pipe *);
256 Static void uhci_poll(struct usbd_bus *);
257 Static void uhci_softintr(void *);
258
259 Static void uhci_add_intr(uhci_softc_t *, uhci_soft_qh_t *);
260 Static void uhci_remove_intr(uhci_softc_t *, uhci_soft_qh_t *);
261 Static usbd_status uhci_device_setintr(uhci_softc_t *,
262 struct uhci_pipe *, int);
263
264 Static void uhci_device_clear_toggle(struct usbd_pipe *);
265 Static void uhci_noop(struct usbd_pipe *);
266
267 static inline uhci_soft_qh_t *
268 uhci_find_prev_qh(uhci_soft_qh_t *, uhci_soft_qh_t *);
269
270 #ifdef UHCI_DEBUG
271 Static void uhci_dump_all(uhci_softc_t *);
272 Static void uhci_dumpregs(uhci_softc_t *);
273 Static void uhci_dump_qhs(uhci_soft_qh_t *);
274 Static void uhci_dump_qh(uhci_soft_qh_t *);
275 Static void uhci_dump_tds(uhci_soft_td_t *);
276 Static void uhci_dump_td(uhci_soft_td_t *);
277 Static void uhci_dump_ii(struct uhci_xfer *);
278 void uhci_dump(void);
279 #endif
280
281 #define UBARR(sc) bus_space_barrier((sc)->iot, (sc)->ioh, 0, (sc)->sc_size, \
282 BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE)
283 #define UWRITE1(sc, r, x) \
284 do { UBARR(sc); bus_space_write_1((sc)->iot, (sc)->ioh, (r), (x)); \
285 } while (/*CONSTCOND*/0)
286 #define UWRITE2(sc, r, x) \
287 do { UBARR(sc); bus_space_write_2((sc)->iot, (sc)->ioh, (r), (x)); \
288 } while (/*CONSTCOND*/0)
289 #define UWRITE4(sc, r, x) \
290 do { UBARR(sc); bus_space_write_4((sc)->iot, (sc)->ioh, (r), (x)); \
291 } while (/*CONSTCOND*/0)
292
293 static __inline uint8_t
294 UREAD1(uhci_softc_t *sc, bus_size_t r)
295 {
296
297 UBARR(sc);
298 return bus_space_read_1(sc->iot, sc->ioh, r);
299 }
300
301 static __inline uint16_t
302 UREAD2(uhci_softc_t *sc, bus_size_t r)
303 {
304
305 UBARR(sc);
306 return bus_space_read_2(sc->iot, sc->ioh, r);
307 }
308
309 #ifdef UHCI_DEBUG
310 static __inline uint32_t
311 UREAD4(uhci_softc_t *sc, bus_size_t r)
312 {
313
314 UBARR(sc);
315 return bus_space_read_4(sc->iot, sc->ioh, r);
316 }
317 #endif
318
319 #define UHCICMD(sc, cmd) UWRITE2(sc, UHCI_CMD, cmd)
320 #define UHCISTS(sc) UREAD2(sc, UHCI_STS)
321
322 #define UHCI_RESET_TIMEOUT 100 /* ms, reset timeout */
323
324 #define UHCI_CURFRAME(sc) (UREAD2(sc, UHCI_FRNUM) & UHCI_FRNUM_MASK)
325
326 const struct usbd_bus_methods uhci_bus_methods = {
327 .ubm_open = uhci_open,
328 .ubm_softint = uhci_softintr,
329 .ubm_dopoll = uhci_poll,
330 .ubm_allocx = uhci_allocx,
331 .ubm_freex = uhci_freex,
332 .ubm_abortx = uhci_abortx,
333 .ubm_dying = uhci_dying,
334 .ubm_getlock = uhci_get_lock,
335 .ubm_rhctrl = uhci_roothub_ctrl,
336 };
337
338 const struct usbd_pipe_methods uhci_root_intr_methods = {
339 .upm_transfer = uhci_root_intr_transfer,
340 .upm_start = uhci_root_intr_start,
341 .upm_abort = uhci_root_intr_abort,
342 .upm_close = uhci_root_intr_close,
343 .upm_cleartoggle = uhci_noop,
344 .upm_done = uhci_root_intr_done,
345 };
346
347 const struct usbd_pipe_methods uhci_device_ctrl_methods = {
348 .upm_init = uhci_device_ctrl_init,
349 .upm_fini = uhci_device_ctrl_fini,
350 .upm_transfer = uhci_device_ctrl_transfer,
351 .upm_start = uhci_device_ctrl_start,
352 .upm_abort = uhci_device_ctrl_abort,
353 .upm_close = uhci_device_ctrl_close,
354 .upm_cleartoggle = uhci_noop,
355 .upm_done = uhci_device_ctrl_done,
356 };
357
358 const struct usbd_pipe_methods uhci_device_intr_methods = {
359 .upm_init = uhci_device_intr_init,
360 .upm_fini = uhci_device_intr_fini,
361 .upm_transfer = uhci_device_intr_transfer,
362 .upm_start = uhci_device_intr_start,
363 .upm_abort = uhci_device_intr_abort,
364 .upm_close = uhci_device_intr_close,
365 .upm_cleartoggle = uhci_device_clear_toggle,
366 .upm_done = uhci_device_intr_done,
367 };
368
369 const struct usbd_pipe_methods uhci_device_bulk_methods = {
370 .upm_init = uhci_device_bulk_init,
371 .upm_fini = uhci_device_bulk_fini,
372 .upm_transfer = uhci_device_bulk_transfer,
373 .upm_start = uhci_device_bulk_start,
374 .upm_abort = uhci_device_bulk_abort,
375 .upm_close = uhci_device_bulk_close,
376 .upm_cleartoggle = uhci_device_clear_toggle,
377 .upm_done = uhci_device_bulk_done,
378 };
379
380 const struct usbd_pipe_methods uhci_device_isoc_methods = {
381 .upm_init = uhci_device_isoc_init,
382 .upm_fini = uhci_device_isoc_fini,
383 .upm_transfer = uhci_device_isoc_transfer,
384 .upm_abort = uhci_device_isoc_abort,
385 .upm_close = uhci_device_isoc_close,
386 .upm_cleartoggle = uhci_noop,
387 .upm_done = uhci_device_isoc_done,
388 };
389
390 static inline void
391 uhci_add_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux)
392 {
393
394 TAILQ_INSERT_TAIL(&sc->sc_intrhead, ux, ux_list);
395 }
396
397 static inline void
398 uhci_del_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux)
399 {
400
401 TAILQ_REMOVE(&sc->sc_intrhead, ux, ux_list);
402 }
403
404 static inline uhci_soft_qh_t *
405 uhci_find_prev_qh(uhci_soft_qh_t *pqh, uhci_soft_qh_t *sqh)
406 {
407 UHCIHIST_FUNC(); UHCIHIST_CALLED();
408 DPRINTFN(15, "pqh=%#jx sqh=%#jx", (uintptr_t)pqh, (uintptr_t)sqh, 0, 0);
409
410 for (; pqh->hlink != sqh; pqh = pqh->hlink) {
411 #if defined(DIAGNOSTIC) || defined(UHCI_DEBUG)
412 usb_syncmem(&pqh->dma,
413 pqh->offs + offsetof(uhci_qh_t, qh_hlink),
414 sizeof(pqh->qh.qh_hlink),
415 BUS_DMASYNC_POSTWRITE);
416 if (le32toh(pqh->qh.qh_hlink) & UHCI_PTR_T) {
417 printf("%s: QH not found\n", __func__);
418 return NULL;
419 }
420 #endif
421 }
422 return pqh;
423 }
424
425 void
426 uhci_globalreset(uhci_softc_t *sc)
427 {
428 UHCICMD(sc, UHCI_CMD_GRESET); /* global reset */
429 usb_delay_ms(&sc->sc_bus, USB_BUS_RESET_DELAY); /* wait a little */
430 UHCICMD(sc, 0); /* do nothing */
431 }
432
433 int
434 uhci_init(uhci_softc_t *sc)
435 {
436 int i, j;
437 uhci_soft_qh_t *clsqh, *chsqh, *bsqh, *sqh, *lsqh;
438 uhci_soft_td_t *std;
439
440 UHCIHIST_FUNC(); UHCIHIST_CALLED();
441
442 #ifdef UHCI_DEBUG
443 thesc = sc;
444
445 if (uhcidebug >= 2)
446 uhci_dumpregs(sc);
447 #endif
448
449 sc->sc_suspend = PWR_RESUME;
450
451 UWRITE2(sc, UHCI_INTR, 0); /* disable interrupts */
452 uhci_globalreset(sc); /* reset the controller */
453 uhci_reset(sc);
454
455 /* Allocate and initialize real frame array. */
456 int err = usb_allocmem(sc->sc_bus.ub_dmatag,
457 UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t),
458 UHCI_FRAMELIST_ALIGN, USBMALLOC_COHERENT, &sc->sc_dma);
459 if (err)
460 return err;
461 sc->sc_pframes = KERNADDR(&sc->sc_dma, 0);
462 /* set frame number to 0 */
463 UWRITE2(sc, UHCI_FRNUM, 0);
464 /* set frame list */
465 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0));
466
467 /* Initialise mutex early for uhci_alloc_* */
468 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
469 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
470
471 /*
472 * Allocate a TD, inactive, that hangs from the last QH.
473 * This is to avoid a bug in the PIIX that makes it run berserk
474 * otherwise.
475 */
476 std = uhci_alloc_std(sc);
477 if (std == NULL)
478 return ENOMEM;
479 std->link.std = NULL;
480 std->td.td_link = htole32(UHCI_PTR_T);
481 std->td.td_status = htole32(0); /* inactive */
482 std->td.td_token = htole32(0);
483 std->td.td_buffer = htole32(0);
484 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
485 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
486
487 /* Allocate the dummy QH marking the end and used for looping the QHs.*/
488 lsqh = uhci_alloc_sqh(sc);
489 if (lsqh == NULL)
490 goto fail1;
491 lsqh->hlink = NULL;
492 lsqh->qh.qh_hlink = htole32(UHCI_PTR_T); /* end of QH chain */
493 lsqh->elink = std;
494 lsqh->qh.qh_elink = htole32(std->physaddr | UHCI_PTR_TD);
495 sc->sc_last_qh = lsqh;
496 usb_syncmem(&lsqh->dma, lsqh->offs, sizeof(lsqh->qh),
497 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
498
499 /* Allocate the dummy QH where bulk traffic will be queued. */
500 bsqh = uhci_alloc_sqh(sc);
501 if (bsqh == NULL)
502 goto fail2;
503 bsqh->hlink = lsqh;
504 bsqh->qh.qh_hlink = htole32(lsqh->physaddr | UHCI_PTR_QH);
505 bsqh->elink = NULL;
506 bsqh->qh.qh_elink = htole32(UHCI_PTR_T);
507 sc->sc_bulk_start = sc->sc_bulk_end = bsqh;
508 usb_syncmem(&bsqh->dma, bsqh->offs, sizeof(bsqh->qh),
509 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
510
511 /* Allocate dummy QH where high speed control traffic will be queued. */
512 chsqh = uhci_alloc_sqh(sc);
513 if (chsqh == NULL)
514 goto fail3;
515 chsqh->hlink = bsqh;
516 chsqh->qh.qh_hlink = htole32(bsqh->physaddr | UHCI_PTR_QH);
517 chsqh->elink = NULL;
518 chsqh->qh.qh_elink = htole32(UHCI_PTR_T);
519 sc->sc_hctl_start = sc->sc_hctl_end = chsqh;
520 usb_syncmem(&chsqh->dma, chsqh->offs, sizeof(chsqh->qh),
521 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
522
523 /* Allocate dummy QH where control traffic will be queued. */
524 clsqh = uhci_alloc_sqh(sc);
525 if (clsqh == NULL)
526 goto fail4;
527 clsqh->hlink = chsqh;
528 clsqh->qh.qh_hlink = htole32(chsqh->physaddr | UHCI_PTR_QH);
529 clsqh->elink = NULL;
530 clsqh->qh.qh_elink = htole32(UHCI_PTR_T);
531 sc->sc_lctl_start = sc->sc_lctl_end = clsqh;
532 usb_syncmem(&clsqh->dma, clsqh->offs, sizeof(clsqh->qh),
533 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
534
535 /*
536 * Make all (virtual) frame list pointers point to the interrupt
537 	 * queue heads, point the interrupt queue heads at the control
538 	 * queue head, and point the physical frame list to the virtual.
539 */
540 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
541 std = uhci_alloc_std(sc);
542 sqh = uhci_alloc_sqh(sc);
543 if (std == NULL || sqh == NULL)
544 			return ENOMEM;
545 std->link.sqh = sqh;
546 std->td.td_link = htole32(sqh->physaddr | UHCI_PTR_QH);
547 std->td.td_status = htole32(UHCI_TD_IOS); /* iso, inactive */
548 std->td.td_token = htole32(0);
549 std->td.td_buffer = htole32(0);
550 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
551 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
552 sqh->hlink = clsqh;
553 sqh->qh.qh_hlink = htole32(clsqh->physaddr | UHCI_PTR_QH);
554 sqh->elink = NULL;
555 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
556 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
557 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
558 sc->sc_vframes[i].htd = std;
559 sc->sc_vframes[i].etd = std;
560 sc->sc_vframes[i].hqh = sqh;
561 sc->sc_vframes[i].eqh = sqh;
562 for (j = i;
563 j < UHCI_FRAMELIST_COUNT;
564 j += UHCI_VFRAMELIST_COUNT)
565 sc->sc_pframes[j] = htole32(std->physaddr);
566 }
567 usb_syncmem(&sc->sc_dma, 0,
568 UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t),
569 BUS_DMASYNC_PREWRITE);
570
571
572 TAILQ_INIT(&sc->sc_intrhead);
573
574 sc->sc_xferpool = pool_cache_init(sizeof(struct uhci_xfer), 0, 0, 0,
575 "uhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
576
577 callout_init(&sc->sc_poll_handle, CALLOUT_MPSAFE);
578 callout_setfunc(&sc->sc_poll_handle, uhci_poll_hub, sc);
579
580 /* Set up the bus struct. */
581 sc->sc_bus.ub_methods = &uhci_bus_methods;
582 sc->sc_bus.ub_pipesize = sizeof(struct uhci_pipe);
583 sc->sc_bus.ub_usedma = true;
584 sc->sc_bus.ub_dmaflags = USBMALLOC_MULTISEG;
585
586 UHCICMD(sc, UHCI_CMD_MAXP); /* Assume 64 byte packets at frame end */
587
588 DPRINTF("Enabling...", 0, 0, 0, 0);
589
590 err = uhci_run(sc, 1); /* and here we go... */
591 UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE | UHCI_INTR_RIE |
592 UHCI_INTR_IOCE | UHCI_INTR_SPIE); /* enable interrupts */
593 return err;
594
595 fail4:
596 uhci_free_sqh(sc, chsqh);
597 fail3:
598 	uhci_free_sqh(sc, bsqh);
599 fail2:
600 uhci_free_sqh(sc, lsqh);
601 fail1:
602 uhci_free_std(sc, std);
603
604 return ENOMEM;
605 }
606
607 int
608 uhci_activate(device_t self, enum devact act)
609 {
610 struct uhci_softc *sc = device_private(self);
611
612 switch (act) {
613 case DVACT_DEACTIVATE:
614 sc->sc_dying = 1;
615 return 0;
616 default:
617 return EOPNOTSUPP;
618 }
619 }
620
621 void
622 uhci_childdet(device_t self, device_t child)
623 {
624 struct uhci_softc *sc = device_private(self);
625
626 KASSERT(sc->sc_child == child);
627 sc->sc_child = NULL;
628 }
629
630 int
631 uhci_detach(struct uhci_softc *sc, int flags)
632 {
633 int rv = 0;
634
635 if (sc->sc_child != NULL)
636 rv = config_detach(sc->sc_child, flags);
637
638 if (rv != 0)
639 return rv;
640
641 KASSERT(sc->sc_intr_xfer == NULL);
642 callout_halt(&sc->sc_poll_handle, NULL);
643 callout_destroy(&sc->sc_poll_handle);
644
645 mutex_destroy(&sc->sc_lock);
646 mutex_destroy(&sc->sc_intr_lock);
647
648 pool_cache_destroy(sc->sc_xferpool);
649
650 /* XXX free other data structures XXX */
651
652 return rv;
653 }
654
655 struct usbd_xfer *
656 uhci_allocx(struct usbd_bus *bus, unsigned int nframes)
657 {
658 struct uhci_softc *sc = UHCI_BUS2SC(bus);
659 struct usbd_xfer *xfer;
660
661 xfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
662 if (xfer != NULL) {
663 memset(xfer, 0, sizeof(struct uhci_xfer));
664
665 #ifdef DIAGNOSTIC
666 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
667 uxfer->ux_isdone = true;
668 xfer->ux_state = XFER_BUSY;
669 #endif
670 }
671 return xfer;
672 }
673
674 void
675 uhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
676 {
677 struct uhci_softc *sc = UHCI_BUS2SC(bus);
678 struct uhci_xfer *uxfer __diagused = UHCI_XFER2UXFER(xfer);
679
680 KASSERTMSG(xfer->ux_state == XFER_BUSY ||
681 xfer->ux_status == USBD_NOT_STARTED,
682 "xfer %p state %d\n", xfer, xfer->ux_state);
683 KASSERTMSG(uxfer->ux_isdone || xfer->ux_status == USBD_NOT_STARTED,
684 "xfer %p not done\n", xfer);
685 #ifdef DIAGNOSTIC
686 xfer->ux_state = XFER_FREE;
687 #endif
688 pool_cache_put(sc->sc_xferpool, xfer);
689 }
690
691 Static bool
692 uhci_dying(struct usbd_bus *bus)
693 {
694 struct uhci_softc *sc = UHCI_BUS2SC(bus);
695
696 return sc->sc_dying;
697 }
698
699 Static void
700 uhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
701 {
702 struct uhci_softc *sc = UHCI_BUS2SC(bus);
703
704 *lock = &sc->sc_lock;
705 }
706
707
708 /*
709 * Handle suspend/resume.
710 *
711 * We need to switch to polling mode here, because this routine is
712 * called from an interrupt context. This is all right since we
713 * are almost suspended anyway.
714 */
715 bool
716 uhci_resume(device_t dv, const pmf_qual_t *qual)
717 {
718 uhci_softc_t *sc = device_private(dv);
719 int cmd;
720
721 cmd = UREAD2(sc, UHCI_CMD);
722 UWRITE2(sc, UHCI_INTR, 0);
723 uhci_globalreset(sc);
724 uhci_reset(sc);
725 if (cmd & UHCI_CMD_RS)
726 uhci_run(sc, 0);
727
728 /* restore saved state */
729 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0));
730 UWRITE2(sc, UHCI_FRNUM, sc->sc_saved_frnum);
731 UWRITE1(sc, UHCI_SOF, sc->sc_saved_sof);
732
733 UHCICMD(sc, cmd | UHCI_CMD_FGR); /* force resume */
734 usb_delay_ms(&sc->sc_bus, USB_RESUME_DELAY);
735 UHCICMD(sc, cmd & ~UHCI_CMD_EGSM); /* back to normal */
736 UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE |
737 UHCI_INTR_RIE | UHCI_INTR_IOCE | UHCI_INTR_SPIE);
738 UHCICMD(sc, UHCI_CMD_MAXP);
739 uhci_run(sc, 1); /* and start traffic again */
740 usb_delay_ms(&sc->sc_bus, USB_RESUME_RECOVERY);
741 #ifdef UHCI_DEBUG
742 if (uhcidebug >= 2)
743 uhci_dumpregs(sc);
744 #endif
745
746 mutex_enter(&sc->sc_lock);
747 sc->sc_suspend = PWR_RESUME;
748 if (sc->sc_intr_xfer != NULL)
749 callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
750 mutex_exit(&sc->sc_lock);
751
752 return true;
753 }
754
755 bool
756 uhci_suspend(device_t dv, const pmf_qual_t *qual)
757 {
758 uhci_softc_t *sc = device_private(dv);
759 int cmd;
760
761 mutex_enter(&sc->sc_lock);
762 sc->sc_suspend = PWR_SUSPEND;
763 if (sc->sc_intr_xfer != NULL)
764 callout_halt(&sc->sc_poll_handle, &sc->sc_lock);
765 mutex_exit(&sc->sc_lock);
766
767 cmd = UREAD2(sc, UHCI_CMD);
768
769 #ifdef UHCI_DEBUG
770 if (uhcidebug >= 2)
771 uhci_dumpregs(sc);
772 #endif
773
774 uhci_run(sc, 0); /* stop the controller */
775 cmd &= ~UHCI_CMD_RS;
776
777 /* save some state if BIOS doesn't */
778 sc->sc_saved_frnum = UREAD2(sc, UHCI_FRNUM);
779 sc->sc_saved_sof = UREAD1(sc, UHCI_SOF);
780
781 UWRITE2(sc, UHCI_INTR, 0); /* disable intrs */
782
783 UHCICMD(sc, cmd | UHCI_CMD_EGSM); /* enter suspend */
784 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
785
786 return true;
787 }
788
789 #ifdef UHCI_DEBUG
790 Static void
791 uhci_dumpregs(uhci_softc_t *sc)
792 {
793 UHCIHIST_FUNC(); UHCIHIST_CALLED();
794 DPRINTF("cmd =%04jx sts =%04jx intr =%04jx frnum =%04jx",
795 UREAD2(sc, UHCI_CMD), UREAD2(sc, UHCI_STS),
796 UREAD2(sc, UHCI_INTR), UREAD2(sc, UHCI_FRNUM));
797 DPRINTF("sof =%04jx portsc1=%04jx portsc2=%04jx flbase=%08jx",
798 UREAD1(sc, UHCI_SOF), UREAD2(sc, UHCI_PORTSC1),
799 UREAD2(sc, UHCI_PORTSC2), UREAD4(sc, UHCI_FLBASEADDR));
800 }
801
802 void
803 uhci_dump_td(uhci_soft_td_t *p)
804 {
805 UHCIHIST_FUNC(); UHCIHIST_CALLED();
806
807 usb_syncmem(&p->dma, p->offs, sizeof(p->td),
808 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
809
810 DPRINTF("TD(%#jx) at 0x%08jx", (uintptr_t)p, p->physaddr, 0, 0);
811 DPRINTF(" link=0x%08jx status=0x%08jx "
812 	    "token=0x%08jx buffer=0x%08jx",
813 le32toh(p->td.td_link),
814 le32toh(p->td.td_status),
815 le32toh(p->td.td_token),
816 le32toh(p->td.td_buffer));
817
818 DPRINTF("bitstuff=%jd crcto =%jd nak =%jd babble =%jd",
819 !!(le32toh(p->td.td_status) & UHCI_TD_BITSTUFF),
820 !!(le32toh(p->td.td_status) & UHCI_TD_CRCTO),
821 !!(le32toh(p->td.td_status) & UHCI_TD_NAK),
822 !!(le32toh(p->td.td_status) & UHCI_TD_BABBLE));
823 DPRINTF("dbuffer =%jd stalled =%jd active =%jd ioc =%jd",
824 !!(le32toh(p->td.td_status) & UHCI_TD_DBUFFER),
825 !!(le32toh(p->td.td_status) & UHCI_TD_STALLED),
826 !!(le32toh(p->td.td_status) & UHCI_TD_ACTIVE),
827 !!(le32toh(p->td.td_status) & UHCI_TD_IOC));
828 DPRINTF("ios =%jd ls =%jd spd =%jd",
829 !!(le32toh(p->td.td_status) & UHCI_TD_IOS),
830 !!(le32toh(p->td.td_status) & UHCI_TD_LS),
831 !!(le32toh(p->td.td_status) & UHCI_TD_SPD), 0);
832 	DPRINTF("errcnt =%jd actlen =%jd pid=%02jx",
833 UHCI_TD_GET_ERRCNT(le32toh(p->td.td_status)),
834 UHCI_TD_GET_ACTLEN(le32toh(p->td.td_status)),
835 UHCI_TD_GET_PID(le32toh(p->td.td_token)), 0);
836 DPRINTF("addr=%jd endpt=%jd D=%jd maxlen=%jd,",
837 UHCI_TD_GET_DEVADDR(le32toh(p->td.td_token)),
838 UHCI_TD_GET_ENDPT(le32toh(p->td.td_token)),
839 UHCI_TD_GET_DT(le32toh(p->td.td_token)),
840 UHCI_TD_GET_MAXLEN(le32toh(p->td.td_token)));
841 }
842
843 void
844 uhci_dump_qh(uhci_soft_qh_t *sqh)
845 {
846 UHCIHIST_FUNC(); UHCIHIST_CALLED();
847
848 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
849 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
850
851 DPRINTF("QH(%#jx) at 0x%08jx: hlink=%08jx elink=%08jx", (uintptr_t)sqh,
852 (int)sqh->physaddr, le32toh(sqh->qh.qh_hlink),
853 le32toh(sqh->qh.qh_elink));
854
855 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD);
856 }
857
858
859 #if 1
860 void
861 uhci_dump(void)
862 {
863 uhci_dump_all(thesc);
864 }
865 #endif
866
867 void
868 uhci_dump_all(uhci_softc_t *sc)
869 {
870 uhci_dumpregs(sc);
871 /*printf("framelist[i].link = %08x\n", sc->sc_framelist[0].link);*/
872 uhci_dump_qhs(sc->sc_lctl_start);
873 }
874
875
876 void
877 uhci_dump_qhs(uhci_soft_qh_t *sqh)
878 {
879 UHCIHIST_FUNC(); UHCIHIST_CALLED();
880
881 uhci_dump_qh(sqh);
882
883 /*
884 * uhci_dump_qhs displays all the QHs and TDs from the given QH onwards
885 * Traverses sideways first, then down.
886 *
887 * QH1
888 * QH2
889 * No QH
890 * TD2.1
891 * TD2.2
892 * TD1.1
893 * etc.
894 *
895 	 * TD2.x being the TDs queued at QH2, which in turn is referenced from QH1.
896 */
897
898 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
899 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
900 if (sqh->hlink != NULL && !(le32toh(sqh->qh.qh_hlink) & UHCI_PTR_T))
901 uhci_dump_qhs(sqh->hlink);
902 else
903 DPRINTF("No QH", 0, 0, 0, 0);
904 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD);
905
906 if (sqh->elink != NULL && !(le32toh(sqh->qh.qh_elink) & UHCI_PTR_T))
907 uhci_dump_tds(sqh->elink);
908 else
909 		DPRINTF("No TD", 0, 0, 0, 0);
910 }
911
912 void
913 uhci_dump_tds(uhci_soft_td_t *std)
914 {
915 uhci_soft_td_t *td;
916 int stop;
917
918 for (td = std; td != NULL; td = td->link.std) {
919 uhci_dump_td(td);
920
921 /*
922 * Check whether the link pointer in this TD marks
923 		 * the end of the queue. This avoids
924 * printing the free list in case the queue/TD has
925 * already been moved there (seatbelt).
926 */
927 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link),
928 sizeof(td->td.td_link),
929 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
930 stop = (le32toh(td->td.td_link) & UHCI_PTR_T ||
931 le32toh(td->td.td_link) == 0);
932 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link),
933 sizeof(td->td.td_link), BUS_DMASYNC_PREREAD);
934 if (stop)
935 break;
936 }
937 }
938
939 Static void
940 uhci_dump_ii(struct uhci_xfer *ux)
941 {
942 struct usbd_pipe *pipe;
943 usb_endpoint_descriptor_t *ed;
944 struct usbd_device *dev;
945
946 if (ux == NULL) {
947 printf("ux NULL\n");
948 return;
949 }
950 pipe = ux->ux_xfer.ux_pipe;
951 if (pipe == NULL) {
952 printf("ux %p: done=%d pipe=NULL\n", ux, ux->ux_isdone);
953 return;
954 }
955 if (pipe->up_endpoint == NULL) {
956 printf("ux %p: done=%d pipe=%p pipe->up_endpoint=NULL\n",
957 ux, ux->ux_isdone, pipe);
958 return;
959 }
960 if (pipe->up_dev == NULL) {
961 printf("ux %p: done=%d pipe=%p pipe->up_dev=NULL\n",
962 ux, ux->ux_isdone, pipe);
963 return;
964 }
965 ed = pipe->up_endpoint->ue_edesc;
966 dev = pipe->up_dev;
967 printf("ux %p: done=%d dev=%p vid=0x%04x pid=0x%04x addr=%d pipe=%p ep=0x%02x attr=0x%02x\n",
968 ux, ux->ux_isdone, dev,
969 UGETW(dev->ud_ddesc.idVendor),
970 UGETW(dev->ud_ddesc.idProduct),
971 dev->ud_addr, pipe,
972 ed->bEndpointAddress, ed->bmAttributes);
973 }
974
975 void uhci_dump_iis(struct uhci_softc *sc);
976 void
977 uhci_dump_iis(struct uhci_softc *sc)
978 {
979 struct uhci_xfer *ux;
980
981 printf("interrupt list:\n");
982 TAILQ_FOREACH(ux, &sc->sc_intrhead, ux_list)
983 uhci_dump_ii(ux);
984 }
985
986 void iidump(void);
987 void iidump(void) { uhci_dump_iis(thesc); }
988
989 #endif
990
991 /*
992 * This routine is executed periodically and simulates interrupts
993  * from the root hub interrupt pipe for port status changes.
994 */
995 void
996 uhci_poll_hub(void *addr)
997 {
998 struct uhci_softc *sc = addr;
999 struct usbd_xfer *xfer;
1000 u_char *p;
1001
1002 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1003
1004 mutex_enter(&sc->sc_lock);
1005
1006 /*
1007 * If the intr xfer has completed or been synchronously
1008 * aborted, we have nothing to do.
1009 */
1010 xfer = sc->sc_intr_xfer;
1011 if (xfer == NULL)
1012 goto out;
1013 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
1014
1015 /*
1016 * If the intr xfer for which we were scheduled is done, and
1017 * another intr xfer has been submitted, let that one be dealt
1018 * with when the callout fires again.
1019 *
1020 * The call to callout_pending is racy, but the transition
1021 * from pending to invoking happens atomically. The
1022 * callout_ack ensures callout_invoking does not return true
1023 * due to this invocation of the callout; the lock ensures the
1024 * next invocation of the callout cannot callout_ack (unless it
1025 * had already run to completion and nulled sc->sc_intr_xfer,
1026 	 * in which case we would have bailed out already).
1027 */
1028 callout_ack(&sc->sc_poll_handle);
1029 if (callout_pending(&sc->sc_poll_handle) ||
1030 callout_invoking(&sc->sc_poll_handle))
1031 goto out;
1032
1033 /*
1034 * Check flags for the two interrupt ports, and set them in the
1035 	 * buffer if an interrupt arrived; otherwise arrange to poll again later.
1036 */
1037 p = xfer->ux_buf;
1038 p[0] = 0;
1039 if (UREAD2(sc, UHCI_PORTSC1) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC))
1040 p[0] |= 1<<1;
1041 if (UREAD2(sc, UHCI_PORTSC2) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC))
1042 p[0] |= 1<<2;
1043 if (p[0] == 0) {
1044 /*
1045 * No change -- try again in a while, unless we're
1046 * suspending, in which case we'll try again after
1047 * resume.
1048 */
1049 if (sc->sc_suspend != PWR_SUSPEND)
1050 callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
1051 goto out;
1052 }
1053
1054 /*
1055 * Interrupt completed, and the xfer has not been completed or
1056 * synchronously aborted. Complete the xfer now.
1057 */
1058 xfer->ux_actlen = 1;
1059 xfer->ux_status = USBD_NORMAL_COMPLETION;
1060 #ifdef DIAGNOSTIC
1061 UHCI_XFER2UXFER(xfer)->ux_isdone = true;
1062 #endif
1063 usb_transfer_complete(xfer);
1064
1065 out: mutex_exit(&sc->sc_lock);
1066 }
1067
1068 void
1069 uhci_root_intr_done(struct usbd_xfer *xfer)
1070 {
1071 struct uhci_softc *sc = UHCI_XFER2SC(xfer);
1072
1073 KASSERT(mutex_owned(&sc->sc_lock));
1074
1075 /* Claim the xfer so it doesn't get completed again. */
1076 KASSERT(sc->sc_intr_xfer == xfer);
1077 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
1078 sc->sc_intr_xfer = NULL;
1079 }
1080
1081 /*
1082 * Let the last QH loop back to the high speed control transfer QH.
1083  * This is what Intel calls "bandwidth reclamation" and improves
1084 * USB performance a lot for some devices.
1085 * If we are already looping, just count it.
1086 */
1087 void
1088 uhci_add_loop(uhci_softc_t *sc)
1089 {
1090 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1091
1092 #ifdef UHCI_DEBUG
1093 if (uhcinoloop)
1094 return;
1095 #endif
1096 if (++sc->sc_loops == 1) {
1097 DPRINTFN(5, "add loop", 0, 0, 0, 0);
1098 /* Note, we don't loop back the soft pointer. */
1099 sc->sc_last_qh->qh.qh_hlink =
1100 htole32(sc->sc_hctl_start->physaddr | UHCI_PTR_QH);
1101 usb_syncmem(&sc->sc_last_qh->dma,
1102 sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink),
1103 sizeof(sc->sc_last_qh->qh.qh_hlink),
1104 BUS_DMASYNC_PREWRITE);
1105 }
1106 }
1107
1108 void
1109 uhci_rem_loop(uhci_softc_t *sc)
1110 {
1111 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1112
1113 #ifdef UHCI_DEBUG
1114 if (uhcinoloop)
1115 return;
1116 #endif
1117 if (--sc->sc_loops == 0) {
1118 DPRINTFN(5, "remove loop", 0, 0, 0, 0);
1119 sc->sc_last_qh->qh.qh_hlink = htole32(UHCI_PTR_T);
1120 usb_syncmem(&sc->sc_last_qh->dma,
1121 sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink),
1122 sizeof(sc->sc_last_qh->qh.qh_hlink),
1123 BUS_DMASYNC_PREWRITE);
1124 }
1125 }
1126
1127 /* Add high speed control QH, called with lock held. */
1128 void
1129 uhci_add_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1130 {
1131 uhci_soft_qh_t *eqh;
1132
1133 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1134
1135 KASSERT(mutex_owned(&sc->sc_lock));
1136
1137 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1138 eqh = sc->sc_hctl_end;
1139 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1140 sizeof(eqh->qh.qh_hlink),
1141 BUS_DMASYNC_POSTWRITE);
1142 sqh->hlink = eqh->hlink;
1143 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1144 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1145 BUS_DMASYNC_PREWRITE);
1146 eqh->hlink = sqh;
1147 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1148 sc->sc_hctl_end = sqh;
1149 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1150 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1151 #ifdef UHCI_CTL_LOOP
1152 uhci_add_loop(sc);
1153 #endif
1154 }
1155
1156 /* Remove high speed control QH, called with lock held. */
1157 void
1158 uhci_remove_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1159 {
1160 uhci_soft_qh_t *pqh;
1161 uint32_t elink;
1162
1163 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1164
1165 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1166 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1167 #ifdef UHCI_CTL_LOOP
1168 uhci_rem_loop(sc);
1169 #endif
1170 /*
1171 * The T bit should be set in the elink of the QH so that the HC
1172 * doesn't follow the pointer. This condition may fail if the
1173 	 * transferred packet was short so that the QH still points
1174 * at the last used TD.
1175 * In this case we set the T bit and wait a little for the HC
1176 * to stop looking at the TD.
1177 * Note that if the TD chain is large enough, the controller
1178 * may still be looking at the chain at the end of this function.
1179 * uhci_free_std_chain() will make sure the controller stops
1180 * looking at it quickly, but until then we should not change
1181 * sqh->hlink.
1182 */
1183 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1184 sizeof(sqh->qh.qh_elink),
1185 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1186 elink = le32toh(sqh->qh.qh_elink);
1187 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1188 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD);
1189 if (!(elink & UHCI_PTR_T)) {
1190 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1191 usb_syncmem(&sqh->dma,
1192 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1193 sizeof(sqh->qh.qh_elink),
1194 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1195 delay(UHCI_QH_REMOVE_DELAY);
1196 }
1197
1198 pqh = uhci_find_prev_qh(sc->sc_hctl_start, sqh);
1199 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1200 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1201 pqh->hlink = sqh->hlink;
1202 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1203 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1204 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1205 delay(UHCI_QH_REMOVE_DELAY);
1206 if (sc->sc_hctl_end == sqh)
1207 sc->sc_hctl_end = pqh;
1208 }
1209
1210 /* Add low speed control QH, called with lock held. */
1211 void
1212 uhci_add_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1213 {
1214 uhci_soft_qh_t *eqh;
1215
1216 KASSERT(mutex_owned(&sc->sc_lock));
1217
1218 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1219 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1220
1221 eqh = sc->sc_lctl_end;
1222 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1223 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1224 sqh->hlink = eqh->hlink;
1225 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1226 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1227 BUS_DMASYNC_PREWRITE);
1228 eqh->hlink = sqh;
1229 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1230 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1231 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1232 sc->sc_lctl_end = sqh;
1233 }
1234
1235 /* Remove low speed control QH, called with lock held. */
1236 void
1237 uhci_remove_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1238 {
1239 uhci_soft_qh_t *pqh;
1240 uint32_t elink;
1241
1242 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1243
1244 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1245 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1246
1247 /* See comment in uhci_remove_hs_ctrl() */
1248 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1249 sizeof(sqh->qh.qh_elink),
1250 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1251 elink = le32toh(sqh->qh.qh_elink);
1252 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1253 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD);
1254 if (!(elink & UHCI_PTR_T)) {
1255 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1256 usb_syncmem(&sqh->dma,
1257 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1258 sizeof(sqh->qh.qh_elink),
1259 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1260 delay(UHCI_QH_REMOVE_DELAY);
1261 }
1262 pqh = uhci_find_prev_qh(sc->sc_lctl_start, sqh);
1263 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1264 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1265 pqh->hlink = sqh->hlink;
1266 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1267 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1268 sizeof(pqh->qh.qh_hlink),
1269 BUS_DMASYNC_PREWRITE);
1270 delay(UHCI_QH_REMOVE_DELAY);
1271 if (sc->sc_lctl_end == sqh)
1272 sc->sc_lctl_end = pqh;
1273 }
1274
1275 /* Add bulk QH, called with lock held. */
1276 void
1277 uhci_add_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1278 {
1279 uhci_soft_qh_t *eqh;
1280
1281 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1282
1283 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1284 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1285
1286 eqh = sc->sc_bulk_end;
1287 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1288 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1289 sqh->hlink = eqh->hlink;
1290 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1291 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1292 BUS_DMASYNC_PREWRITE);
1293 eqh->hlink = sqh;
1294 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1295 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1296 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1297 sc->sc_bulk_end = sqh;
1298 uhci_add_loop(sc);
1299 }
1300
1301 /* Remove bulk QH, called with lock held. */
1302 void
1303 uhci_remove_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1304 {
1305 uhci_soft_qh_t *pqh;
1306
1307 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1308
1309 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1310 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1311
1312 uhci_rem_loop(sc);
1313 /* See comment in uhci_remove_hs_ctrl() */
1314 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1315 sizeof(sqh->qh.qh_elink),
1316 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1317 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) {
1318 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1319 usb_syncmem(&sqh->dma,
1320 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1321 sizeof(sqh->qh.qh_elink),
1322 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1323 delay(UHCI_QH_REMOVE_DELAY);
1324 }
1325 pqh = uhci_find_prev_qh(sc->sc_bulk_start, sqh);
1326 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1327 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1328 pqh->hlink = sqh->hlink;
1329 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1330 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1331 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1332 delay(UHCI_QH_REMOVE_DELAY);
1333 if (sc->sc_bulk_end == sqh)
1334 sc->sc_bulk_end = pqh;
1335 }
1336
1337 Static int uhci_intr1(uhci_softc_t *);
1338
1339 int
1340 uhci_intr(void *arg)
1341 {
1342 uhci_softc_t *sc = arg;
1343 int ret = 0;
1344
1345 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1346
1347 mutex_spin_enter(&sc->sc_intr_lock);
1348
1349 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1350 goto done;
1351
1352 if (sc->sc_bus.ub_usepolling || UREAD2(sc, UHCI_INTR) == 0) {
1353 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1354 goto done;
1355 }
1356
1357 ret = uhci_intr1(sc);
1358
1359 done:
1360 mutex_spin_exit(&sc->sc_intr_lock);
1361 return ret;
1362 }
1363
1364 int
1365 uhci_intr1(uhci_softc_t *sc)
1366 {
1367 int status;
1368 int ack;
1369
1370 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1371
1372 #ifdef UHCI_DEBUG
1373 if (uhcidebug >= 15) {
1374 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0);
1375 uhci_dumpregs(sc);
1376 }
1377 #endif
1378
1379 KASSERT(mutex_owned(&sc->sc_intr_lock));
1380
1381 status = UREAD2(sc, UHCI_STS) & UHCI_STS_ALLINTRS;
1382 /* Check if the interrupt was for us. */
1383 if (status == 0)
1384 return 0;
1385
1386 if (sc->sc_suspend != PWR_RESUME) {
1387 #ifdef DIAGNOSTIC
1388 printf("%s: interrupt while not operating ignored\n",
1389 device_xname(sc->sc_dev));
1390 #endif
1391 UWRITE2(sc, UHCI_STS, status); /* acknowledge the ints */
1392 return 0;
1393 }
1394
1395 ack = 0;
1396 if (status & UHCI_STS_USBINT)
1397 ack |= UHCI_STS_USBINT;
1398 if (status & UHCI_STS_USBEI)
1399 ack |= UHCI_STS_USBEI;
1400 if (status & UHCI_STS_RD) {
1401 ack |= UHCI_STS_RD;
1402 #ifdef UHCI_DEBUG
1403 printf("%s: resume detect\n", device_xname(sc->sc_dev));
1404 #endif
1405 }
1406 if (status & UHCI_STS_HSE) {
1407 ack |= UHCI_STS_HSE;
1408 printf("%s: host system error\n", device_xname(sc->sc_dev));
1409 }
1410 if (status & UHCI_STS_HCPE) {
1411 ack |= UHCI_STS_HCPE;
1412 printf("%s: host controller process error\n",
1413 device_xname(sc->sc_dev));
1414 }
1415
1416 	/* HCHalted=1 with Run/Stop=0 is normal; only a halt while running is an error. */
1417 if ((status & UHCI_STS_HCH) && (UREAD2(sc, UHCI_CMD) & UHCI_CMD_RS)) {
1418 /* no acknowledge needed */
1419 if (!sc->sc_dying) {
1420 printf("%s: host controller halted\n",
1421 device_xname(sc->sc_dev));
1422 #ifdef UHCI_DEBUG
1423 uhci_dump_all(sc);
1424 #endif
1425 }
1426 sc->sc_dying = 1;
1427 }
1428
1429 if (!ack)
1430 return 0; /* nothing to acknowledge */
1431 UWRITE2(sc, UHCI_STS, ack); /* acknowledge the ints */
1432
1433 usb_schedsoftintr(&sc->sc_bus);
1434
1435 DPRINTFN(15, "sc %#jx done", (uintptr_t)sc, 0, 0, 0);
1436
1437 return 1;
1438 }
1439
1440 void
1441 uhci_softintr(void *v)
1442 {
1443 struct usbd_bus *bus = v;
1444 uhci_softc_t *sc = UHCI_BUS2SC(bus);
1445 struct uhci_xfer *ux, *nextux;
1446 ux_completeq_t cq;
1447
1448 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1449 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0);
1450
1451 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1452
1453 TAILQ_INIT(&cq);
1454 /*
1455 * Interrupts on UHCI really suck. When the host controller
1456 * interrupts because a transfer is completed there is no
1457 * way of knowing which transfer it was. You can scan down
1458 * the TDs and QHs of the previous frame to limit the search,
1459 * but that assumes that the interrupt was not delayed by more
1460 * than 1 ms, which may not always be true (e.g. after debug
1461 * output on a slow console).
1462 * We scan all interrupt descriptors to see if any have
1463 * completed.
1464 */
1465 TAILQ_FOREACH_SAFE(ux, &sc->sc_intrhead, ux_list, nextux) {
1466 uhci_check_intr(sc, ux, &cq);
1467 }
1468
1469 /*
1470 * We abuse ux_list for the interrupt and complete lists and
1471 * interrupt transfers will get re-added here so use
1472 * the _SAFE version of TAILQ_FOREACH.
1473 */
1474 TAILQ_FOREACH_SAFE(ux, &cq, ux_list, nextux) {
1475 DPRINTF("ux %#jx", (uintptr_t)ux, 0, 0, 0);
1476 usb_transfer_complete(&ux->ux_xfer);
1477 }
1478
1479 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1480 }
1481
1482 /* Check for an interrupt. */
1483 void
1484 uhci_check_intr(uhci_softc_t *sc, struct uhci_xfer *ux, ux_completeq_t *cqp)
1485 {
1486 uhci_soft_td_t *std, *fstd = NULL, *lstd = NULL;
1487 uint32_t status;
1488
1489 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1490 DPRINTFN(15, "ux %#jx", (uintptr_t)ux, 0, 0, 0);
1491
1492 KASSERT(ux != NULL);
1493
1494 struct usbd_xfer *xfer = &ux->ux_xfer;
1495 if (xfer->ux_status == USBD_CANCELLED ||
1496 xfer->ux_status == USBD_TIMEOUT) {
1497 DPRINTF("aborted xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
1498 return;
1499 }
1500
1501 switch (ux->ux_type) {
1502 case UX_CTRL:
1503 fstd = ux->ux_setup;
1504 lstd = ux->ux_stat;
1505 break;
1506 case UX_BULK:
1507 case UX_INTR:
1508 case UX_ISOC:
1509 fstd = ux->ux_stdstart;
1510 lstd = ux->ux_stdend;
1511 break;
1512 default:
1513 KASSERT(false);
1514 break;
1515 }
1516 if (fstd == NULL)
1517 return;
1518
1519 KASSERT(lstd != NULL);
1520
1521 usb_syncmem(&lstd->dma,
1522 lstd->offs + offsetof(uhci_td_t, td_status),
1523 sizeof(lstd->td.td_status),
1524 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1525 status = le32toh(lstd->td.td_status);
1526 usb_syncmem(&lstd->dma,
1527 lstd->offs + offsetof(uhci_td_t, td_status),
1528 sizeof(lstd->td.td_status),
1529 BUS_DMASYNC_PREREAD);
1530
1531 /* If the last TD is not marked active we can complete */
1532 if (!(status & UHCI_TD_ACTIVE)) {
1533 done:
1534 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0);
1535 uhci_idone(ux, cqp);
1536 return;
1537 }
1538
1539 /*
1540 * If the last TD is still active we need to check whether there
1541 * is an error somewhere in the middle, or whether there was a
1542 * short packet (SPD and not ACTIVE).
1543 */
1544 DPRINTFN(12, "active ux=%#jx", (uintptr_t)ux, 0, 0, 0);
1545 for (std = fstd; std != lstd; std = std->link.std) {
1546 usb_syncmem(&std->dma,
1547 std->offs + offsetof(uhci_td_t, td_status),
1548 sizeof(std->td.td_status),
1549 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1550 status = le32toh(std->td.td_status);
1551 usb_syncmem(&std->dma,
1552 std->offs + offsetof(uhci_td_t, td_status),
1553 sizeof(std->td.td_status), BUS_DMASYNC_PREREAD);
1554
1555 /* If there's an active TD the xfer isn't done. */
1556 if (status & UHCI_TD_ACTIVE) {
1557 DPRINTFN(12, "ux=%#jx std=%#jx still active",
1558 (uintptr_t)ux, (uintptr_t)std, 0, 0);
1559 return;
1560 }
1561
1562 /* Any kind of error makes the xfer done. */
1563 if (status & UHCI_TD_STALLED)
1564 goto done;
1565
1566 /*
1567 * If the data phase of a control transfer is short, we need
1568 * to complete the status stage
1569 */
1570
1571 if ((status & UHCI_TD_SPD) && ux->ux_type == UX_CTRL) {
1572 struct uhci_pipe *upipe =
1573 UHCI_PIPE2UPIPE(xfer->ux_pipe);
1574 uhci_soft_qh_t *sqh = upipe->ctrl.sqh;
1575 uhci_soft_td_t *stat = upipe->ctrl.stat;
1576
1577 			DPRINTFN(12, "ux=%#jx std=%#jx control status"
1578 			    " phase needs completion", (uintptr_t)ux,
1579 (uintptr_t)ux->ux_stdstart, 0, 0);
1580
1581 sqh->qh.qh_elink =
1582 htole32(stat->physaddr | UHCI_PTR_TD);
1583 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1584 BUS_DMASYNC_PREWRITE);
1585 break;
1586 }
1587
1588 /* We want short packets, and it is short: it's done */
1589 usb_syncmem(&std->dma,
1590 std->offs + offsetof(uhci_td_t, td_token),
1591 sizeof(std->td.td_token),
1592 BUS_DMASYNC_POSTWRITE);
1593
1594 if ((status & UHCI_TD_SPD) &&
1595 UHCI_TD_GET_ACTLEN(status) <
1596 UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token))) {
1597 goto done;
1598 }
1599 }
1600 }
1601
1602 /* Called with USB lock held. */
1603 void
1604 uhci_idone(struct uhci_xfer *ux, ux_completeq_t *cqp)
1605 {
1606 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1607 struct usbd_xfer *xfer = &ux->ux_xfer;
1608 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
1609 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
1610 uhci_soft_td_t *std;
1611 uint32_t status = 0, nstatus;
1612 int actlen;
1613
1614 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1615
1616 DPRINTFN(12, "ux=%#jx", (uintptr_t)ux, 0, 0, 0);
1617
1618 /*
1619 * Try to claim this xfer for completion. If it has already
1620 * completed or aborted, drop it on the floor.
1621 */
1622 if (!usbd_xfer_trycomplete(xfer))
1623 return;
1624
1625 #ifdef DIAGNOSTIC
1626 #ifdef UHCI_DEBUG
1627 if (ux->ux_isdone) {
1628 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1629 uhci_dump_ii(ux);
1630 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1631 }
1632 #endif
1633 KASSERT(!ux->ux_isdone);
1634 KASSERTMSG(!ux->ux_isdone, "xfer %p type %d status %d", xfer,
1635 ux->ux_type, xfer->ux_status);
1636 ux->ux_isdone = true;
1637 #endif
1638
1639 if (xfer->ux_nframes != 0) {
1640 /* Isoc transfer, do things differently. */
1641 uhci_soft_td_t **stds = upipe->isoc.stds;
1642 int i, n, nframes, len;
1643
1644 DPRINTFN(5, "ux=%#jx isoc ready", (uintptr_t)ux, 0, 0, 0);
1645
1646 nframes = xfer->ux_nframes;
1647 actlen = 0;
1648 n = ux->ux_curframe;
1649 for (i = 0; i < nframes; i++) {
1650 std = stds[n];
1651 #ifdef UHCI_DEBUG
1652 if (uhcidebug >= 5) {
1653 DPRINTF("isoc TD %jd", i, 0, 0, 0);
1654 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1655 uhci_dump_td(std);
1656 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1657 }
1658 #endif
1659 if (++n >= UHCI_VFRAMELIST_COUNT)
1660 n = 0;
1661 usb_syncmem(&std->dma,
1662 std->offs + offsetof(uhci_td_t, td_status),
1663 sizeof(std->td.td_status),
1664 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1665 status = le32toh(std->td.td_status);
1666 len = UHCI_TD_GET_ACTLEN(status);
1667 xfer->ux_frlengths[i] = len;
1668 actlen += len;
1669 }
1670 upipe->isoc.inuse -= nframes;
1671 xfer->ux_actlen = actlen;
1672 xfer->ux_status = USBD_NORMAL_COMPLETION;
1673 goto end;
1674 }
1675
1676 #ifdef UHCI_DEBUG
1677 DPRINTFN(10, "ux=%#jx, xfer=%#jx, pipe=%#jx ready", (uintptr_t)ux,
1678 (uintptr_t)xfer, (uintptr_t)upipe, 0);
1679 if (uhcidebug >= 10) {
1680 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1681 uhci_dump_tds(ux->ux_stdstart);
1682 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1683 }
1684 #endif
1685
1686 /* The transfer is done, compute actual length and status. */
1687 actlen = 0;
1688 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) {
1689 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
1690 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1691 nstatus = le32toh(std->td.td_status);
1692 if (nstatus & UHCI_TD_ACTIVE)
1693 break;
1694
1695 status = nstatus;
1696 if (UHCI_TD_GET_PID(le32toh(std->td.td_token)) !=
1697 UHCI_TD_PID_SETUP)
1698 actlen += UHCI_TD_GET_ACTLEN(status);
1699 else {
1700 /*
1701 * UHCI will report CRCTO in addition to a STALL or NAK
1702 * for a SETUP transaction. See section 3.2.2, "TD
1703 * CONTROL AND STATUS".
1704 */
1705 if (status & (UHCI_TD_STALLED | UHCI_TD_NAK))
1706 status &= ~UHCI_TD_CRCTO;
1707 }
1708 }
1709 /* If there are left over TDs we need to update the toggle. */
1710 if (std != NULL)
1711 upipe->nexttoggle = UHCI_TD_GET_DT(le32toh(std->td.td_token));
1712
1713 status &= UHCI_TD_ERROR;
1714 DPRINTFN(10, "actlen=%jd, status=%#jx", actlen, status, 0, 0);
1715 xfer->ux_actlen = actlen;
1716 if (status != 0) {
1717
1718 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1719 "error, addr=%jd, endpt=0x%02jx",
1720 xfer->ux_pipe->up_dev->ud_addr,
1721 xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress,
1722 0, 0);
1723 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1724 "bitstuff=%jd crcto =%jd nak =%jd babble =%jd",
1725 !!(status & UHCI_TD_BITSTUFF),
1726 !!(status & UHCI_TD_CRCTO),
1727 !!(status & UHCI_TD_NAK),
1728 !!(status & UHCI_TD_BABBLE));
1729 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1730 "dbuffer =%jd stalled =%jd active =%jd",
1731 !!(status & UHCI_TD_DBUFFER),
1732 !!(status & UHCI_TD_STALLED),
1733 !!(status & UHCI_TD_ACTIVE),
1734 0);
1735
1736 if (status == UHCI_TD_STALLED)
1737 xfer->ux_status = USBD_STALLED;
1738 else
1739 xfer->ux_status = USBD_IOERROR; /* more info XXX */
1740 } else {
1741 xfer->ux_status = USBD_NORMAL_COMPLETION;
1742 }
1743
1744 end:
1745 uhci_del_intr_list(sc, ux);
1746 if (cqp)
1747 TAILQ_INSERT_TAIL(cqp, ux, ux_list);
1748
1749 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1750 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0);
1751 }
1752
1753 void
1754 uhci_poll(struct usbd_bus *bus)
1755 {
1756 uhci_softc_t *sc = UHCI_BUS2SC(bus);
1757
1758 if (UREAD2(sc, UHCI_STS) & UHCI_STS_USBINT) {
1759 mutex_spin_enter(&sc->sc_intr_lock);
1760 uhci_intr1(sc);
1761 mutex_spin_exit(&sc->sc_intr_lock);
1762 }
1763 }
1764
1765 void
1766 uhci_reset(uhci_softc_t *sc)
1767 {
1768 int n;
1769
1770 UHCICMD(sc, UHCI_CMD_HCRESET);
1771 /* The reset bit goes low when the controller is done. */
1772 for (n = 0; n < UHCI_RESET_TIMEOUT &&
1773 (UREAD2(sc, UHCI_CMD) & UHCI_CMD_HCRESET); n++)
1774 usb_delay_ms(&sc->sc_bus, 1);
1775 if (n >= UHCI_RESET_TIMEOUT)
1776 printf("%s: controller did not reset\n",
1777 device_xname(sc->sc_dev));
1778 }
1779
1780 usbd_status
1781 uhci_run(uhci_softc_t *sc, int run)
1782 {
1783 int n, running;
1784 uint16_t cmd;
1785
1786 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1787
1788 run = run != 0;
1789
1790 DPRINTF("setting run=%jd", run, 0, 0, 0);
1791 cmd = UREAD2(sc, UHCI_CMD);
1792 if (run)
1793 cmd |= UHCI_CMD_RS;
1794 else
1795 cmd &= ~UHCI_CMD_RS;
1796 UHCICMD(sc, cmd);
1797 for (n = 0; n < 10; n++) {
1798 running = !(UREAD2(sc, UHCI_STS) & UHCI_STS_HCH);
1799 /* return when we've entered the state we want */
1800 if (run == running) {
1801 DPRINTF("done cmd=%#jx sts=%#jx",
1802 UREAD2(sc, UHCI_CMD), UREAD2(sc, UHCI_STS), 0, 0);
1803 return USBD_NORMAL_COMPLETION;
1804 }
1805 usb_delay_ms(&sc->sc_bus, 1);
1806 }
1807 printf("%s: cannot %s\n", device_xname(sc->sc_dev),
1808 run ? "start" : "stop");
1809 return USBD_IOERROR;
1810 }
1811
1812 /*
1813 * Memory management routines.
1814 * uhci_alloc_std allocates TDs
1815 * uhci_alloc_sqh allocates QHs
1816 * These two routines do their own free list management,
1817 * partly for speed, partly because allocating DMAable memory
1818 * has page size granularity so much memory would be wasted if
1819 * only one TD/QH (32 bytes) was placed in each allocated chunk.
1820 */
1821
1822 uhci_soft_td_t *
1823 uhci_alloc_std(uhci_softc_t *sc)
1824 {
1825 uhci_soft_td_t *std;
1826 int i, offs;
1827 usb_dma_t dma;
1828
1829 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1830
1831 mutex_enter(&sc->sc_lock);
1832 if (sc->sc_freetds == NULL) {
1833 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0);
1834 mutex_exit(&sc->sc_lock);
1835
		int err = usb_allocmem(sc->sc_bus.ub_dmatag,
		    UHCI_STD_SIZE * UHCI_STD_CHUNK,
		    UHCI_TD_ALIGN, USBMALLOC_COHERENT, &dma);
1838 if (err)
1839 return NULL;
1840
1841 mutex_enter(&sc->sc_lock);
1842 for (i = 0; i < UHCI_STD_CHUNK; i++) {
1843 offs = i * UHCI_STD_SIZE;
1844 std = KERNADDR(&dma, offs);
1845 std->physaddr = DMAADDR(&dma, offs);
1846 std->dma = dma;
1847 std->offs = offs;
1848 std->link.std = sc->sc_freetds;
1849 sc->sc_freetds = std;
1850 }
1851 }
1852 std = sc->sc_freetds;
1853 sc->sc_freetds = std->link.std;
1854 mutex_exit(&sc->sc_lock);
1855
1856 memset(&std->td, 0, sizeof(uhci_td_t));
1857
1858 return std;
1859 }
1860
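/* Magic td_token value used by DIAGNOSTIC code to catch double frees. */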
1861 #define TD_IS_FREE 0x12345678
1862
1863 void
1864 uhci_free_std_locked(uhci_softc_t *sc, uhci_soft_td_t *std)
1865 {
1866 KASSERT(mutex_owned(&sc->sc_lock));
1867
1868 #ifdef DIAGNOSTIC
1869 if (le32toh(std->td.td_token) == TD_IS_FREE) {
1870 printf("%s: freeing free TD %p\n", __func__, std);
1871 return;
1872 }
1873 std->td.td_token = htole32(TD_IS_FREE);
1874 #endif
1875
1876 std->link.std = sc->sc_freetds;
1877 sc->sc_freetds = std;
1878 }
1879
1880 void
1881 uhci_free_std(uhci_softc_t *sc, uhci_soft_td_t *std)
1882 {
1883 mutex_enter(&sc->sc_lock);
1884 uhci_free_std_locked(sc, std);
1885 mutex_exit(&sc->sc_lock);
1886 }
1887
1888 uhci_soft_qh_t *
1889 uhci_alloc_sqh(uhci_softc_t *sc)
1890 {
1891 uhci_soft_qh_t *sqh;
1892 int i, offs;
1893 usb_dma_t dma;
1894
1895 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1896
1897 mutex_enter(&sc->sc_lock);
1898 if (sc->sc_freeqhs == NULL) {
1899 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0);
1900 mutex_exit(&sc->sc_lock);
1901
		int err = usb_allocmem(sc->sc_bus.ub_dmatag,
		    UHCI_SQH_SIZE * UHCI_SQH_CHUNK,
		    UHCI_QH_ALIGN, USBMALLOC_COHERENT, &dma);
1904 if (err)
1905 return NULL;
1906
1907 mutex_enter(&sc->sc_lock);
1908 for (i = 0; i < UHCI_SQH_CHUNK; i++) {
1909 offs = i * UHCI_SQH_SIZE;
1910 sqh = KERNADDR(&dma, offs);
1911 sqh->physaddr = DMAADDR(&dma, offs);
1912 sqh->dma = dma;
1913 sqh->offs = offs;
1914 sqh->hlink = sc->sc_freeqhs;
1915 sc->sc_freeqhs = sqh;
1916 }
1917 }
1918 sqh = sc->sc_freeqhs;
1919 sc->sc_freeqhs = sqh->hlink;
1920 mutex_exit(&sc->sc_lock);
1921
1922 memset(&sqh->qh, 0, sizeof(uhci_qh_t));
1923
1924 return sqh;
1925 }
1926
1927 void
1928 uhci_free_sqh(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1929 {
1930 KASSERT(mutex_owned(&sc->sc_lock));
1931
1932 sqh->hlink = sc->sc_freeqhs;
1933 sc->sc_freeqhs = sqh;
1934 }
1935
1936 #if 0
1937 void
1938 uhci_free_std_chain(uhci_softc_t *sc, uhci_soft_td_t *std,
1939 uhci_soft_td_t *stdend)
1940 {
1941 uhci_soft_td_t *p;
1942 uint32_t td_link;
1943
1944 /*
1945 * to avoid race condition with the controller which may be looking
1946 * at this chain, we need to first invalidate all links, and
1947 * then wait for the controller to move to another queue
1948 */
1949 for (p = std; p != stdend; p = p->link.std) {
1950 usb_syncmem(&p->dma,
1951 p->offs + offsetof(uhci_td_t, td_link),
1952 sizeof(p->td.td_link),
1953 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1954 td_link = le32toh(p->td.td_link);
1955 usb_syncmem(&p->dma,
1956 p->offs + offsetof(uhci_td_t, td_link),
1957 sizeof(p->td.td_link),
1958 BUS_DMASYNC_PREREAD);
1959 if ((td_link & UHCI_PTR_T) == 0) {
1960 p->td.td_link = htole32(UHCI_PTR_T);
1961 usb_syncmem(&p->dma,
1962 p->offs + offsetof(uhci_td_t, td_link),
1963 sizeof(p->td.td_link),
1964 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1965 }
1966 }
1967 delay(UHCI_QH_REMOVE_DELAY);
1968
1969 for (; std != stdend; std = p) {
1970 p = std->link.std;
1971 uhci_free_std(sc, std);
1972 }
1973 }
1974 #endif
1975
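/*
 * Allocate the soft TDs needed to transfer 'len' bytes: one TD per
 * maxp-sized packet, extra TDs for possible page crossings when maxp is
 * not a factor of PAGE_SIZE, and one more TD when a zero-length packet
 * must terminate a write (USBD_FORCE_SHORT_XFER).  The TD pointers are
 * stored in the xfer's ux_stds array.
 */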
1976 int
1977 uhci_alloc_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer, int len,
1978 int rd, uhci_soft_td_t **sp)
1979 {
1980 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
1981 uint16_t flags = xfer->ux_flags;
1982 uhci_soft_td_t *p;
1983
1984 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1985
1986 DPRINTFN(8, "xfer=%#jx pipe=%#jx", (uintptr_t)xfer,
1987 (uintptr_t)xfer->ux_pipe, 0, 0);
1988
1989 ASSERT_SLEEPABLE();
1990 KASSERT(sp);
1991
1992 int maxp = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
1993 if (maxp == 0) {
1994 printf("%s: maxp=0\n", __func__);
1995 return EINVAL;
1996 }
1997 size_t ntd = howmany(len, maxp);
	/*
	 * If the transfer is larger than PAGE_SIZE and maxp is not a factor
	 * of PAGE_SIZE, packets may straddle a page boundary, so allow one
	 * extra TD per page.
	 */
2002 if (len > PAGE_SIZE && (PAGE_SIZE % maxp) != 0) {
2003 ntd += howmany(len, PAGE_SIZE);
2004 }
2005
2006 /*
2007 * Might need one more TD if we're writing a ZLP
2008 */
2009 if (!rd && (flags & USBD_FORCE_SHORT_XFER)) {
2010 ntd++;
2011 }
2012 DPRINTFN(10, "maxp=%jd ntd=%jd", maxp, ntd, 0, 0);
2013
2014 uxfer->ux_stds = NULL;
2015 uxfer->ux_nstd = ntd;
2016 if (ntd == 0) {
2017 *sp = NULL;
2018 DPRINTF("ntd=0", 0, 0, 0, 0);
2019 return 0;
2020 }
2021 uxfer->ux_stds = kmem_alloc(sizeof(uhci_soft_td_t *) * ntd,
2022 KM_SLEEP);
2023
2024 for (int i = 0; i < ntd; i++) {
2025 p = uhci_alloc_std(sc);
2026 if (p == NULL) {
2027 if (i != 0) {
2028 uxfer->ux_nstd = i;
2029 uhci_free_stds(sc, uxfer);
2030 }
2031 kmem_free(uxfer->ux_stds,
2032 sizeof(uhci_soft_td_t *) * ntd);
2033 return ENOMEM;
2034 }
2035 uxfer->ux_stds[i] = p;
2036 }
2037
2038 *sp = uxfer->ux_stds[0];
2039
2040 return 0;
2041 }
2042
2043 Static void
2044 uhci_free_stds(uhci_softc_t *sc, struct uhci_xfer *ux)
2045 {
2046 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2047
2048 DPRINTFN(8, "ux=%#jx", (uintptr_t)ux, 0, 0, 0);
2049
2050 mutex_enter(&sc->sc_lock);
2051 for (size_t i = 0; i < ux->ux_nstd; i++) {
2052 uhci_soft_td_t *std = ux->ux_stds[i];
2053 #ifdef DIAGNOSTIC
		if (le32toh(std->td.td_token) == TD_IS_FREE) {
			printf("%s: freeing free TD %p\n", __func__, std);
			mutex_exit(&sc->sc_lock);
			return;
		}
2058 std->td.td_token = htole32(TD_IS_FREE);
2059 #endif
2060 ux->ux_stds[i]->link.std = sc->sc_freetds;
2061 sc->sc_freetds = std;
2062 }
2063 mutex_exit(&sc->sc_lock);
2064 }
2065
2066
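/*
 * (Re)initialize an xfer's previously allocated TDs for a new transfer:
 * carve the DMA buffer into packets that neither exceed maxp nor cross a
 * page boundary, link the TDs together, and set the data toggle of each.
 * A zero-length OUT packet is appended for forced-short writes whose
 * length is a multiple of maxp.  The last TD used and the next toggle
 * value are returned via 'lstd' and 'toggle'.
 */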
2067 Static void
2068 uhci_reset_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer,
2069 int length, int isread, int *toggle, uhci_soft_td_t **lstd)
2070 {
2071 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2072 struct usbd_pipe *pipe = xfer->ux_pipe;
2073 usb_dma_t *dma = &xfer->ux_dmabuf;
2074 uint16_t flags = xfer->ux_flags;
2075 uhci_soft_td_t *std, *prev;
2076 int len = length;
2077 int tog = *toggle;
2078 int maxp;
2079 uint32_t status;
2080 size_t i, offs;
2081
2082 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2083 DPRINTFN(8, "xfer=%#jx len %jd isread %jd toggle %jd", (uintptr_t)xfer,
2084 len, isread, *toggle);
2085
2086 KASSERT(len != 0 || (!isread && (flags & USBD_FORCE_SHORT_XFER)));
2087
2088 maxp = UGETW(pipe->up_endpoint->ue_edesc->wMaxPacketSize);
2089 KASSERT(maxp != 0);
2090
2091 int addr = xfer->ux_pipe->up_dev->ud_addr;
2092 int endpt = xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress;
2093
2094 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(3) | UHCI_TD_ACTIVE);
2095 if (pipe->up_dev->ud_speed == USB_SPEED_LOW)
2096 status |= UHCI_TD_LS;
2097 if (flags & USBD_SHORT_XFER_OK)
2098 status |= UHCI_TD_SPD;
2099 usb_syncmem(dma, 0, len,
2100 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2101 std = prev = NULL;
2102 for (offs = i = 0; len != 0 && i < uxfer->ux_nstd; i++, prev = std) {
2103 int l = len;
2104 std = uxfer->ux_stds[i];
2105
2106 const bus_addr_t sbp = DMAADDR(dma, offs);
2107 const bus_addr_t ebp = DMAADDR(dma, offs + l - 1);
2108 if (((sbp ^ ebp) & ~PAGE_MASK) != 0)
2109 l = PAGE_SIZE - (DMAADDR(dma, offs) & PAGE_MASK);
2110
2111 if (l > maxp)
2112 l = maxp;
2113
2114 if (prev) {
2115 prev->link.std = std;
2116 prev->td.td_link = htole32(
2117 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD
2118 );
2119 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td),
2120 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2121 }
2122
2123 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2124 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2125
2126 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD);
2127 std->td.td_status = htole32(status);
2128 std->td.td_token = htole32(
2129 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) |
2130 UHCI_TD_SET_DEVADDR(addr) |
2131 UHCI_TD_SET_PID(isread ? UHCI_TD_PID_IN : UHCI_TD_PID_OUT) |
2132 UHCI_TD_SET_DT(tog) |
2133 UHCI_TD_SET_MAXLEN(l)
2134 );
2135 std->td.td_buffer = htole32(DMAADDR(dma, offs));
2136
2137 std->link.std = NULL;
2138
2139 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2140 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2141 tog ^= 1;
2142
2143 offs += l;
2144 len -= l;
2145 }
	KASSERTMSG(len == 0, "xfer %p alen %d len %d mps %d ux_nstd %zu i %zu",
	    xfer, length, len, maxp, uxfer->ux_nstd, i);
2148
2149 if (!isread &&
2150 (flags & USBD_FORCE_SHORT_XFER) &&
2151 length % maxp == 0) {
2152 /* Force a 0 length transfer at the end. */
2153 KASSERTMSG(i < uxfer->ux_nstd, "i=%zu nstd=%zu", i,
2154 uxfer->ux_nstd);
2155 std = uxfer->ux_stds[i++];
2156
2157 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD);
2158 std->td.td_status = htole32(status);
2159 std->td.td_token = htole32(
2160 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) |
2161 UHCI_TD_SET_DEVADDR(addr) |
2162 UHCI_TD_SET_PID(UHCI_TD_PID_OUT) |
2163 UHCI_TD_SET_DT(tog) |
2164 UHCI_TD_SET_MAXLEN(0)
2165 );
2166 std->td.td_buffer = 0;
2167 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2168 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2169
2170 std->link.std = NULL;
2171 if (prev) {
2172 prev->link.std = std;
2173 prev->td.td_link = htole32(
2174 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD
2175 );
2176 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td),
2177 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2178 }
2179 tog ^= 1;
2180 }
2181 *lstd = std;
2182 *toggle = tog;
2183 }
2184
2185 void
2186 uhci_device_clear_toggle(struct usbd_pipe *pipe)
2187 {
2188 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2189 upipe->nexttoggle = 0;
2190 }
2191
2192 void
2193 uhci_noop(struct usbd_pipe *pipe)
2194 {
2195 }
2196
2197 int
2198 uhci_device_bulk_init(struct usbd_xfer *xfer)
2199 {
2200 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2201 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2202 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
2203 int endpt = ed->bEndpointAddress;
2204 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2205 int len = xfer->ux_bufsize;
2206 int err = 0;
2207
2208 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2209 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, len,
2210 xfer->ux_flags, 0);
2211
2212 if (sc->sc_dying)
2213 return USBD_IOERROR;
2214
2215 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2216
2217 uxfer->ux_type = UX_BULK;
2218 err = uhci_alloc_std_chain(sc, xfer, len, isread, &uxfer->ux_stdstart);
2219 if (err)
2220 return err;
2221
2222 #ifdef UHCI_DEBUG
2223 if (uhcidebug >= 10) {
2224 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2225 uhci_dump_tds(uxfer->ux_stdstart);
2226 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2227 }
2228 #endif
2229
2230 return 0;
2231 }
2232
2233 Static void
2234 uhci_device_bulk_fini(struct usbd_xfer *xfer)
2235 {
2236 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2237 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2238
2239 KASSERT(ux->ux_type == UX_BULK);
2240
2241 if (ux->ux_nstd) {
2242 uhci_free_stds(sc, ux);
2243 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2244 }
2245 }
2246
2247 usbd_status
2248 uhci_device_bulk_transfer(struct usbd_xfer *xfer)
2249 {
2250
2251 /* Pipe isn't running, so start it first. */
2252 return uhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2253 }
2254
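/*
 * Start a bulk transfer: rebuild the xfer's TD chain for the current
 * request, ask for an interrupt on the last TD, hook the chain into the
 * pipe's queue head and put that QH on the bulk schedule.
 */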
2255 usbd_status
2256 uhci_device_bulk_start(struct usbd_xfer *xfer)
2257 {
2258 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2259 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2260 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2261 uhci_soft_td_t *data, *dataend;
2262 uhci_soft_qh_t *sqh;
2263 int len;
2264 int endpt;
2265 int isread;
2266
2267 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2268 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2269 xfer->ux_length, xfer->ux_flags, 0);
2270
2271 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
2272
2273 if (sc->sc_dying)
2274 return USBD_IOERROR;
2275
2276 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2277 KASSERT(xfer->ux_length <= xfer->ux_bufsize);
2278
2279 len = xfer->ux_length;
2280 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2281 isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2282 sqh = upipe->bulk.sqh;
2283
2284 uhci_reset_std_chain(sc, xfer, len, isread, &upipe->nexttoggle,
2285 &dataend);
2286
2287 data = ux->ux_stdstart;
2288 ux->ux_stdend = dataend;
2289 dataend->td.td_status |= htole32(UHCI_TD_IOC);
2290 usb_syncmem(&dataend->dma,
2291 dataend->offs + offsetof(uhci_td_t, td_status),
2292 sizeof(dataend->td.td_status),
2293 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2294
2295 #ifdef UHCI_DEBUG
2296 if (uhcidebug >= 10) {
2297 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2298 DPRINTFN(10, "before transfer", 0, 0, 0, 0);
2299 uhci_dump_tds(data);
2300 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2301 }
2302 #endif
2303
2304 KASSERT(ux->ux_isdone);
2305 #ifdef DIAGNOSTIC
2306 ux->ux_isdone = false;
2307 #endif
2308
2309 sqh->elink = data;
2310 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD);
2311 /* uhci_add_bulk() will do usb_syncmem(sqh) */
2312
2313 uhci_add_bulk(sc, sqh);
2314 uhci_add_intr_list(sc, ux);
2315 xfer->ux_status = USBD_IN_PROGRESS;
2316 usbd_xfer_schedule_timeout(xfer);
2317
2318 return USBD_IN_PROGRESS;
2319 }
2320
2321 /* Abort a device bulk request. */
2322 void
2323 uhci_device_bulk_abort(struct usbd_xfer *xfer)
2324 {
2325 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2326
2327 KASSERT(mutex_owned(&sc->sc_lock));
2328
2329 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2330
2331 usbd_xfer_abort(xfer);
2332 }
2333
/*
 * Abort an xfer that has been cancelled or has timed out: deactivate its
 * TDs so that neither the hardware nor the interrupt routine will look at
 * them again, then simply wait long enough for the hardware to have moved
 * on.
 */
2337 Static void
2338 uhci_abortx(struct usbd_xfer *xfer)
2339 {
2340 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2341 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2342 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2343 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2344 uhci_soft_td_t *std;
2345
2346 DPRINTFN(1,"xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2347
2348 KASSERT(mutex_owned(&sc->sc_lock));
2349 ASSERT_SLEEPABLE();
2350
2351 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2352 xfer->ux_status == USBD_TIMEOUT),
2353 "bad abort status: %d", xfer->ux_status);
2354
2355 /*
2356 * If we're dying, skip the hardware action and just notify the
2357 * software that we're done.
2358 */
2359 if (sc->sc_dying) {
2360 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer,
2361 xfer->ux_status, 0, 0);
2362 goto dying;
2363 }
2364
2365 /*
2366 * HC Step 1: Make interrupt routine and hardware ignore xfer.
2367 */
2368 uhci_del_intr_list(sc, ux);
2369
2370 DPRINTF("stop ux=%#jx", (uintptr_t)ux, 0, 0, 0);
2371 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) {
2372 usb_syncmem(&std->dma,
2373 std->offs + offsetof(uhci_td_t, td_status),
2374 sizeof(std->td.td_status),
2375 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2376 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC));
2377 usb_syncmem(&std->dma,
2378 std->offs + offsetof(uhci_td_t, td_status),
2379 sizeof(std->td.td_status),
2380 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2381 }
2382
2383 /*
2384 * HC Step 2: Wait until we know hardware has finished any possible
2385 * use of the xfer.
2386 */
	/* The hardware finishes within 1ms; wait 2ms to be safe. */
2388 usb_delay_ms_locked(upipe->pipe.up_dev->ud_bus, 2, &sc->sc_lock);
2389 dying:
2390 #ifdef DIAGNOSTIC
2391 ux->ux_isdone = true;
2392 #endif
2393 DPRINTFN(14, "end", 0, 0, 0, 0);
2394
2395 KASSERT(mutex_owned(&sc->sc_lock));
2396 }
2397
2398 /* Close a device bulk pipe. */
2399 void
2400 uhci_device_bulk_close(struct usbd_pipe *pipe)
2401 {
2402 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2403 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2404
2405 KASSERT(mutex_owned(&sc->sc_lock));
2406
2407 uhci_free_sqh(sc, upipe->bulk.sqh);
2408
2409 pipe->up_endpoint->ue_toggle = upipe->nexttoggle;
2410 }
2411
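/*
 * Allocate per-xfer resources for a control transfer: a TD chain for the
 * data stage (if any); the SETUP and STATUS TDs come from the pipe.
 */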
2412 int
2413 uhci_device_ctrl_init(struct usbd_xfer *xfer)
2414 {
2415 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2416 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2417 usb_device_request_t *req = &xfer->ux_request;
2418 struct usbd_device *dev = upipe->pipe.up_dev;
2419 uhci_softc_t *sc = dev->ud_bus->ub_hcpriv;
2420 uhci_soft_td_t *data = NULL;
2421 int len;
2422 usbd_status err;
2423 int isread;
2424
2425 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2426 DPRINTFN(3, "xfer=%#jx len=%jd, addr=%jd, endpt=%jd",
2427 (uintptr_t)xfer, xfer->ux_bufsize, dev->ud_addr,
2428 upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress);
2429
2430 isread = req->bmRequestType & UT_READ;
2431 len = xfer->ux_bufsize;
2432
2433 uxfer->ux_type = UX_CTRL;
2434 /* Set up data transaction */
2435 if (len != 0) {
2436 err = uhci_alloc_std_chain(sc, xfer, len, isread, &data);
2437 if (err)
2438 return err;
2439 }
2440 /* Set up interrupt info. */
2441 uxfer->ux_setup = upipe->ctrl.setup;
2442 uxfer->ux_stat = upipe->ctrl.stat;
2443 uxfer->ux_data = data;
2444
2445 return 0;
2446 }
2447
2448 Static void
2449 uhci_device_ctrl_fini(struct usbd_xfer *xfer)
2450 {
2451 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2452 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2453
2454 KASSERT(ux->ux_type == UX_CTRL);
2455
2456 if (ux->ux_nstd) {
2457 uhci_free_stds(sc, ux);
2458 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2459 }
2460 }
2461
2462 usbd_status
2463 uhci_device_ctrl_transfer(struct usbd_xfer *xfer)
2464 {
2465
2466 /* Pipe isn't running, so start it first. */
2467 return uhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2468 }
2469
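/*
 * Start a control transfer: copy the request into the pipe's DMA buffer
 * and build the SETUP, optional DATA and STATUS stages.  The DATA stage
 * starts with toggle 1 and the STATUS stage uses the opposite direction,
 * also with toggle 1.  The resulting QH is added to the low- or
 * full-speed control schedule.
 */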
2470 usbd_status
2471 uhci_device_ctrl_start(struct usbd_xfer *xfer)
2472 {
2473 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2474 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2475 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2476 usb_device_request_t *req = &xfer->ux_request;
2477 struct usbd_device *dev = upipe->pipe.up_dev;
2478 int addr = dev->ud_addr;
2479 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2480 uhci_soft_td_t *setup, *stat, *next, *dataend;
2481 uhci_soft_qh_t *sqh;
2482 int len;
2483 int isread;
2484
2485 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2486
2487 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
2488
2489 if (sc->sc_dying)
2490 return USBD_IOERROR;
2491
2492 KASSERT(xfer->ux_rqflags & URQ_REQUEST);
2493
2494 DPRINTFN(3, "type=0x%02jx, request=0x%02jx, "
2495 "wValue=0x%04jx, wIndex=0x%04jx",
2496 req->bmRequestType, req->bRequest, UGETW(req->wValue),
2497 UGETW(req->wIndex));
2498 DPRINTFN(3, "len=%jd, addr=%jd, endpt=%jd",
2499 UGETW(req->wLength), dev->ud_addr, endpt, 0);
2500
2501 isread = req->bmRequestType & UT_READ;
2502 len = UGETW(req->wLength);
2503
2504 setup = upipe->ctrl.setup;
2505 stat = upipe->ctrl.stat;
2506 sqh = upipe->ctrl.sqh;
2507
2508 memcpy(KERNADDR(&upipe->ctrl.reqdma, 0), req, sizeof(*req));
2509 usb_syncmem(&upipe->ctrl.reqdma, 0, sizeof(*req), BUS_DMASYNC_PREWRITE);
2510
2511 /* Set up data transaction */
2512 if (len != 0) {
2513 upipe->nexttoggle = 1;
2514 next = uxfer->ux_data;
2515 uhci_reset_std_chain(sc, xfer, len, isread,
2516 &upipe->nexttoggle, &dataend);
2517 dataend->link.std = stat;
2518 dataend->td.td_link = htole32(stat->physaddr | UHCI_PTR_TD);
2519 usb_syncmem(&dataend->dma,
2520 dataend->offs + offsetof(uhci_td_t, td_link),
2521 sizeof(dataend->td.td_link),
2522 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2523 } else {
2524 next = stat;
2525 }
2526
2527 const uint32_t status = UHCI_TD_ZERO_ACTLEN(
2528 UHCI_TD_SET_ERRCNT(3) |
2529 UHCI_TD_ACTIVE |
2530 (dev->ud_speed == USB_SPEED_LOW ? UHCI_TD_LS : 0)
2531 );
2532 setup->link.std = next;
2533 setup->td.td_link = htole32(next->physaddr | UHCI_PTR_TD);
2534 setup->td.td_status = htole32(status);
2535 setup->td.td_token = htole32(UHCI_TD_SETUP(sizeof(*req), endpt, addr));
2536 setup->td.td_buffer = htole32(DMAADDR(&upipe->ctrl.reqdma, 0));
2537
2538 usb_syncmem(&setup->dma, setup->offs, sizeof(setup->td),
2539 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2540
2541 stat->link.std = NULL;
2542 stat->td.td_link = htole32(UHCI_PTR_T);
2543 stat->td.td_status = htole32(status | UHCI_TD_IOC);
2544 stat->td.td_token =
2545 htole32(isread ? UHCI_TD_OUT(0, endpt, addr, 1) :
2546 UHCI_TD_IN (0, endpt, addr, 1));
2547 stat->td.td_buffer = htole32(0);
2548 usb_syncmem(&stat->dma, stat->offs, sizeof(stat->td),
2549 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2550
2551 #ifdef UHCI_DEBUG
2552 if (uhcidebug >= 10) {
2553 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2554 DPRINTF("before transfer", 0, 0, 0, 0);
2555 uhci_dump_tds(setup);
2556 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2557 }
2558 #endif
2559
2560 /* Set up interrupt info. */
2561 uxfer->ux_setup = setup;
2562 uxfer->ux_stat = stat;
2563 KASSERT(uxfer->ux_isdone);
2564 #ifdef DIAGNOSTIC
2565 uxfer->ux_isdone = false;
2566 #endif
2567
2568 sqh->elink = setup;
2569 sqh->qh.qh_elink = htole32(setup->physaddr | UHCI_PTR_TD);
2570 /* uhci_add_?s_ctrl() will do usb_syncmem(sqh) */
2571
2572 if (dev->ud_speed == USB_SPEED_LOW)
2573 uhci_add_ls_ctrl(sc, sqh);
2574 else
2575 uhci_add_hs_ctrl(sc, sqh);
2576 uhci_add_intr_list(sc, uxfer);
2577 #ifdef UHCI_DEBUG
2578 if (uhcidebug >= 12) {
2579 uhci_soft_td_t *std;
2580 uhci_soft_qh_t *xqh;
2581 uhci_soft_qh_t *sxqh;
2582 int maxqh = 0;
2583 uhci_physaddr_t link;
2584 DPRINTFN(12, "--- dump start ---", 0, 0, 0, 0);
2585 DPRINTFN(12, "follow from [0]", 0, 0, 0, 0);
2586 for (std = sc->sc_vframes[0].htd, link = 0;
2587 (link & UHCI_PTR_QH) == 0;
2588 std = std->link.std) {
2589 link = le32toh(std->td.td_link);
2590 uhci_dump_td(std);
2591 }
2592 sxqh = (uhci_soft_qh_t *)std;
2593 uhci_dump_qh(sxqh);
2594 for (xqh = sxqh;
2595 xqh != NULL;
2596 xqh = (maxqh++ == 5 || xqh->hlink == sxqh ||
2597 xqh->hlink == xqh ? NULL : xqh->hlink)) {
2598 uhci_dump_qh(xqh);
2599 }
2600 DPRINTFN(12, "Enqueued QH:", 0, 0, 0, 0);
2601 uhci_dump_qh(sqh);
2602 uhci_dump_tds(sqh->elink);
2603 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2604 }
2605 #endif
2606 xfer->ux_status = USBD_IN_PROGRESS;
2607 usbd_xfer_schedule_timeout(xfer);
2608
2609 return USBD_IN_PROGRESS;
2610 }
2611
2612 int
2613 uhci_device_intr_init(struct usbd_xfer *xfer)
2614 {
2615 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2616 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2617 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
2618 int endpt = ed->bEndpointAddress;
2619 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2620 int len = xfer->ux_bufsize;
2621 int err;
2622
2623 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2624
2625 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2626 xfer->ux_length, xfer->ux_flags, 0);
2627
2628 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2629 KASSERT(len != 0);
2630
2631 ux->ux_type = UX_INTR;
2632 ux->ux_nstd = 0;
2633 err = uhci_alloc_std_chain(sc, xfer, len, isread, &ux->ux_stdstart);
2634
2635 return err;
2636 }
2637
2638 Static void
2639 uhci_device_intr_fini(struct usbd_xfer *xfer)
2640 {
2641 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2642 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2643
2644 KASSERT(ux->ux_type == UX_INTR);
2645
2646 if (ux->ux_nstd) {
2647 uhci_free_stds(sc, ux);
2648 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2649 }
2650 }
2651
2652 usbd_status
2653 uhci_device_intr_transfer(struct usbd_xfer *xfer)
2654 {
2655
2656 /* Pipe isn't running, so start it first. */
2657 return uhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2658 }
2659
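/*
 * Start an interrupt transfer: reset the TD chain using the pipe's
 * current data toggle, request an interrupt on the last TD and link the
 * chain into each of the pipe's polling queue heads.
 */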
2660 usbd_status
2661 uhci_device_intr_start(struct usbd_xfer *xfer)
2662 {
2663 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2664 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2665 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2666 uhci_soft_td_t *data, *dataend;
2667 uhci_soft_qh_t *sqh;
2668 int isread, endpt;
2669 int i;
2670
2671 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2672
2673 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2674 xfer->ux_length, xfer->ux_flags, 0);
2675
2676 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
2677
2678 if (sc->sc_dying)
2679 return USBD_IOERROR;
2680
2681 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2682 KASSERT(xfer->ux_length <= xfer->ux_bufsize);
2683
2684 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2685 isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2686
2687 data = ux->ux_stdstart;
2688
2689 KASSERT(ux->ux_isdone);
2690 #ifdef DIAGNOSTIC
2691 ux->ux_isdone = false;
2692 #endif
2693
	/* The caller holds sc_lock (or is polling), protecting nexttoggle. */
2695 uhci_reset_std_chain(sc, xfer, xfer->ux_length, isread,
2696 &upipe->nexttoggle, &dataend);
2697
2698 dataend->td.td_status |= htole32(UHCI_TD_IOC);
2699 usb_syncmem(&dataend->dma,
2700 dataend->offs + offsetof(uhci_td_t, td_status),
2701 sizeof(dataend->td.td_status),
2702 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2703 ux->ux_stdend = dataend;
2704
2705 #ifdef UHCI_DEBUG
2706 if (uhcidebug >= 10) {
2707 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2708 uhci_dump_tds(data);
2709 uhci_dump_qh(upipe->intr.qhs[0]);
2710 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2711 }
2712 #endif
2713
2714 DPRINTFN(10, "qhs[0]=%#jx", (uintptr_t)upipe->intr.qhs[0], 0, 0, 0);
2715 for (i = 0; i < upipe->intr.npoll; i++) {
2716 sqh = upipe->intr.qhs[i];
2717 sqh->elink = data;
2718 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD);
2719 usb_syncmem(&sqh->dma,
2720 sqh->offs + offsetof(uhci_qh_t, qh_elink),
2721 sizeof(sqh->qh.qh_elink),
2722 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2723 }
2724 uhci_add_intr_list(sc, ux);
2725 xfer->ux_status = USBD_IN_PROGRESS;
2726
2727 #ifdef UHCI_DEBUG
2728 if (uhcidebug >= 10) {
2729 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2730 uhci_dump_tds(data);
2731 uhci_dump_qh(upipe->intr.qhs[0]);
2732 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2733 }
2734 #endif
2735
2736 return USBD_IN_PROGRESS;
2737 }
2738
2739 /* Abort a device control request. */
2740 void
2741 uhci_device_ctrl_abort(struct usbd_xfer *xfer)
2742 {
2743 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2744
2745 KASSERT(mutex_owned(&sc->sc_lock));
2746
2747 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2748 usbd_xfer_abort(xfer);
2749 }
2750
2751 /* Close a device control pipe. */
2752 void
2753 uhci_device_ctrl_close(struct usbd_pipe *pipe)
2754 {
2755 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2756 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2757
2758 uhci_free_sqh(sc, upipe->ctrl.sqh);
2759 uhci_free_std_locked(sc, upipe->ctrl.setup);
2760 uhci_free_std_locked(sc, upipe->ctrl.stat);
2761
2762 usb_freemem(&upipe->ctrl.reqdma);
2763 }
2764
2765 /* Abort a device interrupt request. */
2766 void
2767 uhci_device_intr_abort(struct usbd_xfer *xfer)
2768 {
2769 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2770
2771 KASSERT(mutex_owned(&sc->sc_lock));
2772
2773 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2774 DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2775
2776 usbd_xfer_abort(xfer);
2777 }
2778
2779 /* Close a device interrupt pipe. */
2780 void
2781 uhci_device_intr_close(struct usbd_pipe *pipe)
2782 {
2783 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2784 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2785 int i, npoll;
2786
2787 KASSERT(mutex_owned(&sc->sc_lock));
2788
2789 /* Unlink descriptors from controller data structures. */
2790 npoll = upipe->intr.npoll;
2791 for (i = 0; i < npoll; i++)
2792 uhci_remove_intr(sc, upipe->intr.qhs[i]);
2793
2794 /*
2795 * We now have to wait for any activity on the physical
2796 * descriptors to stop.
2797 */
2798 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock);
2799
2800 for (i = 0; i < npoll; i++)
2801 uhci_free_sqh(sc, upipe->intr.qhs[i]);
2802 kmem_free(upipe->intr.qhs, npoll * sizeof(uhci_soft_qh_t *));
2803 }
2804
2805 int
2806 uhci_device_isoc_init(struct usbd_xfer *xfer)
2807 {
2808 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2809
2810 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2811 KASSERT(xfer->ux_nframes != 0);
2812 KASSERT(ux->ux_isdone);
2813
2814 ux->ux_type = UX_ISOC;
2815 return 0;
2816 }
2817
2818 Static void
2819 uhci_device_isoc_fini(struct usbd_xfer *xfer)
2820 {
2821 struct uhci_xfer *ux __diagused = UHCI_XFER2UXFER(xfer);
2822
2823 KASSERT(ux->ux_type == UX_ISOC);
2824 }
2825
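/*
 * Start an isochronous transfer: claim the next ux_nframes slots in the
 * virtual frame list (or start a few frames ahead of the current frame
 * number if the pipe is idle), point the per-frame TDs at the transfer
 * buffer, activate them and request an interrupt on the last one.
 */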
2826 usbd_status
2827 uhci_device_isoc_transfer(struct usbd_xfer *xfer)
2828 {
2829 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2830
2831 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2832 DPRINTFN(5, "xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2833
2834 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
2835
	/* Insert the transfer into the frame schedule. */
2837
2838 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2839 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2840 struct isoc *isoc = &upipe->isoc;
2841 uhci_soft_td_t *std = NULL;
2842 uint32_t buf, len, status, offs;
2843 int i, next, nframes;
2844 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN;
2845
2846 DPRINTFN(5, "used=%jd next=%jd xfer=%#jx nframes=%jd",
2847 isoc->inuse, isoc->next, (uintptr_t)xfer, xfer->ux_nframes);
2848
2849 if (sc->sc_dying)
2850 return USBD_IOERROR;
2851
2852 if (xfer->ux_status == USBD_IN_PROGRESS) {
2853 /* This request has already been entered into the frame list */
2854 printf("%s: xfer=%p in frame list\n", __func__, xfer);
2855 /* XXX */
2856 }
2857
2858 #ifdef DIAGNOSTIC
2859 if (isoc->inuse >= UHCI_VFRAMELIST_COUNT)
2860 printf("%s: overflow!\n", __func__);
2861 #endif
2862
2863 KASSERT(xfer->ux_nframes != 0);
2864
2865 if (xfer->ux_length)
2866 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
2867 rd ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2868
2869 next = isoc->next;
2870 if (next == -1) {
2871 /* Not in use yet, schedule it a few frames ahead. */
2872 next = (UREAD2(sc, UHCI_FRNUM) + 3) % UHCI_VFRAMELIST_COUNT;
2873 DPRINTFN(2, "start next=%jd", next, 0, 0, 0);
2874 }
2875
2876 xfer->ux_status = USBD_IN_PROGRESS;
2877 ux->ux_curframe = next;
2878
2879 offs = 0;
2880 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(0) |
2881 UHCI_TD_ACTIVE |
2882 UHCI_TD_IOS);
2883 nframes = xfer->ux_nframes;
2884 for (i = 0; i < nframes; i++) {
2885 buf = DMAADDR(&xfer->ux_dmabuf, offs);
2886 std = isoc->stds[next];
2887 if (++next >= UHCI_VFRAMELIST_COUNT)
2888 next = 0;
2889 len = xfer->ux_frlengths[i];
2890
2891 KASSERTMSG(len <= __SHIFTOUT_MASK(UHCI_TD_MAXLEN_MASK),
2892 "len %d", len);
2893 std->td.td_buffer = htole32(buf);
2894 usb_syncmem(&xfer->ux_dmabuf, offs, len,
2895 rd ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2896 if (i == nframes - 1)
2897 status |= UHCI_TD_IOC;
2898 std->td.td_status = htole32(status);
2899 std->td.td_token &= htole32(~UHCI_TD_MAXLEN_MASK);
2900 std->td.td_token |= htole32(UHCI_TD_SET_MAXLEN(len));
2901 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2902 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2903 #ifdef UHCI_DEBUG
2904 if (uhcidebug >= 5) {
2905 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2906 DPRINTF("TD %jd", i, 0, 0, 0);
2907 uhci_dump_td(std);
2908 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2909 }
2910 #endif
2911 offs += len;
2912 const bus_addr_t bend __diagused =
2913 DMAADDR(&xfer->ux_dmabuf, offs - 1);
2914
2915 KASSERT(((buf ^ bend) & ~PAGE_MASK) == 0);
2916 }
2917 isoc->next = next;
2918 isoc->inuse += xfer->ux_nframes;
2919
2920 /* Set up interrupt info. */
2921 ux->ux_stdstart = std;
2922 ux->ux_stdend = std;
2923
2924 KASSERT(ux->ux_isdone);
2925 #ifdef DIAGNOSTIC
2926 ux->ux_isdone = false;
2927 #endif
2928 uhci_add_intr_list(sc, ux);
2929
2930 return USBD_IN_PROGRESS;
2931 }
2932
2933 void
2934 uhci_device_isoc_abort(struct usbd_xfer *xfer)
2935 {
2936 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2937 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2938 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2939 uhci_soft_td_t **stds = upipe->isoc.stds;
2940 uhci_soft_td_t *std;
2941 int i, n, nframes, maxlen, len;
2942
2943 KASSERT(mutex_owned(&sc->sc_lock));
2944
2945 /* Transfer is already done. */
2946 if (xfer->ux_status != USBD_NOT_STARTED &&
2947 xfer->ux_status != USBD_IN_PROGRESS) {
2948 return;
2949 }
2950
2951 /* Give xfer the requested abort code. */
2952 xfer->ux_status = USBD_CANCELLED;
2953
2954 /* make hardware ignore it, */
2955 nframes = xfer->ux_nframes;
2956 n = ux->ux_curframe;
2957 maxlen = 0;
2958 for (i = 0; i < nframes; i++) {
2959 std = stds[n];
2960 usb_syncmem(&std->dma,
2961 std->offs + offsetof(uhci_td_t, td_status),
2962 sizeof(std->td.td_status),
2963 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2964 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC));
2965 usb_syncmem(&std->dma,
2966 std->offs + offsetof(uhci_td_t, td_status),
2967 sizeof(std->td.td_status),
2968 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2969 usb_syncmem(&std->dma,
2970 std->offs + offsetof(uhci_td_t, td_token),
2971 sizeof(std->td.td_token),
2972 BUS_DMASYNC_POSTWRITE);
2973 len = UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token));
2974 if (len > maxlen)
2975 maxlen = len;
2976 if (++n >= UHCI_VFRAMELIST_COUNT)
2977 n = 0;
2978 }
2979
2980 /* and wait until we are sure the hardware has finished. */
2981 delay(maxlen);
2982
2983 #ifdef DIAGNOSTIC
2984 ux->ux_isdone = true;
2985 #endif
2986 /* Remove from interrupt list. */
2987 uhci_del_intr_list(sc, ux);
2988
2989 /* Run callback. */
2990 usb_transfer_complete(xfer);
2991
2992 KASSERT(mutex_owned(&sc->sc_lock));
2993 }
2994
2995 void
2996 uhci_device_isoc_close(struct usbd_pipe *pipe)
2997 {
2998 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2999 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3000 uhci_soft_td_t *std, *vstd;
3001 struct isoc *isoc;
3002 int i;
3003
3004 KASSERT(mutex_owned(&sc->sc_lock));
3005
3006 /*
3007 * Make sure all TDs are marked as inactive.
3008 * Wait for completion.
3009 * Unschedule.
3010 * Deallocate.
3011 */
3012 isoc = &upipe->isoc;
3013
3014 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3015 std = isoc->stds[i];
3016 usb_syncmem(&std->dma,
3017 std->offs + offsetof(uhci_td_t, td_status),
3018 sizeof(std->td.td_status),
3019 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3020 std->td.td_status &= htole32(~UHCI_TD_ACTIVE);
3021 usb_syncmem(&std->dma,
3022 std->offs + offsetof(uhci_td_t, td_status),
3023 sizeof(std->td.td_status),
3024 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3025 }
3026 /* wait for completion */
3027 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock);
3028
3029 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3030 std = isoc->stds[i];
3031 for (vstd = sc->sc_vframes[i].htd;
3032 vstd != NULL && vstd->link.std != std;
3033 vstd = vstd->link.std)
3034 ;
		if (vstd == NULL) {
			/* Should never happen; the caller still holds sc_lock. */
			printf("%s: %p not found\n", __func__, std);
			return;
		}
3041 vstd->link = std->link;
3042 usb_syncmem(&std->dma,
3043 std->offs + offsetof(uhci_td_t, td_link),
3044 sizeof(std->td.td_link),
3045 BUS_DMASYNC_POSTWRITE);
3046 vstd->td.td_link = std->td.td_link;
3047 usb_syncmem(&vstd->dma,
3048 vstd->offs + offsetof(uhci_td_t, td_link),
3049 sizeof(vstd->td.td_link),
3050 BUS_DMASYNC_PREWRITE);
3051 uhci_free_std_locked(sc, std);
3052 }
3053
3054 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *));
3055 }
3056
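/*
 * Set up an isochronous pipe: allocate one inactive TD per virtual frame
 * and splice it into the corresponding slot of the frame list.  The TDs
 * are filled in and activated by uhci_device_isoc_transfer().
 */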
3057 usbd_status
3058 uhci_setup_isoc(struct usbd_pipe *pipe)
3059 {
3060 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3061 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3062 int addr = upipe->pipe.up_dev->ud_addr;
3063 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
3064 int rd = UE_GET_DIR(endpt) == UE_DIR_IN;
3065 uhci_soft_td_t *std, *vstd;
3066 uint32_t token;
3067 struct isoc *isoc;
3068 int i;
3069
3070 isoc = &upipe->isoc;
3071
3072 isoc->stds = kmem_alloc(
3073 UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *), KM_SLEEP);
3074 if (isoc->stds == NULL)
3075 return USBD_NOMEM;
3076
3077 token = rd ? UHCI_TD_IN (0, endpt, addr, 0) :
3078 UHCI_TD_OUT(0, endpt, addr, 0);
3079
	/* Allocate the TDs and mark them inactive. */
	for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
		std = uhci_alloc_std(sc);
		if (std == NULL)
3084 goto bad;
3085 std->td.td_status = htole32(UHCI_TD_IOS); /* iso, inactive */
3086 std->td.td_token = htole32(token);
3087 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
3088 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3089 isoc->stds[i] = std;
3090 }
3091
3092 mutex_enter(&sc->sc_lock);
3093
3094 /* Insert TDs into schedule. */
3095 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3096 std = isoc->stds[i];
3097 vstd = sc->sc_vframes[i].htd;
3098 usb_syncmem(&vstd->dma,
3099 vstd->offs + offsetof(uhci_td_t, td_link),
3100 sizeof(vstd->td.td_link),
3101 BUS_DMASYNC_POSTWRITE);
3102 std->link = vstd->link;
3103 std->td.td_link = vstd->td.td_link;
3104 usb_syncmem(&std->dma,
3105 std->offs + offsetof(uhci_td_t, td_link),
3106 sizeof(std->td.td_link),
3107 BUS_DMASYNC_PREWRITE);
3108 vstd->link.std = std;
3109 vstd->td.td_link = htole32(std->physaddr | UHCI_PTR_TD);
3110 usb_syncmem(&vstd->dma,
3111 vstd->offs + offsetof(uhci_td_t, td_link),
3112 sizeof(vstd->td.td_link),
3113 BUS_DMASYNC_PREWRITE);
3114 }
3115 mutex_exit(&sc->sc_lock);
3116
3117 isoc->next = -1;
3118 isoc->inuse = 0;
3119
3120 return USBD_NORMAL_COMPLETION;
3121
3122 bad:
3123 while (--i >= 0)
3124 uhci_free_std(sc, isoc->stds[i]);
3125 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *));
3126 return USBD_NOMEM;
3127 }
3128
3129 void
3130 uhci_device_isoc_done(struct usbd_xfer *xfer)
3131 {
3132 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
3133 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3134 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
3135 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN;
3136
3137 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3138 DPRINTFN(4, "length=%jd, ux_state=0x%08jx",
3139 xfer->ux_actlen, xfer->ux_state, 0, 0);
3140
3141 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3142
3143 #ifdef DIAGNOSTIC
3144 if (ux->ux_stdend == NULL) {
3145 printf("%s: xfer=%p stdend==NULL\n", __func__, xfer);
3146 #ifdef UHCI_DEBUG
3147 DPRINTF("--- dump start ---", 0, 0, 0, 0);
3148 uhci_dump_ii(ux);
3149 DPRINTF("--- dump end ---", 0, 0, 0, 0);
3150 #endif
3151 return;
3152 }
3153 #endif
3154
3155 /* Turn off the interrupt since it is active even if the TD is not. */
3156 usb_syncmem(&ux->ux_stdend->dma,
3157 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status),
3158 sizeof(ux->ux_stdend->td.td_status),
3159 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3160 ux->ux_stdend->td.td_status &= htole32(~UHCI_TD_IOC);
3161 usb_syncmem(&ux->ux_stdend->dma,
3162 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status),
3163 sizeof(ux->ux_stdend->td.td_status),
3164 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3165
3166 if (xfer->ux_length)
3167 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
3168 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3169 }
3170
3171 void
3172 uhci_device_intr_done(struct usbd_xfer *xfer)
3173 {
3174 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
3175 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3176 uhci_soft_qh_t *sqh;
3177 int i, npoll;
3178
3179 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3180 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0);
3181
3182 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3183
3184 npoll = upipe->intr.npoll;
3185 for (i = 0; i < npoll; i++) {
3186 sqh = upipe->intr.qhs[i];
3187 sqh->elink = NULL;
3188 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3189 usb_syncmem(&sqh->dma,
3190 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3191 sizeof(sqh->qh.qh_elink),
3192 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3193 }
3194 const int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
3195 const bool isread = UE_GET_DIR(endpt) == UE_DIR_IN;
3196 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
3197 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3198 }
3199
3200 /* Deallocate request data structures */
3201 void
3202 uhci_device_ctrl_done(struct usbd_xfer *xfer)
3203 {
3204 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3205 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3206 int len = UGETW(xfer->ux_request.wLength);
3207 int isread = (xfer->ux_request.bmRequestType & UT_READ);
3208
3209 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3210
3211 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3212 KASSERT(xfer->ux_rqflags & URQ_REQUEST);
3213
3214 /* XXXNH move to uhci_idone??? */
3215 if (upipe->pipe.up_dev->ud_speed == USB_SPEED_LOW)
3216 uhci_remove_ls_ctrl(sc, upipe->ctrl.sqh);
3217 else
3218 uhci_remove_hs_ctrl(sc, upipe->ctrl.sqh);
3219
3220 if (len) {
3221 usb_syncmem(&xfer->ux_dmabuf, 0, len,
3222 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3223 }
3224 usb_syncmem(&upipe->ctrl.reqdma, 0,
3225 sizeof(usb_device_request_t), BUS_DMASYNC_POSTWRITE);
3226
3227 DPRINTF("length=%jd", xfer->ux_actlen, 0, 0, 0);
3228 }
3229
3230 /* Deallocate request data structures */
3231 void
3232 uhci_device_bulk_done(struct usbd_xfer *xfer)
3233 {
3234 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3235 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3236 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
3237 int endpt = ed->bEndpointAddress;
3238 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
3239
3240 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3241 DPRINTFN(5, "xfer=%#jx sc=%#jx upipe=%#jx", (uintptr_t)xfer,
3242 (uintptr_t)sc, (uintptr_t)upipe, 0);
3243
3244 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3245
3246 uhci_remove_bulk(sc, upipe->bulk.sqh);
3247
3248 if (xfer->ux_length) {
3249 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
3250 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3251 }
3252
3253 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0);
3254 }
3255
/* Add an interrupt QH to the frame schedule; the caller holds sc_lock. */
3257 void
3258 uhci_add_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
3259 {
3260 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos];
3261 uhci_soft_qh_t *eqh;
3262
3263 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3264 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0);
3265
3266 eqh = vf->eqh;
3267 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
3268 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
3269 sqh->hlink = eqh->hlink;
3270 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
3271 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
3272 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
3273 eqh->hlink = sqh;
3274 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
3275 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
3276 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
3277 vf->eqh = sqh;
3278 vf->bandwidth++;
3279 }
3280
3281 /* Remove interrupt QH. */
3282 void
3283 uhci_remove_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
3284 {
3285 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos];
3286 uhci_soft_qh_t *pqh;
3287
3288 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3289 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0);
3290
3291 /* See comment in uhci_remove_ctrl() */
3292
3293 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
3294 sizeof(sqh->qh.qh_elink),
3295 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3296 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) {
3297 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3298 usb_syncmem(&sqh->dma,
3299 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3300 sizeof(sqh->qh.qh_elink),
3301 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3302 delay(UHCI_QH_REMOVE_DELAY);
3303 }
3304
3305 pqh = uhci_find_prev_qh(vf->hqh, sqh);
3306 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
3307 sizeof(sqh->qh.qh_hlink),
3308 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3309 pqh->hlink = sqh->hlink;
3310 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
3311 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
3312 sizeof(pqh->qh.qh_hlink),
3313 BUS_DMASYNC_PREWRITE);
3314 delay(UHCI_QH_REMOVE_DELAY);
3315 if (vf->eqh == sqh)
3316 vf->eqh = pqh;
3317 vf->bandwidth--;
3318 }
3319
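/*
 * Pick a polling schedule for an interrupt pipe: allocate one QH per
 * polling slot (npoll = howmany(UHCI_VFRAMELIST_COUNT, ival)) and place
 * the QHs at the frame offset with the least bandwidth already in use.
 */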
3320 usbd_status
3321 uhci_device_setintr(uhci_softc_t *sc, struct uhci_pipe *upipe, int ival)
3322 {
3323 uhci_soft_qh_t *sqh;
3324 int i, npoll;
3325 u_int bestbw, bw, bestoffs, offs;
3326
3327 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3328 DPRINTFN(2, "pipe=%#jx", (uintptr_t)upipe, 0, 0, 0);
3329 if (ival == 0) {
3330 printf("%s: 0 interval\n", __func__);
3331 return USBD_INVAL;
3332 }
3333
3334 if (ival > UHCI_VFRAMELIST_COUNT)
3335 ival = UHCI_VFRAMELIST_COUNT;
3336 npoll = howmany(UHCI_VFRAMELIST_COUNT, ival);
3337 DPRINTF("ival=%jd npoll=%jd", ival, npoll, 0, 0);
3338
3339 upipe->intr.npoll = npoll;
3340 upipe->intr.qhs =
3341 kmem_alloc(npoll * sizeof(uhci_soft_qh_t *), KM_SLEEP);
3342
3343 /*
3344 * Figure out which offset in the schedule that has most
3345 * bandwidth left over.
3346 */
3347 #define MOD(i) ((i) & (UHCI_VFRAMELIST_COUNT-1))
3348 for (bestoffs = offs = 0, bestbw = ~0; offs < ival; offs++) {
3349 for (bw = i = 0; i < npoll; i++)
3350 bw += sc->sc_vframes[MOD(i * ival + offs)].bandwidth;
3351 if (bw < bestbw) {
3352 bestbw = bw;
3353 bestoffs = offs;
3354 }
3355 }
3356 DPRINTF("bw=%jd offs=%jd", bestbw, bestoffs, 0, 0);
3357 for (i = 0; i < npoll; i++) {
3358 upipe->intr.qhs[i] = sqh = uhci_alloc_sqh(sc);
3359 sqh->elink = NULL;
3360 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3361 usb_syncmem(&sqh->dma,
3362 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3363 sizeof(sqh->qh.qh_elink),
3364 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3365 sqh->pos = MOD(i * ival + bestoffs);
3366 }
3367 #undef MOD
3368
3369 mutex_enter(&sc->sc_lock);
3370 /* Enter QHs into the controller data structures. */
3371 for (i = 0; i < npoll; i++)
3372 uhci_add_intr(sc, upipe->intr.qhs[i]);
3373 mutex_exit(&sc->sc_lock);
3374
3375 DPRINTFN(5, "returns %#jx", (uintptr_t)upipe, 0, 0, 0);
3376
3377 return USBD_NORMAL_COMPLETION;
3378 }
3379
3380 /* Open a new pipe. */
3381 usbd_status
3382 uhci_open(struct usbd_pipe *pipe)
3383 {
3384 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3385 struct usbd_bus *bus = pipe->up_dev->ud_bus;
3386 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3387 usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc;
3388 int ival;
3389
3390 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3391 DPRINTF("pipe=%#jx, addr=%jd, endpt=%jd (%jd)",
3392 (uintptr_t)pipe, pipe->up_dev->ud_addr, ed->bEndpointAddress,
3393 bus->ub_rhaddr);
3394
3395 if (sc->sc_dying)
3396 return USBD_IOERROR;
3397
3398 upipe->aborting = 0;
3399 /* toggle state needed for bulk endpoints */
3400 upipe->nexttoggle = pipe->up_endpoint->ue_toggle;
3401
3402 if (pipe->up_dev->ud_addr == bus->ub_rhaddr) {
3403 switch (ed->bEndpointAddress) {
3404 case USB_CONTROL_ENDPOINT:
3405 pipe->up_methods = &roothub_ctrl_methods;
3406 break;
3407 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
3408 pipe->up_methods = &uhci_root_intr_methods;
3409 break;
3410 default:
3411 return USBD_INVAL;
3412 }
3413 } else {
3414 switch (ed->bmAttributes & UE_XFERTYPE) {
3415 case UE_CONTROL:
3416 pipe->up_methods = &uhci_device_ctrl_methods;
3417 upipe->ctrl.sqh = uhci_alloc_sqh(sc);
3418 if (upipe->ctrl.sqh == NULL)
3419 goto bad;
3420 upipe->ctrl.setup = uhci_alloc_std(sc);
3421 if (upipe->ctrl.setup == NULL) {
3422 uhci_free_sqh(sc, upipe->ctrl.sqh);
3423 goto bad;
3424 }
3425 upipe->ctrl.stat = uhci_alloc_std(sc);
3426 if (upipe->ctrl.stat == NULL) {
3427 uhci_free_sqh(sc, upipe->ctrl.sqh);
3428 uhci_free_std(sc, upipe->ctrl.setup);
3429 goto bad;
3430 }
3431 int err = usb_allocmem(sc->sc_bus.ub_dmatag,
3432 sizeof(usb_device_request_t), 0,
3433 USBMALLOC_COHERENT, &upipe->ctrl.reqdma);
3434 if (err) {
3435 uhci_free_sqh(sc, upipe->ctrl.sqh);
3436 uhci_free_std(sc, upipe->ctrl.setup);
3437 uhci_free_std(sc, upipe->ctrl.stat);
3438 goto bad;
3439 }
3440 break;
3441 case UE_INTERRUPT:
3442 pipe->up_methods = &uhci_device_intr_methods;
3443 ival = pipe->up_interval;
3444 if (ival == USBD_DEFAULT_INTERVAL)
3445 ival = ed->bInterval;
3446 return uhci_device_setintr(sc, upipe, ival);
3447 case UE_ISOCHRONOUS:
3448 pipe->up_serialise = false;
3449 pipe->up_methods = &uhci_device_isoc_methods;
3450 return uhci_setup_isoc(pipe);
3451 case UE_BULK:
3452 pipe->up_methods = &uhci_device_bulk_methods;
3453 upipe->bulk.sqh = uhci_alloc_sqh(sc);
3454 if (upipe->bulk.sqh == NULL)
3455 goto bad;
3456 break;
3457 }
3458 }
3459 return USBD_NORMAL_COMPLETION;
3460
3461 bad:
3462 return USBD_NOMEM;
3463 }
3464
3465 /*
3466 * Data structures and routines to emulate the root hub.
3467 */
3468 /*
3469 * The USB hub protocol requires that SET_FEATURE(PORT_RESET) also
3470 * enables the port, and also states that SET_FEATURE(PORT_ENABLE)
3471 * should not be used by the USB subsystem. As we cannot issue a
3472 * SET_FEATURE(PORT_ENABLE) externally, we must ensure that the port
3473 * will be enabled as part of the reset.
3474 *
3475 * On the VT83C572, the port cannot be successfully enabled until the
3476 * outstanding "port enable change" and "connection status change"
3477 * events have been reset.
3478 */
3479 Static usbd_status
3480 uhci_portreset(uhci_softc_t *sc, int index)
3481 {
3482 int lim, port, x;
3483 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3484
3485 if (index == 1)
3486 port = UHCI_PORTSC1;
3487 else if (index == 2)
3488 port = UHCI_PORTSC2;
3489 else
3490 return USBD_IOERROR;
3491
3492 x = URWMASK(UREAD2(sc, port));
3493 UWRITE2(sc, port, x | UHCI_PORTSC_PR);
3494
3495 usb_delay_ms(&sc->sc_bus, USB_PORT_ROOT_RESET_DELAY);
3496
3497 DPRINTF("uhci port %jd reset, status0 = 0x%04jx", index,
3498 UREAD2(sc, port), 0, 0);
3499
3500 x = URWMASK(UREAD2(sc, port));
3501 UWRITE2(sc, port, x & ~(UHCI_PORTSC_PR | UHCI_PORTSC_SUSP));
3502
3503 delay(100);
3504
3505 DPRINTF("uhci port %jd reset, status1 = 0x%04jx", index,
3506 UREAD2(sc, port), 0, 0);
3507
3508 x = URWMASK(UREAD2(sc, port));
3509 UWRITE2(sc, port, x | UHCI_PORTSC_PE);
3510
3511 for (lim = 10; --lim > 0;) {
3512 usb_delay_ms(&sc->sc_bus, USB_PORT_RESET_DELAY);
3513
3514 x = UREAD2(sc, port);
3515 DPRINTF("uhci port %jd iteration %ju, status = 0x%04jx", index,
3516 lim, x, 0);
3517
3518 if (!(x & UHCI_PORTSC_CCS)) {
3519 /*
3520 * No device is connected (or was disconnected
3521 * during reset). Consider the port reset.
3522 * The delay must be long enough to ensure on
3523 * the initial iteration that the device
3524 * connection will have been registered. 50ms
3525 * appears to be sufficient, but 20ms is not.
3526 */
3527 DPRINTFN(3, "uhci port %jd loop %ju, device detached",
3528 index, lim, 0, 0);
3529 break;
3530 }
3531
3532 if (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC)) {
3533 /*
3534 * Port enabled changed and/or connection
3535 * status changed were set. Reset either or
3536 * both raised flags (by writing a 1 to that
3537 * bit), and wait again for state to settle.
3538 */
3539 UWRITE2(sc, port, URWMASK(x) |
3540 (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC)));
3541 continue;
3542 }
3543
3544 if (x & UHCI_PORTSC_PE)
3545 /* Port is enabled */
3546 break;
3547
3548 UWRITE2(sc, port, URWMASK(x) | UHCI_PORTSC_PE);
3549 }
3550
3551 DPRINTFN(3, "uhci port %jd reset, status2 = 0x%04jx", index,
3552 UREAD2(sc, port), 0, 0);
3553
3554 if (lim <= 0) {
3555 DPRINTF("uhci port %jd reset timed out", index,
3556 0, 0, 0);
3557 return USBD_TIMEOUT;
3558 }
3559
3560 sc->sc_isreset = 1;
3561 return USBD_NORMAL_COMPLETION;
3562 }
3563
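/*
 * Handle the root hub control requests that the generic usbroothub code
 * cannot: port features, port and hub status, and the hub descriptor's
 * port count.  Returning buflen defers to the usbroothub defaults.
 */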
3564 Static int
3565 uhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
3566 void *buf, int buflen)
3567 {
3568 uhci_softc_t *sc = UHCI_BUS2SC(bus);
3569 int port, x;
3570 int status, change, totlen = 0;
3571 uint16_t len, value, index;
3572 usb_port_status_t ps;
3573 usbd_status err;
3574
3575 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3576
3577 if (sc->sc_dying)
3578 return -1;
3579
3580 DPRINTF("type=0x%02jx request=%02jx", req->bmRequestType,
3581 req->bRequest, 0, 0);
3582
3583 len = UGETW(req->wLength);
3584 value = UGETW(req->wValue);
3585 index = UGETW(req->wIndex);
3586
3587 #define C(x,y) ((x) | ((y) << 8))
3588 switch (C(req->bRequest, req->bmRequestType)) {
3589 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3590 DPRINTF("wValue=0x%04jx", value, 0, 0, 0);
3591 if (len == 0)
3592 break;
3593 switch (value) {
3594 #define sd ((usb_string_descriptor_t *)buf)
3595 case C(2, UDESC_STRING):
3596 /* Product */
3597 totlen = usb_makestrdesc(sd, len, "UHCI root hub");
3598 break;
3599 #undef sd
3600 default:
3601 /* default from usbroothub */
3602 return buflen;
3603 }
3604 break;
3605
3606 /* Hub requests */
3607 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
3608 break;
3609 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
3610 DPRINTF("UR_CLEAR_PORT_FEATURE port=%jd feature=%jd", index,
3611 value, 0, 0);
3612 if (index == 1)
3613 port = UHCI_PORTSC1;
3614 else if (index == 2)
3615 port = UHCI_PORTSC2;
3616 else {
3617 return -1;
3618 }
3619 switch(value) {
3620 case UHF_PORT_ENABLE:
3621 x = URWMASK(UREAD2(sc, port));
3622 UWRITE2(sc, port, x & ~UHCI_PORTSC_PE);
3623 break;
3624 case UHF_PORT_SUSPEND:
3625 x = URWMASK(UREAD2(sc, port));
3626 if (!(x & UHCI_PORTSC_SUSP)) /* not suspended */
3627 break;
3628 UWRITE2(sc, port, x | UHCI_PORTSC_RD);
3629 /* see USB2 spec ch. 7.1.7.7 */
3630 usb_delay_ms(&sc->sc_bus, 20);
3631 UWRITE2(sc, port, x & ~UHCI_PORTSC_SUSP);
3632 /* 10ms resume delay must be provided by caller */
3633 break;
3634 case UHF_PORT_RESET:
3635 x = URWMASK(UREAD2(sc, port));
3636 UWRITE2(sc, port, x & ~UHCI_PORTSC_PR);
3637 break;
3638 case UHF_C_PORT_CONNECTION:
3639 x = URWMASK(UREAD2(sc, port));
3640 UWRITE2(sc, port, x | UHCI_PORTSC_CSC);
3641 break;
3642 case UHF_C_PORT_ENABLE:
3643 x = URWMASK(UREAD2(sc, port));
3644 UWRITE2(sc, port, x | UHCI_PORTSC_POEDC);
3645 break;
3646 case UHF_C_PORT_OVER_CURRENT:
3647 x = URWMASK(UREAD2(sc, port));
3648 UWRITE2(sc, port, x | UHCI_PORTSC_OCIC);
3649 break;
3650 case UHF_C_PORT_RESET:
3651 sc->sc_isreset = 0;
3652 break;
3653 case UHF_PORT_CONNECTION:
3654 case UHF_PORT_OVER_CURRENT:
3655 case UHF_PORT_POWER:
3656 case UHF_PORT_LOW_SPEED:
3657 case UHF_C_PORT_SUSPEND:
3658 default:
3659 return -1;
3660 }
3661 break;
3662 case C(UR_GET_BUS_STATE, UT_READ_CLASS_OTHER):
3663 if (index == 1)
3664 port = UHCI_PORTSC1;
3665 else if (index == 2)
3666 port = UHCI_PORTSC2;
3667 else {
3668 return -1;
3669 }
3670 if (len > 0) {
3671 *(uint8_t *)buf =
3672 UHCI_PORTSC_GET_LS(UREAD2(sc, port));
3673 totlen = 1;
3674 }
3675 break;
3676 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
3677 if (len == 0)
3678 break;
3679 if ((value & 0xff) != 0) {
3680 return -1;
3681 }
3682 usb_hub_descriptor_t hubd;
3683
3684 totlen = uimin(buflen, sizeof(hubd));
3685 memcpy(&hubd, buf, totlen);
3686 hubd.bNbrPorts = 2;
3687 memcpy(buf, &hubd, totlen);
3688 break;
3689 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
3690 if (len != 4) {
3691 return -1;
3692 }
3693 memset(buf, 0, len);
3694 totlen = len;
3695 break;
3696 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
3697 if (index == 1)
3698 port = UHCI_PORTSC1;
3699 else if (index == 2)
3700 port = UHCI_PORTSC2;
3701 else {
3702 return -1;
3703 }
3704 if (len != 4) {
3705 return -1;
3706 }
3707 x = UREAD2(sc, port);
3708 status = change = 0;
3709 if (x & UHCI_PORTSC_CCS)
3710 status |= UPS_CURRENT_CONNECT_STATUS;
3711 if (x & UHCI_PORTSC_CSC)
3712 change |= UPS_C_CONNECT_STATUS;
3713 if (x & UHCI_PORTSC_PE)
3714 status |= UPS_PORT_ENABLED;
3715 if (x & UHCI_PORTSC_POEDC)
3716 change |= UPS_C_PORT_ENABLED;
3717 if (x & UHCI_PORTSC_OCI)
3718 status |= UPS_OVERCURRENT_INDICATOR;
3719 if (x & UHCI_PORTSC_OCIC)
3720 change |= UPS_C_OVERCURRENT_INDICATOR;
3721 if (x & UHCI_PORTSC_SUSP)
3722 status |= UPS_SUSPEND;
3723 if (x & UHCI_PORTSC_LSDA)
3724 status |= UPS_LOW_SPEED;
3725 status |= UPS_PORT_POWER;
3726 if (sc->sc_isreset)
3727 change |= UPS_C_PORT_RESET;
3728 USETW(ps.wPortStatus, status);
3729 USETW(ps.wPortChange, change);
3730 totlen = uimin(len, sizeof(ps));
3731 memcpy(buf, &ps, totlen);
3732 break;
3733 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
3734 return -1;
3735 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
3736 break;
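	/*
	 * Setting PORT_RESET is handed off to uhci_portreset(), which
	 * runs the timed reset and re-enable sequence; PORT_POWER is
	 * accepted but ignored since the ports are always powered.
	 */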
3737 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
3738 if (index == 1)
3739 port = UHCI_PORTSC1;
3740 else if (index == 2)
3741 port = UHCI_PORTSC2;
3742 else {
3743 return -1;
3744 }
3745 switch (value) {
3746 case UHF_PORT_ENABLE:
3747 x = URWMASK(UREAD2(sc, port));
3748 UWRITE2(sc, port, x | UHCI_PORTSC_PE);
3749 break;
3750 case UHF_PORT_SUSPEND:
3751 x = URWMASK(UREAD2(sc, port));
3752 UWRITE2(sc, port, x | UHCI_PORTSC_SUSP);
3753 break;
3754 case UHF_PORT_RESET:
3755 err = uhci_portreset(sc, index);
3756 if (err != USBD_NORMAL_COMPLETION)
3757 return -1;
3758 return 0;
3759 case UHF_PORT_POWER:
3760 /* Pretend we turned on power */
3761 return 0;
3762 case UHF_C_PORT_CONNECTION:
3763 case UHF_C_PORT_ENABLE:
3764 case UHF_C_PORT_OVER_CURRENT:
3765 case UHF_PORT_CONNECTION:
3766 case UHF_PORT_OVER_CURRENT:
3767 case UHF_PORT_LOW_SPEED:
3768 case UHF_C_PORT_SUSPEND:
3769 case UHF_C_PORT_RESET:
3770 default:
3771 return -1;
3772 }
3773 break;
3774 default:
3775 /* default from usbroothub */
3776 DPRINTF("returning %jd (usbroothub default)",
3777 buflen, 0, 0, 0);
3778 return buflen;
3779 }
3780
3781 DPRINTF("returning %jd", totlen, 0, 0, 0);
3782
3783 return totlen;
3784 }
3785
3786 /* Abort a root interrupt request. */
3787 void
3788 uhci_root_intr_abort(struct usbd_xfer *xfer)
3789 {
3790 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3791
3792 KASSERT(mutex_owned(&sc->sc_lock));
3793 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
3794
3795 /*
3796 * Try to stop the callout before it starts.  If we got in too
3797 * late, too bad; but if the callout has not yet run and timed
3798 * out the xfer, cancel the xfer ourselves.
3799 */
3800 callout_stop(&sc->sc_poll_handle);
3801 if (sc->sc_intr_xfer == NULL)
3802 return;
3803
3804 KASSERT(sc->sc_intr_xfer == xfer);
3805 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
3806 xfer->ux_status = USBD_CANCELLED;
3807 #ifdef DIAGNOSTIC
3808 UHCI_XFER2UXFER(xfer)->ux_isdone = true;
3809 #endif
3810 usb_transfer_complete(xfer);
3811 }
3812
3813 usbd_status
3814 uhci_root_intr_transfer(struct usbd_xfer *xfer)
3815 {
3816
3817 /* The pipe isn't running yet, so start it first. */
3818 return uhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
3819 }
3820
3821 /* Start a transfer on the root interrupt pipe. */
3822 usbd_status
3823 uhci_root_intr_start(struct usbd_xfer *xfer)
3824 {
3825 struct usbd_pipe *pipe = xfer->ux_pipe;
3826 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3827 unsigned int ival;
3828
3829 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3830 DPRINTF("xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, xfer->ux_length,
3831 xfer->ux_flags, 0);
3832
3833 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3834
3835 if (sc->sc_dying)
3836 return USBD_IOERROR;
3837
3838 KASSERT(sc->sc_intr_xfer == NULL);
3839
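	/*
	 * The root hub has no real interrupt endpoint; port status
	 * changes are detected by polling PORTSC from the
	 * sc_poll_handle callout at the interval requested in the
	 * endpoint descriptor.
	 */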
3840 /* XXX temporary variable needed to avoid gcc3 warning */
3841 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
3842 sc->sc_ival = mstohz(ival);
3843 if (sc->sc_suspend == PWR_RESUME)
3844 callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
3845 sc->sc_intr_xfer = xfer;
3846 xfer->ux_status = USBD_IN_PROGRESS;
3847
3848 return USBD_IN_PROGRESS;
3849 }
3850
3851 /* Close the root interrupt pipe. */
3852 void
3853 uhci_root_intr_close(struct usbd_pipe *pipe)
3854 {
3855 uhci_softc_t *sc __diagused = UHCI_PIPE2SC(pipe);
3856 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3857
3858 KASSERT(mutex_owned(&sc->sc_lock));
3859
3860 /*
3861 * The caller must arrange to have aborted the pipe already, so
3862 * there can be no intr xfer in progress. The callout may
3863 * still be pending from a prior intr xfer -- if it has already
3864 * fired, it will see there is nothing to do, and do nothing.
3865 */
3866 KASSERT(sc->sc_intr_xfer == NULL);
3867 KASSERT(!callout_pending(&sc->sc_poll_handle));
3868 }
3869