1 /* $NetBSD: uhci.c,v 1.304.2.2 2021/04/03 22:28:50 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2004, 2011, 2012, 2016, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology, Jared D. McNeill (jmcneill (at) invisible.ca),
10 * Matthew R. Green (mrg (at) eterna.com.au) and Nick Hudson.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * USB Universal Host Controller driver.
36 * Handles e.g. PIIX3 and PIIX4.
37 *
38 * UHCI spec: http://www.intel.com/technology/usb/spec.htm
39 * USB spec: http://www.usb.org/developers/docs/
40 * PIIXn spec: ftp://download.intel.com/design/intarch/datashts/29055002.pdf
41 * ftp://download.intel.com/design/intarch/datashts/29056201.pdf
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: uhci.c,v 1.304.2.2 2021/04/03 22:28:50 thorpej Exp $");
46
47 #ifdef _KERNEL_OPT
48 #include "opt_usb.h"
49 #endif
50
51 #include <sys/param.h>
52
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/device.h>
56 #include <sys/kernel.h>
57 #include <sys/kmem.h>
58 #include <sys/mutex.h>
59 #include <sys/proc.h>
60 #include <sys/queue.h>
61 #include <sys/select.h>
62 #include <sys/sysctl.h>
63 #include <sys/systm.h>
64
65 #include <machine/endian.h>
66
67 #include <dev/usb/usb.h>
68 #include <dev/usb/usbdi.h>
69 #include <dev/usb/usbdivar.h>
70 #include <dev/usb/usb_mem.h>
71
72 #include <dev/usb/uhcireg.h>
73 #include <dev/usb/uhcivar.h>
74 #include <dev/usb/usbroothub.h>
75 #include <dev/usb/usbhist.h>
76
77 /* Use bandwidth reclamation for control transfers. Some devices choke on it. */
78 /*#define UHCI_CTL_LOOP */
79
80 #ifdef UHCI_DEBUG
81 uhci_softc_t *thesc;
82 int uhcinoloop = 0;
83 #endif
84
85 #ifdef USB_DEBUG
86 #ifndef UHCI_DEBUG
87 #define uhcidebug 0
88 #else
89 static int uhcidebug = 0;
90
91 SYSCTL_SETUP(sysctl_hw_uhci_setup, "sysctl hw.uhci setup")
92 {
93 int err;
94 const struct sysctlnode *rnode;
95 const struct sysctlnode *cnode;
96
97 err = sysctl_createv(clog, 0, NULL, &rnode,
98 CTLFLAG_PERMANENT, CTLTYPE_NODE, "uhci",
99 SYSCTL_DESCR("uhci global controls"),
100 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
101
102 if (err)
103 goto fail;
104
105 /* control debugging printfs */
106 err = sysctl_createv(clog, 0, &rnode, &cnode,
107 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
108 "debug", SYSCTL_DESCR("Enable debugging output"),
109 NULL, 0, &uhcidebug, sizeof(uhcidebug), CTL_CREATE, CTL_EOL);
110 if (err)
111 goto fail;
112
113 return;
114 fail:
115 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
116 }
117
118 #endif /* UHCI_DEBUG */
119 #endif /* USB_DEBUG */
120
121 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOGN(uhcidebug,1,FMT,A,B,C,D)
122 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(uhcidebug,N,FMT,A,B,C,D)
123 #define UHCIHIST_FUNC() USBHIST_FUNC()
124 #define UHCIHIST_CALLED(name) USBHIST_CALLED(uhcidebug)
125
126 /*
127 * The UHCI controller is little endian, so on big endian machines
128 * the data stored in memory needs to be swapped.
129 */
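/*
 * All hardware-visible TD and QH fields are therefore written with
 * htole32() and read with le32toh(), and the accesses are bracketed by
 * usb_syncmem() calls to keep the DMA view of the descriptors consistent.
 */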
130
131 struct uhci_pipe {
132 struct usbd_pipe pipe;
133 int nexttoggle;
134
135 u_char aborting;
136 struct usbd_xfer *abortstart, *abortend;
137
138 /* Info needed for different pipe kinds. */
139 union {
140 /* Control pipe */
141 struct {
142 uhci_soft_qh_t *sqh;
143 usb_dma_t reqdma;
144 uhci_soft_td_t *setup;
145 uhci_soft_td_t *stat;
146 } ctrl;
147 /* Interrupt pipe */
148 struct {
149 int npoll;
150 uhci_soft_qh_t **qhs;
151 } intr;
152 /* Bulk pipe */
153 struct {
154 uhci_soft_qh_t *sqh;
155 } bulk;
156 /* Isochronous pipe */
157 struct isoc {
158 uhci_soft_td_t **stds;
159 int next, inuse;
160 } isoc;
161 };
162 };
163
164 typedef TAILQ_HEAD(ux_completeq, uhci_xfer) ux_completeq_t;
165
166 Static void uhci_globalreset(uhci_softc_t *);
167 Static usbd_status uhci_portreset(uhci_softc_t*, int);
168 Static void uhci_reset(uhci_softc_t *);
169 Static usbd_status uhci_run(uhci_softc_t *, int, int);
170 Static uhci_soft_td_t *uhci_alloc_std(uhci_softc_t *);
171 Static void uhci_free_std(uhci_softc_t *, uhci_soft_td_t *);
172 Static void uhci_free_std_locked(uhci_softc_t *, uhci_soft_td_t *);
173 Static uhci_soft_qh_t *uhci_alloc_sqh(uhci_softc_t *);
174 Static void uhci_free_sqh(uhci_softc_t *, uhci_soft_qh_t *);
175 #if 0
176 Static void uhci_enter_ctl_q(uhci_softc_t *, uhci_soft_qh_t *,
177 uhci_intr_info_t *);
178 Static void uhci_exit_ctl_q(uhci_softc_t *, uhci_soft_qh_t *);
179 #endif
180
181 #if 0
182 Static void uhci_free_std_chain(uhci_softc_t *, uhci_soft_td_t *,
183 uhci_soft_td_t *);
184 #endif
185 Static int uhci_alloc_std_chain(uhci_softc_t *, struct usbd_xfer *,
186 int, int, uhci_soft_td_t **);
187 Static void uhci_free_stds(uhci_softc_t *, struct uhci_xfer *);
188
189 Static void uhci_reset_std_chain(uhci_softc_t *, struct usbd_xfer *,
190 int, int, int *, uhci_soft_td_t **);
191
192 Static void uhci_poll_hub(void *);
193 Static void uhci_check_intr(uhci_softc_t *, struct uhci_xfer *,
194 ux_completeq_t *);
195 Static void uhci_idone(struct uhci_xfer *, ux_completeq_t *);
196
197 Static void uhci_abortx(struct usbd_xfer *);
198
199 Static void uhci_add_ls_ctrl(uhci_softc_t *, uhci_soft_qh_t *);
200 Static void uhci_add_hs_ctrl(uhci_softc_t *, uhci_soft_qh_t *);
201 Static void uhci_add_bulk(uhci_softc_t *, uhci_soft_qh_t *);
202 Static void uhci_remove_ls_ctrl(uhci_softc_t *,uhci_soft_qh_t *);
203 Static void uhci_remove_hs_ctrl(uhci_softc_t *,uhci_soft_qh_t *);
204 Static void uhci_remove_bulk(uhci_softc_t *,uhci_soft_qh_t *);
205 Static void uhci_add_loop(uhci_softc_t *);
206 Static void uhci_rem_loop(uhci_softc_t *);
207
208 Static usbd_status uhci_setup_isoc(struct usbd_pipe *);
209
210 Static struct usbd_xfer *
211 uhci_allocx(struct usbd_bus *, unsigned int);
212 Static void uhci_freex(struct usbd_bus *, struct usbd_xfer *);
213 Static bool uhci_dying(struct usbd_bus *);
214 Static void uhci_get_lock(struct usbd_bus *, kmutex_t **);
215 Static int uhci_roothub_ctrl(struct usbd_bus *,
216 usb_device_request_t *, void *, int);
217
218 Static int uhci_device_ctrl_init(struct usbd_xfer *);
219 Static void uhci_device_ctrl_fini(struct usbd_xfer *);
220 Static usbd_status uhci_device_ctrl_transfer(struct usbd_xfer *);
221 Static usbd_status uhci_device_ctrl_start(struct usbd_xfer *);
222 Static void uhci_device_ctrl_abort(struct usbd_xfer *);
223 Static void uhci_device_ctrl_close(struct usbd_pipe *);
224 Static void uhci_device_ctrl_done(struct usbd_xfer *);
225
226 Static int uhci_device_intr_init(struct usbd_xfer *);
227 Static void uhci_device_intr_fini(struct usbd_xfer *);
228 Static usbd_status uhci_device_intr_transfer(struct usbd_xfer *);
229 Static usbd_status uhci_device_intr_start(struct usbd_xfer *);
230 Static void uhci_device_intr_abort(struct usbd_xfer *);
231 Static void uhci_device_intr_close(struct usbd_pipe *);
232 Static void uhci_device_intr_done(struct usbd_xfer *);
233
234 Static int uhci_device_bulk_init(struct usbd_xfer *);
235 Static void uhci_device_bulk_fini(struct usbd_xfer *);
236 Static usbd_status uhci_device_bulk_transfer(struct usbd_xfer *);
237 Static usbd_status uhci_device_bulk_start(struct usbd_xfer *);
238 Static void uhci_device_bulk_abort(struct usbd_xfer *);
239 Static void uhci_device_bulk_close(struct usbd_pipe *);
240 Static void uhci_device_bulk_done(struct usbd_xfer *);
241
242 Static int uhci_device_isoc_init(struct usbd_xfer *);
243 Static void uhci_device_isoc_fini(struct usbd_xfer *);
244 Static usbd_status uhci_device_isoc_transfer(struct usbd_xfer *);
245 Static void uhci_device_isoc_abort(struct usbd_xfer *);
246 Static void uhci_device_isoc_close(struct usbd_pipe *);
247 Static void uhci_device_isoc_done(struct usbd_xfer *);
248
249 Static usbd_status uhci_root_intr_transfer(struct usbd_xfer *);
250 Static usbd_status uhci_root_intr_start(struct usbd_xfer *);
251 Static void uhci_root_intr_abort(struct usbd_xfer *);
252 Static void uhci_root_intr_close(struct usbd_pipe *);
253 Static void uhci_root_intr_done(struct usbd_xfer *);
254
255 Static usbd_status uhci_open(struct usbd_pipe *);
256 Static void uhci_poll(struct usbd_bus *);
257 Static void uhci_softintr(void *);
258
259 Static void uhci_add_intr(uhci_softc_t *, uhci_soft_qh_t *);
260 Static void uhci_remove_intr(uhci_softc_t *, uhci_soft_qh_t *);
261 Static usbd_status uhci_device_setintr(uhci_softc_t *,
262 struct uhci_pipe *, int);
263
264 Static void uhci_device_clear_toggle(struct usbd_pipe *);
265 Static void uhci_noop(struct usbd_pipe *);
266
267 static inline uhci_soft_qh_t *
268 uhci_find_prev_qh(uhci_soft_qh_t *, uhci_soft_qh_t *);
269
270 #ifdef UHCI_DEBUG
271 Static void uhci_dump_all(uhci_softc_t *);
272 Static void uhci_dumpregs(uhci_softc_t *);
273 Static void uhci_dump_qhs(uhci_soft_qh_t *);
274 Static void uhci_dump_qh(uhci_soft_qh_t *);
275 Static void uhci_dump_tds(uhci_soft_td_t *);
276 Static void uhci_dump_td(uhci_soft_td_t *);
277 Static void uhci_dump_ii(struct uhci_xfer *);
278 void uhci_dump(void);
279 #endif
280
281 #define UBARR(sc) bus_space_barrier((sc)->iot, (sc)->ioh, 0, (sc)->sc_size, \
282 BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE)
283 #define UWRITE1(sc, r, x) \
284 do { UBARR(sc); bus_space_write_1((sc)->iot, (sc)->ioh, (r), (x)); \
285 } while (/*CONSTCOND*/0)
286 #define UWRITE2(sc, r, x) \
287 do { UBARR(sc); bus_space_write_2((sc)->iot, (sc)->ioh, (r), (x)); \
288 } while (/*CONSTCOND*/0)
289 #define UWRITE4(sc, r, x) \
290 do { UBARR(sc); bus_space_write_4((sc)->iot, (sc)->ioh, (r), (x)); \
291 } while (/*CONSTCOND*/0)
292
293 static __inline uint8_t
294 UREAD1(uhci_softc_t *sc, bus_size_t r)
295 {
296
297 UBARR(sc);
298 return bus_space_read_1(sc->iot, sc->ioh, r);
299 }
300
301 static __inline uint16_t
302 UREAD2(uhci_softc_t *sc, bus_size_t r)
303 {
304
305 UBARR(sc);
306 return bus_space_read_2(sc->iot, sc->ioh, r);
307 }
308
309 #ifdef UHCI_DEBUG
310 static __inline uint32_t
311 UREAD4(uhci_softc_t *sc, bus_size_t r)
312 {
313
314 UBARR(sc);
315 return bus_space_read_4(sc->iot, sc->ioh, r);
316 }
317 #endif
318
319 #define UHCICMD(sc, cmd) UWRITE2(sc, UHCI_CMD, cmd)
320 #define UHCISTS(sc) UREAD2(sc, UHCI_STS)
321
322 #define UHCI_RESET_TIMEOUT 100 /* ms, reset timeout */
323
324 #define UHCI_CURFRAME(sc) (UREAD2(sc, UHCI_FRNUM) & UHCI_FRNUM_MASK)
325
326 const struct usbd_bus_methods uhci_bus_methods = {
327 .ubm_open = uhci_open,
328 .ubm_softint = uhci_softintr,
329 .ubm_dopoll = uhci_poll,
330 .ubm_allocx = uhci_allocx,
331 .ubm_freex = uhci_freex,
332 .ubm_abortx = uhci_abortx,
333 .ubm_dying = uhci_dying,
334 .ubm_getlock = uhci_get_lock,
335 .ubm_rhctrl = uhci_roothub_ctrl,
336 };
337
338 const struct usbd_pipe_methods uhci_root_intr_methods = {
339 .upm_transfer = uhci_root_intr_transfer,
340 .upm_start = uhci_root_intr_start,
341 .upm_abort = uhci_root_intr_abort,
342 .upm_close = uhci_root_intr_close,
343 .upm_cleartoggle = uhci_noop,
344 .upm_done = uhci_root_intr_done,
345 };
346
347 const struct usbd_pipe_methods uhci_device_ctrl_methods = {
348 .upm_init = uhci_device_ctrl_init,
349 .upm_fini = uhci_device_ctrl_fini,
350 .upm_transfer = uhci_device_ctrl_transfer,
351 .upm_start = uhci_device_ctrl_start,
352 .upm_abort = uhci_device_ctrl_abort,
353 .upm_close = uhci_device_ctrl_close,
354 .upm_cleartoggle = uhci_noop,
355 .upm_done = uhci_device_ctrl_done,
356 };
357
358 const struct usbd_pipe_methods uhci_device_intr_methods = {
359 .upm_init = uhci_device_intr_init,
360 .upm_fini = uhci_device_intr_fini,
361 .upm_transfer = uhci_device_intr_transfer,
362 .upm_start = uhci_device_intr_start,
363 .upm_abort = uhci_device_intr_abort,
364 .upm_close = uhci_device_intr_close,
365 .upm_cleartoggle = uhci_device_clear_toggle,
366 .upm_done = uhci_device_intr_done,
367 };
368
369 const struct usbd_pipe_methods uhci_device_bulk_methods = {
370 .upm_init = uhci_device_bulk_init,
371 .upm_fini = uhci_device_bulk_fini,
372 .upm_transfer = uhci_device_bulk_transfer,
373 .upm_start = uhci_device_bulk_start,
374 .upm_abort = uhci_device_bulk_abort,
375 .upm_close = uhci_device_bulk_close,
376 .upm_cleartoggle = uhci_device_clear_toggle,
377 .upm_done = uhci_device_bulk_done,
378 };
379
380 const struct usbd_pipe_methods uhci_device_isoc_methods = {
381 .upm_init = uhci_device_isoc_init,
382 .upm_fini = uhci_device_isoc_fini,
383 .upm_transfer = uhci_device_isoc_transfer,
384 .upm_abort = uhci_device_isoc_abort,
385 .upm_close = uhci_device_isoc_close,
386 .upm_cleartoggle = uhci_noop,
387 .upm_done = uhci_device_isoc_done,
388 };
389
390 static inline void
391 uhci_add_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux)
392 {
393
394 TAILQ_INSERT_TAIL(&sc->sc_intrhead, ux, ux_list);
395 }
396
397 static inline void
398 uhci_del_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux)
399 {
400
401 TAILQ_REMOVE(&sc->sc_intrhead, ux, ux_list);
402 }
403
404 static inline uhci_soft_qh_t *
405 uhci_find_prev_qh(uhci_soft_qh_t *pqh, uhci_soft_qh_t *sqh)
406 {
407 UHCIHIST_FUNC(); UHCIHIST_CALLED();
408 DPRINTFN(15, "pqh=%#jx sqh=%#jx", (uintptr_t)pqh, (uintptr_t)sqh, 0, 0);
409
410 for (; pqh->hlink != sqh; pqh = pqh->hlink) {
411 #if defined(DIAGNOSTIC) || defined(UHCI_DEBUG)
412 usb_syncmem(&pqh->dma,
413 pqh->offs + offsetof(uhci_qh_t, qh_hlink),
414 sizeof(pqh->qh.qh_hlink),
415 BUS_DMASYNC_POSTWRITE);
416 if (le32toh(pqh->qh.qh_hlink) & UHCI_PTR_T) {
417 printf("%s: QH not found\n", __func__);
418 return NULL;
419 }
420 #endif
421 }
422 return pqh;
423 }
424
425 void
426 uhci_globalreset(uhci_softc_t *sc)
427 {
428 UHCICMD(sc, UHCI_CMD_GRESET); /* global reset */
429 usb_delay_ms(&sc->sc_bus, USB_BUS_RESET_DELAY); /* wait a little */
430 UHCICMD(sc, 0); /* do nothing */
431 }
432
433 int
434 uhci_init(uhci_softc_t *sc)
435 {
436 int i, j;
437 uhci_soft_qh_t *clsqh, *chsqh, *bsqh, *sqh, *lsqh;
438 uhci_soft_td_t *std;
439
440 UHCIHIST_FUNC(); UHCIHIST_CALLED();
441
442 #ifdef UHCI_DEBUG
443 thesc = sc;
444
445 if (uhcidebug >= 2)
446 uhci_dumpregs(sc);
447 #endif
448
449 sc->sc_suspend = PWR_RESUME;
450
451 UWRITE2(sc, UHCI_INTR, 0); /* disable interrupts */
452 uhci_globalreset(sc); /* reset the controller */
453 uhci_reset(sc);
454
455 /* Allocate and initialize real frame array. */
456 int err = usb_allocmem(&sc->sc_bus,
457 UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t),
458 UHCI_FRAMELIST_ALIGN, USBMALLOC_COHERENT, &sc->sc_dma);
459 if (err)
460 return err;
461 sc->sc_pframes = KERNADDR(&sc->sc_dma, 0);
462 /* set frame number to 0 */
463 UWRITE2(sc, UHCI_FRNUM, 0);
464 /* set frame list */
465 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0));
466
467 /* Initialise mutex early for uhci_alloc_* */
468 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
469 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
470
471 /*
472 * Allocate a TD, inactive, that hangs from the last QH.
473 * This is to avoid a bug in the PIIX that makes it run berserk
474 * otherwise.
475 */
476 std = uhci_alloc_std(sc);
477 if (std == NULL)
478 return ENOMEM;
479 std->link.std = NULL;
480 std->td.td_link = htole32(UHCI_PTR_T);
481 std->td.td_status = htole32(0); /* inactive */
482 std->td.td_token = htole32(0);
483 std->td.td_buffer = htole32(0);
484 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
485 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
486
487 /* Allocate the dummy QH marking the end and used for looping the QHs.*/
488 lsqh = uhci_alloc_sqh(sc);
489 if (lsqh == NULL)
490 goto fail1;
491 lsqh->hlink = NULL;
492 lsqh->qh.qh_hlink = htole32(UHCI_PTR_T); /* end of QH chain */
493 lsqh->elink = std;
494 lsqh->qh.qh_elink = htole32(std->physaddr | UHCI_PTR_TD);
495 sc->sc_last_qh = lsqh;
496 usb_syncmem(&lsqh->dma, lsqh->offs, sizeof(lsqh->qh),
497 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
498
499 /* Allocate the dummy QH where bulk traffic will be queued. */
500 bsqh = uhci_alloc_sqh(sc);
501 if (bsqh == NULL)
502 goto fail2;
503 bsqh->hlink = lsqh;
504 bsqh->qh.qh_hlink = htole32(lsqh->physaddr | UHCI_PTR_QH);
505 bsqh->elink = NULL;
506 bsqh->qh.qh_elink = htole32(UHCI_PTR_T);
507 sc->sc_bulk_start = sc->sc_bulk_end = bsqh;
508 usb_syncmem(&bsqh->dma, bsqh->offs, sizeof(bsqh->qh),
509 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
510
511 /* Allocate dummy QH where high speed control traffic will be queued. */
512 chsqh = uhci_alloc_sqh(sc);
513 if (chsqh == NULL)
514 goto fail3;
515 chsqh->hlink = bsqh;
516 chsqh->qh.qh_hlink = htole32(bsqh->physaddr | UHCI_PTR_QH);
517 chsqh->elink = NULL;
518 chsqh->qh.qh_elink = htole32(UHCI_PTR_T);
519 sc->sc_hctl_start = sc->sc_hctl_end = chsqh;
520 usb_syncmem(&chsqh->dma, chsqh->offs, sizeof(chsqh->qh),
521 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
522
523 /* Allocate dummy QH where control traffic will be queued. */
524 clsqh = uhci_alloc_sqh(sc);
525 if (clsqh == NULL)
526 goto fail4;
527 clsqh->hlink = chsqh;
528 clsqh->qh.qh_hlink = htole32(chsqh->physaddr | UHCI_PTR_QH);
529 clsqh->elink = NULL;
530 clsqh->qh.qh_elink = htole32(UHCI_PTR_T);
531 sc->sc_lctl_start = sc->sc_lctl_end = clsqh;
532 usb_syncmem(&clsqh->dma, clsqh->offs, sizeof(clsqh->qh),
533 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
534
535 /*
536 * Make all (virtual) frame list pointers point to the interrupt
537 * queue heads, point the interrupt queue heads at the control
538 * queue head, and point the physical frame list to the virtual one.
539 */
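/*
 * The resulting per-frame schedule is:
 *
 *   frame list entry -> isoc dummy TD -> intr QH -> low speed ctrl QH
 *       -> high speed ctrl QH -> bulk QH -> last QH (terminate or loop)
 */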
540 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
541 std = uhci_alloc_std(sc);
542 sqh = uhci_alloc_sqh(sc);
543 if (std == NULL || sqh == NULL)
544 return ENOMEM;
545 std->link.sqh = sqh;
546 std->td.td_link = htole32(sqh->physaddr | UHCI_PTR_QH);
547 std->td.td_status = htole32(UHCI_TD_IOS); /* iso, inactive */
548 std->td.td_token = htole32(0);
549 std->td.td_buffer = htole32(0);
550 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
551 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
552 sqh->hlink = clsqh;
553 sqh->qh.qh_hlink = htole32(clsqh->physaddr | UHCI_PTR_QH);
554 sqh->elink = NULL;
555 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
556 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
557 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
558 sc->sc_vframes[i].htd = std;
559 sc->sc_vframes[i].etd = std;
560 sc->sc_vframes[i].hqh = sqh;
561 sc->sc_vframes[i].eqh = sqh;
562 for (j = i;
563 j < UHCI_FRAMELIST_COUNT;
564 j += UHCI_VFRAMELIST_COUNT)
565 sc->sc_pframes[j] = htole32(std->physaddr);
566 }
567 usb_syncmem(&sc->sc_dma, 0,
568 UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t),
569 BUS_DMASYNC_PREWRITE);
570
571
572 TAILQ_INIT(&sc->sc_intrhead);
573
574 sc->sc_xferpool = pool_cache_init(sizeof(struct uhci_xfer), 0, 0, 0,
575 "uhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
576
577 callout_init(&sc->sc_poll_handle, CALLOUT_MPSAFE);
578 callout_setfunc(&sc->sc_poll_handle, uhci_poll_hub, sc);
579
580 /* Set up the bus struct. */
581 sc->sc_bus.ub_methods = &uhci_bus_methods;
582 sc->sc_bus.ub_pipesize = sizeof(struct uhci_pipe);
583 sc->sc_bus.ub_usedma = true;
584 sc->sc_bus.ub_dmaflags = USBMALLOC_MULTISEG;
585
586 UHCICMD(sc, UHCI_CMD_MAXP); /* Assume 64 byte packets at frame end */
587
588 DPRINTF("Enabling...", 0, 0, 0, 0);
589
590 err = uhci_run(sc, 1, 0); /* and here we go... */
591 UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE | UHCI_INTR_RIE |
592 UHCI_INTR_IOCE | UHCI_INTR_SPIE); /* enable interrupts */
593 return err;
594
595 fail4:
596 uhci_free_sqh(sc, chsqh);
597 fail3:
598 uhci_free_sqh(sc, bsqh);
599 fail2:
600 uhci_free_sqh(sc, lsqh);
601 fail1:
602 uhci_free_std(sc, std);
603
604 return ENOMEM;
605 }
606
607 int
608 uhci_activate(device_t self, enum devact act)
609 {
610 struct uhci_softc *sc = device_private(self);
611
612 switch (act) {
613 case DVACT_DEACTIVATE:
614 sc->sc_dying = 1;
615 return 0;
616 default:
617 return EOPNOTSUPP;
618 }
619 }
620
621 void
622 uhci_childdet(device_t self, device_t child)
623 {
624 struct uhci_softc *sc = device_private(self);
625
626 KASSERT(sc->sc_child == child);
627 sc->sc_child = NULL;
628 }
629
630 int
631 uhci_detach(struct uhci_softc *sc, int flags)
632 {
633 int rv = 0;
634
635 if (sc->sc_child != NULL)
636 rv = config_detach(sc->sc_child, flags);
637
638 if (rv != 0)
639 return rv;
640
641 callout_halt(&sc->sc_poll_handle, NULL);
642 callout_destroy(&sc->sc_poll_handle);
643
644 mutex_destroy(&sc->sc_lock);
645 mutex_destroy(&sc->sc_intr_lock);
646
647 pool_cache_destroy(sc->sc_xferpool);
648
649 /* XXX free other data structures XXX */
650
651 return rv;
652 }
653
654 struct usbd_xfer *
655 uhci_allocx(struct usbd_bus *bus, unsigned int nframes)
656 {
657 struct uhci_softc *sc = UHCI_BUS2SC(bus);
658 struct usbd_xfer *xfer;
659
660 xfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
661 if (xfer != NULL) {
662 memset(xfer, 0, sizeof(struct uhci_xfer));
663
664 #ifdef DIAGNOSTIC
665 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
666 uxfer->ux_isdone = true;
667 xfer->ux_state = XFER_BUSY;
668 #endif
669 }
670 return xfer;
671 }
672
673 void
674 uhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
675 {
676 struct uhci_softc *sc = UHCI_BUS2SC(bus);
677 struct uhci_xfer *uxfer __diagused = UHCI_XFER2UXFER(xfer);
678
679 KASSERTMSG(xfer->ux_state == XFER_BUSY ||
680 xfer->ux_status == USBD_NOT_STARTED,
681 "xfer %p state %d\n", xfer, xfer->ux_state);
682 KASSERTMSG(uxfer->ux_isdone || xfer->ux_status == USBD_NOT_STARTED,
683 "xfer %p not done\n", xfer);
684 #ifdef DIAGNOSTIC
685 xfer->ux_state = XFER_FREE;
686 #endif
687 pool_cache_put(sc->sc_xferpool, xfer);
688 }
689
690 Static bool
691 uhci_dying(struct usbd_bus *bus)
692 {
693 struct uhci_softc *sc = UHCI_BUS2SC(bus);
694
695 return sc->sc_dying;
696 }
697
698 Static void
699 uhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
700 {
701 struct uhci_softc *sc = UHCI_BUS2SC(bus);
702
703 *lock = &sc->sc_lock;
704 }
705
706
707 /*
708 * Handle suspend/resume.
709 *
710 * We need to switch to polling mode here, because this routine is
711 * called from an interrupt context. This is all right since we
712 * are almost suspended anyway.
713 */
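/*
 * uhci_suspend() saves FRNUM and SOF before entering global suspend;
 * uhci_resume() restores them together with FLBASEADDR after forcing
 * resume, and then restarts the schedule.
 */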
714 bool
715 uhci_resume(device_t dv, const pmf_qual_t *qual)
716 {
717 uhci_softc_t *sc = device_private(dv);
718 int cmd;
719
720 mutex_spin_enter(&sc->sc_intr_lock);
721
722 cmd = UREAD2(sc, UHCI_CMD);
723 sc->sc_bus.ub_usepolling++;
724 UWRITE2(sc, UHCI_INTR, 0);
725 uhci_globalreset(sc);
726 uhci_reset(sc);
727 if (cmd & UHCI_CMD_RS)
728 uhci_run(sc, 0, 1);
729
730 /* restore saved state */
731 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0));
732 UWRITE2(sc, UHCI_FRNUM, sc->sc_saved_frnum);
733 UWRITE1(sc, UHCI_SOF, sc->sc_saved_sof);
734
735 UHCICMD(sc, cmd | UHCI_CMD_FGR); /* force resume */
736 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_DELAY, &sc->sc_intr_lock);
737 UHCICMD(sc, cmd & ~UHCI_CMD_EGSM); /* back to normal */
738 UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE |
739 UHCI_INTR_RIE | UHCI_INTR_IOCE | UHCI_INTR_SPIE);
740 UHCICMD(sc, UHCI_CMD_MAXP);
741 uhci_run(sc, 1, 1); /* and start traffic again */
742 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_RECOVERY, &sc->sc_intr_lock);
743 sc->sc_bus.ub_usepolling--;
744 if (sc->sc_intr_xfer != NULL)
745 callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
746 #ifdef UHCI_DEBUG
747 if (uhcidebug >= 2)
748 uhci_dumpregs(sc);
749 #endif
750
751 sc->sc_suspend = PWR_RESUME;
752 mutex_spin_exit(&sc->sc_intr_lock);
753
754 return true;
755 }
756
757 bool
758 uhci_suspend(device_t dv, const pmf_qual_t *qual)
759 {
760 uhci_softc_t *sc = device_private(dv);
761 int cmd;
762
763 mutex_spin_enter(&sc->sc_intr_lock);
764
765 cmd = UREAD2(sc, UHCI_CMD);
766
767 #ifdef UHCI_DEBUG
768 if (uhcidebug >= 2)
769 uhci_dumpregs(sc);
770 #endif
771 sc->sc_suspend = PWR_SUSPEND;
772 if (sc->sc_intr_xfer != NULL)
773 callout_halt(&sc->sc_poll_handle, &sc->sc_intr_lock);
774 sc->sc_bus.ub_usepolling++;
775
776 uhci_run(sc, 0, 1); /* stop the controller */
777 cmd &= ~UHCI_CMD_RS;
778
779 /* save some state if BIOS doesn't */
780 sc->sc_saved_frnum = UREAD2(sc, UHCI_FRNUM);
781 sc->sc_saved_sof = UREAD1(sc, UHCI_SOF);
782
783 UWRITE2(sc, UHCI_INTR, 0); /* disable intrs */
784
785 UHCICMD(sc, cmd | UHCI_CMD_EGSM); /* enter suspend */
786 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_WAIT, &sc->sc_intr_lock);
787 sc->sc_bus.ub_usepolling--;
788
789 mutex_spin_exit(&sc->sc_intr_lock);
790
791 return true;
792 }
793
794 #ifdef UHCI_DEBUG
795 Static void
796 uhci_dumpregs(uhci_softc_t *sc)
797 {
798 UHCIHIST_FUNC(); UHCIHIST_CALLED();
799 DPRINTF("cmd =%04jx sts =%04jx intr =%04jx frnum =%04jx",
800 UREAD2(sc, UHCI_CMD), UREAD2(sc, UHCI_STS),
801 UREAD2(sc, UHCI_INTR), UREAD2(sc, UHCI_FRNUM));
802 DPRINTF("sof =%04jx portsc1=%04jx portsc2=%04jx flbase=%08jx",
803 UREAD1(sc, UHCI_SOF), UREAD2(sc, UHCI_PORTSC1),
804 UREAD2(sc, UHCI_PORTSC2), UREAD4(sc, UHCI_FLBASEADDR));
805 }
806
807 void
808 uhci_dump_td(uhci_soft_td_t *p)
809 {
810 UHCIHIST_FUNC(); UHCIHIST_CALLED();
811
812 usb_syncmem(&p->dma, p->offs, sizeof(p->td),
813 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
814
815 DPRINTF("TD(%#jx) at 0x%08jx", (uintptr_t)p, p->physaddr, 0, 0);
816 DPRINTF(" link=0x%08jx status=0x%08jx "
817 "token=0x%08jx buffer=0x%08jx",
818 le32toh(p->td.td_link),
819 le32toh(p->td.td_status),
820 le32toh(p->td.td_token),
821 le32toh(p->td.td_buffer));
822
823 DPRINTF("bitstuff=%jd crcto =%jd nak =%jd babble =%jd",
824 !!(le32toh(p->td.td_status) & UHCI_TD_BITSTUFF),
825 !!(le32toh(p->td.td_status) & UHCI_TD_CRCTO),
826 !!(le32toh(p->td.td_status) & UHCI_TD_NAK),
827 !!(le32toh(p->td.td_status) & UHCI_TD_BABBLE));
828 DPRINTF("dbuffer =%jd stalled =%jd active =%jd ioc =%jd",
829 !!(le32toh(p->td.td_status) & UHCI_TD_DBUFFER),
830 !!(le32toh(p->td.td_status) & UHCI_TD_STALLED),
831 !!(le32toh(p->td.td_status) & UHCI_TD_ACTIVE),
832 !!(le32toh(p->td.td_status) & UHCI_TD_IOC));
833 DPRINTF("ios =%jd ls =%jd spd =%jd",
834 !!(le32toh(p->td.td_status) & UHCI_TD_IOS),
835 !!(le32toh(p->td.td_status) & UHCI_TD_LS),
836 !!(le32toh(p->td.td_status) & UHCI_TD_SPD), 0);
837 DPRINTF("errcnt =%jd actlen =%jd pid=%02jx",
838 UHCI_TD_GET_ERRCNT(le32toh(p->td.td_status)),
839 UHCI_TD_GET_ACTLEN(le32toh(p->td.td_status)),
840 UHCI_TD_GET_PID(le32toh(p->td.td_token)), 0);
841 DPRINTF("addr=%jd endpt=%jd D=%jd maxlen=%jd,",
842 UHCI_TD_GET_DEVADDR(le32toh(p->td.td_token)),
843 UHCI_TD_GET_ENDPT(le32toh(p->td.td_token)),
844 UHCI_TD_GET_DT(le32toh(p->td.td_token)),
845 UHCI_TD_GET_MAXLEN(le32toh(p->td.td_token)));
846 }
847
848 void
849 uhci_dump_qh(uhci_soft_qh_t *sqh)
850 {
851 UHCIHIST_FUNC(); UHCIHIST_CALLED();
852
853 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
854 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
855
856 DPRINTF("QH(%#jx) at 0x%08jx: hlink=%08jx elink=%08jx", (uintptr_t)sqh,
857 (int)sqh->physaddr, le32toh(sqh->qh.qh_hlink),
858 le32toh(sqh->qh.qh_elink));
859
860 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD);
861 }
862
863
864 #if 1
865 void
866 uhci_dump(void)
867 {
868 uhci_dump_all(thesc);
869 }
870 #endif
871
872 void
873 uhci_dump_all(uhci_softc_t *sc)
874 {
875 uhci_dumpregs(sc);
876 /*printf("framelist[i].link = %08x\n", sc->sc_framelist[0].link);*/
877 uhci_dump_qhs(sc->sc_lctl_start);
878 }
879
880
881 void
882 uhci_dump_qhs(uhci_soft_qh_t *sqh)
883 {
884 UHCIHIST_FUNC(); UHCIHIST_CALLED();
885
886 uhci_dump_qh(sqh);
887
888 /*
889 * uhci_dump_qhs displays all the QHs and TDs from the given QH onwards
890 * Traverses sideways first, then down.
891 *
892 * QH1
893 * QH2
894 * No QH
895 * TD2.1
896 * TD2.2
897 * TD1.1
898 * etc.
899 *
900 * TD2.x being the TDs queued at QH2, and QH2 being referenced from QH1.
901 */
902
903 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
904 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
905 if (sqh->hlink != NULL && !(le32toh(sqh->qh.qh_hlink) & UHCI_PTR_T))
906 uhci_dump_qhs(sqh->hlink);
907 else
908 DPRINTF("No QH", 0, 0, 0, 0);
909 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD);
910
911 if (sqh->elink != NULL && !(le32toh(sqh->qh.qh_elink) & UHCI_PTR_T))
912 uhci_dump_tds(sqh->elink);
913 else
914 DPRINTF("No TD", 0, 0, 0, 0);
915 }
916
917 void
918 uhci_dump_tds(uhci_soft_td_t *std)
919 {
920 uhci_soft_td_t *td;
921 int stop;
922
923 for (td = std; td != NULL; td = td->link.std) {
924 uhci_dump_td(td);
925
926 /*
927 * Check whether the link pointer in this TD marks the
928 * end of the queue. This avoids
929 * printing the free list in case the queue/TD has
930 * already been moved there (seatbelt).
931 */
932 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link),
933 sizeof(td->td.td_link),
934 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
935 stop = (le32toh(td->td.td_link) & UHCI_PTR_T ||
936 le32toh(td->td.td_link) == 0);
937 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link),
938 sizeof(td->td.td_link), BUS_DMASYNC_PREREAD);
939 if (stop)
940 break;
941 }
942 }
943
944 Static void
945 uhci_dump_ii(struct uhci_xfer *ux)
946 {
947 struct usbd_pipe *pipe;
948 usb_endpoint_descriptor_t *ed;
949 struct usbd_device *dev;
950
951 if (ux == NULL) {
952 printf("ux NULL\n");
953 return;
954 }
955 pipe = ux->ux_xfer.ux_pipe;
956 if (pipe == NULL) {
957 printf("ux %p: done=%d pipe=NULL\n", ux, ux->ux_isdone);
958 return;
959 }
960 if (pipe->up_endpoint == NULL) {
961 printf("ux %p: done=%d pipe=%p pipe->up_endpoint=NULL\n",
962 ux, ux->ux_isdone, pipe);
963 return;
964 }
965 if (pipe->up_dev == NULL) {
966 printf("ux %p: done=%d pipe=%p pipe->up_dev=NULL\n",
967 ux, ux->ux_isdone, pipe);
968 return;
969 }
970 ed = pipe->up_endpoint->ue_edesc;
971 dev = pipe->up_dev;
972 printf("ux %p: done=%d dev=%p vid=0x%04x pid=0x%04x addr=%d pipe=%p ep=0x%02x attr=0x%02x\n",
973 ux, ux->ux_isdone, dev,
974 UGETW(dev->ud_ddesc.idVendor),
975 UGETW(dev->ud_ddesc.idProduct),
976 dev->ud_addr, pipe,
977 ed->bEndpointAddress, ed->bmAttributes);
978 }
979
980 void uhci_dump_iis(struct uhci_softc *sc);
981 void
982 uhci_dump_iis(struct uhci_softc *sc)
983 {
984 struct uhci_xfer *ux;
985
986 printf("interrupt list:\n");
987 TAILQ_FOREACH(ux, &sc->sc_intrhead, ux_list)
988 uhci_dump_ii(ux);
989 }
990
991 void iidump(void);
992 void iidump(void) { uhci_dump_iis(thesc); }
993
994 #endif
995
996 /*
997 * This routine is executed periodically and simulates interrupts
998 * from the root controller interrupt pipe for port status change.
999 */
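/*
 * The completion buffer mimics a hub interrupt transfer: bit N of the
 * first byte is set when root port N (1 or 2) reports a connect or
 * overcurrent status change.
 */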
1000 void
1001 uhci_poll_hub(void *addr)
1002 {
1003 struct uhci_softc *sc = addr;
1004 struct usbd_xfer *xfer;
1005 u_char *p;
1006
1007 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1008
1009 mutex_enter(&sc->sc_lock);
1010
1011 /*
1012 * If the intr xfer has completed or been synchronously
1013 * aborted, we have nothing to do.
1014 */
1015 xfer = sc->sc_intr_xfer;
1016 if (xfer == NULL)
1017 goto out;
1018 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
1019
1020 /*
1021 * If the intr xfer for which we were scheduled is done, and
1022 * another intr xfer has been submitted, let that one be dealt
1023 * with when the callout fires again.
1024 *
1025 * The call to callout_pending is racy, but the transition
1026 * from pending to invoking happens atomically. The
1027 * callout_ack ensures callout_invoking does not return true
1028 * due to this invocation of the callout; the lock ensures the
1029 * next invocation of the callout cannot callout_ack (unless it
1030 * had already run to completion and nulled sc->sc_intr_xfer,
1031 * in which case we would have bailed out already).
1032 */
1033 callout_ack(&sc->sc_poll_handle);
1034 if (callout_pending(&sc->sc_poll_handle) ||
1035 callout_invoking(&sc->sc_poll_handle))
1036 goto out;
1037
1038 /*
1039 * Check flags for the two interrupt ports, and set them in the
1040 * buffer if an interrupt arrived; otherwise arrange to check again later.
1041 */
1042 p = xfer->ux_buf;
1043 p[0] = 0;
1044 if (UREAD2(sc, UHCI_PORTSC1) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC))
1045 p[0] |= 1<<1;
1046 if (UREAD2(sc, UHCI_PORTSC2) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC))
1047 p[0] |= 1<<2;
1048 if (p[0] == 0) {
1049 /*
1050 * No change -- try again in a while, unless we're
1051 * suspending, in which case we'll try again after
1052 * resume.
1053 */
1054 if (sc->sc_suspend != PWR_SUSPEND)
1055 callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
1056 goto out;
1057 }
1058
1059 /*
1060 * Interrupt completed, and the xfer has not been completed or
1061 * synchronously aborted. Complete the xfer now.
1062 */
1063 xfer->ux_actlen = 1;
1064 xfer->ux_status = USBD_NORMAL_COMPLETION;
1065 #ifdef DIAGNOSTIC
1066 UHCI_XFER2UXFER(xfer)->ux_isdone = true;
1067 #endif
1068 usb_transfer_complete(xfer);
1069
1070 out: mutex_exit(&sc->sc_lock);
1071 }
1072
1073 void
1074 uhci_root_intr_done(struct usbd_xfer *xfer)
1075 {
1076 struct uhci_softc *sc = UHCI_XFER2SC(xfer);
1077
1078 KASSERT(mutex_owned(&sc->sc_lock));
1079
1080 /* Claim the xfer so it doesn't get completed again. */
1081 KASSERT(sc->sc_intr_xfer == xfer);
1082 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
1083 sc->sc_intr_xfer = NULL;
1084 }
1085
1086 /*
1087 * Let the last QH loop back to the high speed control transfer QH.
1088 * This is what Intel calls "bandwidth reclamation" and improves
1089 * USB performance a lot for some devices.
1090 * If we are already looping, just count it.
1091 */
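/*
 * The loop is created by pointing the hlink of sc_last_qh back at
 * sc_hctl_start; sc_loops counts how many active QHs want the loop, so
 * it is only torn down when the last of them is removed.
 */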
1092 void
1093 uhci_add_loop(uhci_softc_t *sc)
1094 {
1095 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1096
1097 #ifdef UHCI_DEBUG
1098 if (uhcinoloop)
1099 return;
1100 #endif
1101 if (++sc->sc_loops == 1) {
1102 DPRINTFN(5, "add loop", 0, 0, 0, 0);
1103 /* Note, we don't loop back the soft pointer. */
1104 sc->sc_last_qh->qh.qh_hlink =
1105 htole32(sc->sc_hctl_start->physaddr | UHCI_PTR_QH);
1106 usb_syncmem(&sc->sc_last_qh->dma,
1107 sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink),
1108 sizeof(sc->sc_last_qh->qh.qh_hlink),
1109 BUS_DMASYNC_PREWRITE);
1110 }
1111 }
1112
1113 void
1114 uhci_rem_loop(uhci_softc_t *sc)
1115 {
1116 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1117
1118 #ifdef UHCI_DEBUG
1119 if (uhcinoloop)
1120 return;
1121 #endif
1122 if (--sc->sc_loops == 0) {
1123 DPRINTFN(5, "remove loop", 0, 0, 0, 0);
1124 sc->sc_last_qh->qh.qh_hlink = htole32(UHCI_PTR_T);
1125 usb_syncmem(&sc->sc_last_qh->dma,
1126 sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink),
1127 sizeof(sc->sc_last_qh->qh.qh_hlink),
1128 BUS_DMASYNC_PREWRITE);
1129 }
1130 }
1131
1132 /* Add high speed control QH, called with lock held. */
1133 void
1134 uhci_add_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1135 {
1136 uhci_soft_qh_t *eqh;
1137
1138 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1139
1140 KASSERT(mutex_owned(&sc->sc_lock));
1141
1142 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1143 eqh = sc->sc_hctl_end;
1144 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1145 sizeof(eqh->qh.qh_hlink),
1146 BUS_DMASYNC_POSTWRITE);
1147 sqh->hlink = eqh->hlink;
1148 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1149 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1150 BUS_DMASYNC_PREWRITE);
1151 eqh->hlink = sqh;
1152 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1153 sc->sc_hctl_end = sqh;
1154 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1155 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1156 #ifdef UHCI_CTL_LOOP
1157 uhci_add_loop(sc);
1158 #endif
1159 }
1160
1161 /* Remove high speed control QH, called with lock held. */
1162 void
1163 uhci_remove_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1164 {
1165 uhci_soft_qh_t *pqh;
1166 uint32_t elink;
1167
1168 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1169
1170 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1171 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1172 #ifdef UHCI_CTL_LOOP
1173 uhci_rem_loop(sc);
1174 #endif
1175 /*
1176 * The T bit should be set in the elink of the QH so that the HC
1177 * doesn't follow the pointer. This condition may fail if
1178 * the transferred packet was short so that the QH still points
1179 * at the last used TD.
1180 * In this case we set the T bit and wait a little for the HC
1181 * to stop looking at the TD.
1182 * Note that if the TD chain is large enough, the controller
1183 * may still be looking at the chain at the end of this function.
1184 * uhci_free_std_chain() will make sure the controller stops
1185 * looking at it quickly, but until then we should not change
1186 * sqh->hlink.
1187 */
1188 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1189 sizeof(sqh->qh.qh_elink),
1190 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1191 elink = le32toh(sqh->qh.qh_elink);
1192 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1193 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD);
1194 if (!(elink & UHCI_PTR_T)) {
1195 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1196 usb_syncmem(&sqh->dma,
1197 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1198 sizeof(sqh->qh.qh_elink),
1199 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1200 delay(UHCI_QH_REMOVE_DELAY);
1201 }
1202
1203 pqh = uhci_find_prev_qh(sc->sc_hctl_start, sqh);
1204 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1205 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1206 pqh->hlink = sqh->hlink;
1207 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1208 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1209 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1210 delay(UHCI_QH_REMOVE_DELAY);
1211 if (sc->sc_hctl_end == sqh)
1212 sc->sc_hctl_end = pqh;
1213 }
1214
1215 /* Add low speed control QH, called with lock held. */
1216 void
1217 uhci_add_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1218 {
1219 uhci_soft_qh_t *eqh;
1220
1221 KASSERT(mutex_owned(&sc->sc_lock));
1222
1223 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1224 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1225
1226 eqh = sc->sc_lctl_end;
1227 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1228 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1229 sqh->hlink = eqh->hlink;
1230 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1231 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1232 BUS_DMASYNC_PREWRITE);
1233 eqh->hlink = sqh;
1234 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1235 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1236 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1237 sc->sc_lctl_end = sqh;
1238 }
1239
1240 /* Remove low speed control QH, called with lock held. */
1241 void
1242 uhci_remove_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1243 {
1244 uhci_soft_qh_t *pqh;
1245 uint32_t elink;
1246
1247 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1248
1249 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1250 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1251
1252 /* See comment in uhci_remove_hs_ctrl() */
1253 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1254 sizeof(sqh->qh.qh_elink),
1255 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1256 elink = le32toh(sqh->qh.qh_elink);
1257 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1258 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD);
1259 if (!(elink & UHCI_PTR_T)) {
1260 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1261 usb_syncmem(&sqh->dma,
1262 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1263 sizeof(sqh->qh.qh_elink),
1264 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1265 delay(UHCI_QH_REMOVE_DELAY);
1266 }
1267 pqh = uhci_find_prev_qh(sc->sc_lctl_start, sqh);
1268 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1269 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1270 pqh->hlink = sqh->hlink;
1271 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1272 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1273 sizeof(pqh->qh.qh_hlink),
1274 BUS_DMASYNC_PREWRITE);
1275 delay(UHCI_QH_REMOVE_DELAY);
1276 if (sc->sc_lctl_end == sqh)
1277 sc->sc_lctl_end = pqh;
1278 }
1279
1280 /* Add bulk QH, called with lock held. */
1281 void
1282 uhci_add_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1283 {
1284 uhci_soft_qh_t *eqh;
1285
1286 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1287
1288 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1289 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1290
1291 eqh = sc->sc_bulk_end;
1292 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1293 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1294 sqh->hlink = eqh->hlink;
1295 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1296 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1297 BUS_DMASYNC_PREWRITE);
1298 eqh->hlink = sqh;
1299 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1300 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1301 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1302 sc->sc_bulk_end = sqh;
1303 uhci_add_loop(sc);
1304 }
1305
1306 /* Remove bulk QH, called with lock held. */
1307 void
1308 uhci_remove_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1309 {
1310 uhci_soft_qh_t *pqh;
1311
1312 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1313
1314 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1315 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1316
1317 uhci_rem_loop(sc);
1318 /* See comment in uhci_remove_hs_ctrl() */
1319 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1320 sizeof(sqh->qh.qh_elink),
1321 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1322 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) {
1323 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1324 usb_syncmem(&sqh->dma,
1325 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1326 sizeof(sqh->qh.qh_elink),
1327 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1328 delay(UHCI_QH_REMOVE_DELAY);
1329 }
1330 pqh = uhci_find_prev_qh(sc->sc_bulk_start, sqh);
1331 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1332 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1333 pqh->hlink = sqh->hlink;
1334 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1335 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1336 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1337 delay(UHCI_QH_REMOVE_DELAY);
1338 if (sc->sc_bulk_end == sqh)
1339 sc->sc_bulk_end = pqh;
1340 }
1341
1342 Static int uhci_intr1(uhci_softc_t *);
1343
1344 int
1345 uhci_intr(void *arg)
1346 {
1347 uhci_softc_t *sc = arg;
1348 int ret = 0;
1349
1350 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1351
1352 mutex_spin_enter(&sc->sc_intr_lock);
1353
1354 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1355 goto done;
1356
1357 if (sc->sc_bus.ub_usepolling || UREAD2(sc, UHCI_INTR) == 0) {
1358 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1359 goto done;
1360 }
1361
1362 ret = uhci_intr1(sc);
1363
1364 done:
1365 mutex_spin_exit(&sc->sc_intr_lock);
1366 return ret;
1367 }
1368
1369 int
1370 uhci_intr1(uhci_softc_t *sc)
1371 {
1372 int status;
1373 int ack;
1374
1375 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1376
1377 #ifdef UHCI_DEBUG
1378 if (uhcidebug >= 15) {
1379 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0);
1380 uhci_dumpregs(sc);
1381 }
1382 #endif
1383
1384 KASSERT(mutex_owned(&sc->sc_intr_lock));
1385
1386 status = UREAD2(sc, UHCI_STS) & UHCI_STS_ALLINTRS;
1387 /* Check if the interrupt was for us. */
1388 if (status == 0)
1389 return 0;
1390
1391 if (sc->sc_suspend != PWR_RESUME) {
1392 #ifdef DIAGNOSTIC
1393 printf("%s: interrupt while not operating ignored\n",
1394 device_xname(sc->sc_dev));
1395 #endif
1396 UWRITE2(sc, UHCI_STS, status); /* acknowledge the ints */
1397 return 0;
1398 }
1399
1400 ack = 0;
1401 if (status & UHCI_STS_USBINT)
1402 ack |= UHCI_STS_USBINT;
1403 if (status & UHCI_STS_USBEI)
1404 ack |= UHCI_STS_USBEI;
1405 if (status & UHCI_STS_RD) {
1406 ack |= UHCI_STS_RD;
1407 #ifdef UHCI_DEBUG
1408 printf("%s: resume detect\n", device_xname(sc->sc_dev));
1409 #endif
1410 }
1411 if (status & UHCI_STS_HSE) {
1412 ack |= UHCI_STS_HSE;
1413 printf("%s: host system error\n", device_xname(sc->sc_dev));
1414 }
1415 if (status & UHCI_STS_HCPE) {
1416 ack |= UHCI_STS_HCPE;
1417 printf("%s: host controller process error\n",
1418 device_xname(sc->sc_dev));
1419 }
1420
1421 /* When HCHalted=1 and Run/Stop=0, it is normal */
1422 if ((status & UHCI_STS_HCH) && (UREAD2(sc, UHCI_CMD) & UHCI_CMD_RS)) {
1423 /* no acknowledge needed */
1424 if (!sc->sc_dying) {
1425 printf("%s: host controller halted\n",
1426 device_xname(sc->sc_dev));
1427 #ifdef UHCI_DEBUG
1428 uhci_dump_all(sc);
1429 #endif
1430 }
1431 sc->sc_dying = 1;
1432 }
1433
1434 if (!ack)
1435 return 0; /* nothing to acknowledge */
1436 UWRITE2(sc, UHCI_STS, ack); /* acknowledge the ints */
1437
1438 usb_schedsoftintr(&sc->sc_bus);
1439
1440 DPRINTFN(15, "sc %#jx done", (uintptr_t)sc, 0, 0, 0);
1441
1442 return 1;
1443 }
1444
1445 void
1446 uhci_softintr(void *v)
1447 {
1448 struct usbd_bus *bus = v;
1449 uhci_softc_t *sc = UHCI_BUS2SC(bus);
1450 struct uhci_xfer *ux, *nextux;
1451 ux_completeq_t cq;
1452
1453 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1454 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0);
1455
1456 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1457
1458 TAILQ_INIT(&cq);
1459 /*
1460 * Interrupts on UHCI really suck. When the host controller
1461 * interrupts because a transfer is completed there is no
1462 * way of knowing which transfer it was. You can scan down
1463 * the TDs and QHs of the previous frame to limit the search,
1464 * but that assumes that the interrupt was not delayed by more
1465 * than 1 ms, which may not always be true (e.g. after debug
1466 * output on a slow console).
1467 * We scan all interrupt descriptors to see if any have
1468 * completed.
1469 */
1470 TAILQ_FOREACH_SAFE(ux, &sc->sc_intrhead, ux_list, nextux) {
1471 uhci_check_intr(sc, ux, &cq);
1472 }
1473
1474 /*
1475 * We abuse ux_list for the interrupt and complete lists and
1476 * interrupt transfers will get re-added here so use
1477 * the _SAFE version of TAILQ_FOREACH.
1478 */
1479 TAILQ_FOREACH_SAFE(ux, &cq, ux_list, nextux) {
1480 DPRINTF("ux %#jx", (uintptr_t)ux, 0, 0, 0);
1481 usb_transfer_complete(&ux->ux_xfer);
1482 }
1483
1484 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1485 }
1486
1487 /* Check for an interrupt. */
1488 void
1489 uhci_check_intr(uhci_softc_t *sc, struct uhci_xfer *ux, ux_completeq_t *cqp)
1490 {
1491 uhci_soft_td_t *std, *fstd = NULL, *lstd = NULL;
1492 uint32_t status;
1493
1494 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1495 DPRINTFN(15, "ux %#jx", (uintptr_t)ux, 0, 0, 0);
1496
1497 KASSERT(ux != NULL);
1498
1499 struct usbd_xfer *xfer = &ux->ux_xfer;
1500 if (xfer->ux_status == USBD_CANCELLED ||
1501 xfer->ux_status == USBD_TIMEOUT) {
1502 DPRINTF("aborted xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
1503 return;
1504 }
1505
1506 switch (ux->ux_type) {
1507 case UX_CTRL:
1508 fstd = ux->ux_setup;
1509 lstd = ux->ux_stat;
1510 break;
1511 case UX_BULK:
1512 case UX_INTR:
1513 case UX_ISOC:
1514 fstd = ux->ux_stdstart;
1515 lstd = ux->ux_stdend;
1516 break;
1517 default:
1518 KASSERT(false);
1519 break;
1520 }
1521 if (fstd == NULL)
1522 return;
1523
1524 KASSERT(lstd != NULL);
1525
1526 usb_syncmem(&lstd->dma,
1527 lstd->offs + offsetof(uhci_td_t, td_status),
1528 sizeof(lstd->td.td_status),
1529 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1530 status = le32toh(lstd->td.td_status);
1531 usb_syncmem(&lstd->dma,
1532 lstd->offs + offsetof(uhci_td_t, td_status),
1533 sizeof(lstd->td.td_status),
1534 BUS_DMASYNC_PREREAD);
1535
1536 /* If the last TD is not marked active we can complete */
1537 if (!(status & UHCI_TD_ACTIVE)) {
1538 done:
1539 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0);
1540 uhci_idone(ux, cqp);
1541 return;
1542 }
1543
1544 /*
1545 * If the last TD is still active we need to check whether there
1546 * is an error somewhere in the middle, or whether there was a
1547 * short packet (SPD and not ACTIVE).
1548 */
1549 DPRINTFN(12, "active ux=%#jx", (uintptr_t)ux, 0, 0, 0);
1550 for (std = fstd; std != lstd; std = std->link.std) {
1551 usb_syncmem(&std->dma,
1552 std->offs + offsetof(uhci_td_t, td_status),
1553 sizeof(std->td.td_status),
1554 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1555 status = le32toh(std->td.td_status);
1556 usb_syncmem(&std->dma,
1557 std->offs + offsetof(uhci_td_t, td_status),
1558 sizeof(std->td.td_status), BUS_DMASYNC_PREREAD);
1559
1560 /* If there's an active TD the xfer isn't done. */
1561 if (status & UHCI_TD_ACTIVE) {
1562 DPRINTFN(12, "ux=%#jx std=%#jx still active",
1563 (uintptr_t)ux, (uintptr_t)std, 0, 0);
1564 return;
1565 }
1566
1567 /* Any kind of error makes the xfer done. */
1568 if (status & UHCI_TD_STALLED)
1569 goto done;
1570
1571 /*
1572 * If the data phase of a control transfer is short, we need
1573 * to complete the status stage
1574 */
1575
1576 if ((status & UHCI_TD_SPD) && ux->ux_type == UX_CTRL) {
1577 struct uhci_pipe *upipe =
1578 UHCI_PIPE2UPIPE(xfer->ux_pipe);
1579 uhci_soft_qh_t *sqh = upipe->ctrl.sqh;
1580 uhci_soft_td_t *stat = upipe->ctrl.stat;
1581
1582 DPRINTFN(12, "ux=%#jx std=%#jx control status "
1583 "phase needs completion", (uintptr_t)ux,
1584 (uintptr_t)ux->ux_stdstart, 0, 0);
1585
1586 sqh->qh.qh_elink =
1587 htole32(stat->physaddr | UHCI_PTR_TD);
1588 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1589 BUS_DMASYNC_PREWRITE);
1590 break;
1591 }
1592
1593 /* We want short packets, and it is short: it's done */
1594 usb_syncmem(&std->dma,
1595 std->offs + offsetof(uhci_td_t, td_token),
1596 sizeof(std->td.td_token),
1597 BUS_DMASYNC_POSTWRITE);
1598
1599 if ((status & UHCI_TD_SPD) &&
1600 UHCI_TD_GET_ACTLEN(status) <
1601 UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token))) {
1602 goto done;
1603 }
1604 }
1605 }
1606
1607 /* Called with USB lock held. */
1608 void
1609 uhci_idone(struct uhci_xfer *ux, ux_completeq_t *cqp)
1610 {
1611 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1612 struct usbd_xfer *xfer = &ux->ux_xfer;
1613 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
1614 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
1615 uhci_soft_td_t *std;
1616 uint32_t status = 0, nstatus;
1617 int actlen;
1618
1619 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1620
1621 DPRINTFN(12, "ux=%#jx", (uintptr_t)ux, 0, 0, 0);
1622
1623 /*
1624 * Try to claim this xfer for completion. If it has already
1625 * completed or aborted, drop it on the floor.
1626 */
1627 if (!usbd_xfer_trycomplete(xfer))
1628 return;
1629
1630 #ifdef DIAGNOSTIC
1631 #ifdef UHCI_DEBUG
1632 if (ux->ux_isdone) {
1633 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1634 uhci_dump_ii(ux);
1635 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1636 }
1637 #endif
1639 KASSERTMSG(!ux->ux_isdone, "xfer %p type %d status %d", xfer,
1640 ux->ux_type, xfer->ux_status);
1641 ux->ux_isdone = true;
1642 #endif
1643
1644 if (xfer->ux_nframes != 0) {
1645 /* Isoc transfer, do things differently. */
1646 uhci_soft_td_t **stds = upipe->isoc.stds;
1647 int i, n, nframes, len;
1648
1649 DPRINTFN(5, "ux=%#jx isoc ready", (uintptr_t)ux, 0, 0, 0);
1650
1651 nframes = xfer->ux_nframes;
1652 actlen = 0;
1653 n = ux->ux_curframe;
1654 for (i = 0; i < nframes; i++) {
1655 std = stds[n];
1656 #ifdef UHCI_DEBUG
1657 if (uhcidebug >= 5) {
1658 DPRINTF("isoc TD %jd", i, 0, 0, 0);
1659 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1660 uhci_dump_td(std);
1661 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1662 }
1663 #endif
1664 if (++n >= UHCI_VFRAMELIST_COUNT)
1665 n = 0;
1666 usb_syncmem(&std->dma,
1667 std->offs + offsetof(uhci_td_t, td_status),
1668 sizeof(std->td.td_status),
1669 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1670 status = le32toh(std->td.td_status);
1671 len = UHCI_TD_GET_ACTLEN(status);
1672 xfer->ux_frlengths[i] = len;
1673 actlen += len;
1674 }
1675 upipe->isoc.inuse -= nframes;
1676 xfer->ux_actlen = actlen;
1677 xfer->ux_status = USBD_NORMAL_COMPLETION;
1678 goto end;
1679 }
1680
1681 #ifdef UHCI_DEBUG
1682 DPRINTFN(10, "ux=%#jx, xfer=%#jx, pipe=%#jx ready", (uintptr_t)ux,
1683 (uintptr_t)xfer, (uintptr_t)upipe, 0);
1684 if (uhcidebug >= 10) {
1685 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1686 uhci_dump_tds(ux->ux_stdstart);
1687 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1688 }
1689 #endif
1690
1691 /* The transfer is done, compute actual length and status. */
1692 actlen = 0;
1693 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) {
1694 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
1695 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1696 nstatus = le32toh(std->td.td_status);
1697 if (nstatus & UHCI_TD_ACTIVE)
1698 break;
1699
1700 status = nstatus;
1701 if (UHCI_TD_GET_PID(le32toh(std->td.td_token)) !=
1702 UHCI_TD_PID_SETUP)
1703 actlen += UHCI_TD_GET_ACTLEN(status);
1704 else {
1705 /*
1706 * UHCI will report CRCTO in addition to a STALL or NAK
1707 * for a SETUP transaction. See section 3.2.2, "TD
1708 * CONTROL AND STATUS".
1709 */
1710 if (status & (UHCI_TD_STALLED | UHCI_TD_NAK))
1711 status &= ~UHCI_TD_CRCTO;
1712 }
1713 }
1714 /* If there are left over TDs we need to update the toggle. */
1715 if (std != NULL)
1716 upipe->nexttoggle = UHCI_TD_GET_DT(le32toh(std->td.td_token));
1717
1718 status &= UHCI_TD_ERROR;
1719 DPRINTFN(10, "actlen=%jd, status=%#jx", actlen, status, 0, 0);
1720 xfer->ux_actlen = actlen;
1721 if (status != 0) {
1722
1723 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1724 "error, addr=%jd, endpt=0x%02jx",
1725 xfer->ux_pipe->up_dev->ud_addr,
1726 xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress,
1727 0, 0);
1728 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1729 "bitstuff=%jd crcto =%jd nak =%jd babble =%jd",
1730 !!(status & UHCI_TD_BITSTUFF),
1731 !!(status & UHCI_TD_CRCTO),
1732 !!(status & UHCI_TD_NAK),
1733 !!(status & UHCI_TD_BABBLE));
1734 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1735 "dbuffer =%jd stalled =%jd active =%jd",
1736 !!(status & UHCI_TD_DBUFFER),
1737 !!(status & UHCI_TD_STALLED),
1738 !!(status & UHCI_TD_ACTIVE),
1739 0);
1740
1741 if (status == UHCI_TD_STALLED)
1742 xfer->ux_status = USBD_STALLED;
1743 else
1744 xfer->ux_status = USBD_IOERROR; /* more info XXX */
1745 } else {
1746 xfer->ux_status = USBD_NORMAL_COMPLETION;
1747 }
1748
1749 end:
1750 uhci_del_intr_list(sc, ux);
1751 if (cqp)
1752 TAILQ_INSERT_TAIL(cqp, ux, ux_list);
1753
1754 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1755 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0);
1756 }
1757
1758 void
1759 uhci_poll(struct usbd_bus *bus)
1760 {
1761 uhci_softc_t *sc = UHCI_BUS2SC(bus);
1762
1763 if (UREAD2(sc, UHCI_STS) & UHCI_STS_USBINT) {
1764 mutex_spin_enter(&sc->sc_intr_lock);
1765 uhci_intr1(sc);
1766 mutex_spin_exit(&sc->sc_intr_lock);
1767 }
1768 }
1769
1770 void
1771 uhci_reset(uhci_softc_t *sc)
1772 {
1773 int n;
1774
1775 UHCICMD(sc, UHCI_CMD_HCRESET);
1776 /* The reset bit goes low when the controller is done. */
1777 for (n = 0; n < UHCI_RESET_TIMEOUT &&
1778 (UREAD2(sc, UHCI_CMD) & UHCI_CMD_HCRESET); n++)
1779 usb_delay_ms(&sc->sc_bus, 1);
1780 if (n >= UHCI_RESET_TIMEOUT)
1781 printf("%s: controller did not reset\n",
1782 device_xname(sc->sc_dev));
1783 }
1784
1785 usbd_status
1786 uhci_run(uhci_softc_t *sc, int run, int locked)
1787 {
1788 int n, running;
1789 uint16_t cmd;
1790
1791 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1792
1793 run = run != 0;
1794 if (!locked)
1795 mutex_spin_enter(&sc->sc_intr_lock);
1796
1797 DPRINTF("setting run=%jd", run, 0, 0, 0);
1798 cmd = UREAD2(sc, UHCI_CMD);
1799 if (run)
1800 cmd |= UHCI_CMD_RS;
1801 else
1802 cmd &= ~UHCI_CMD_RS;
1803 UHCICMD(sc, cmd);
1804 for (n = 0; n < 10; n++) {
1805 running = !(UREAD2(sc, UHCI_STS) & UHCI_STS_HCH);
1806 /* return when we've entered the state we want */
1807 if (run == running) {
1808 if (!locked)
1809 mutex_spin_exit(&sc->sc_intr_lock);
1810 DPRINTF("done cmd=%#jx sts=%#jx",
1811 UREAD2(sc, UHCI_CMD), UREAD2(sc, UHCI_STS), 0, 0);
1812 return USBD_NORMAL_COMPLETION;
1813 }
1814 usb_delay_ms_locked(&sc->sc_bus, 1, &sc->sc_intr_lock);
1815 }
1816 if (!locked)
1817 mutex_spin_exit(&sc->sc_intr_lock);
1818 printf("%s: cannot %s\n", device_xname(sc->sc_dev),
1819 run ? "start" : "stop");
1820 return USBD_IOERROR;
1821 }
1822
1823 /*
1824  * Memory management routines.
1825  *  uhci_alloc_std allocates TDs
1826  *  uhci_alloc_sqh allocates QHs
1827  * These two routines do their own free-list management, partly for
1828  * speed and partly because DMAable memory is allocated with page-size
1829  * granularity, so a lot of memory would be wasted if only one TD/QH
1830  * (32 bytes) were placed in each allocated chunk.
1831  */
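/*
 * Note (summarising the routines below): uhci_free_std()/uhci_free_sqh()
 * only push a descriptor back onto sc_freetds/sc_freeqhs; the DMA chunks
 * allocated here are kept and reused for later allocations.
 */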
1832
1833 uhci_soft_td_t *
1834 uhci_alloc_std(uhci_softc_t *sc)
1835 {
1836 uhci_soft_td_t *std;
1837 int i, offs;
1838 usb_dma_t dma;
1839
1840 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1841
1842 mutex_enter(&sc->sc_lock);
1843 if (sc->sc_freetds == NULL) {
1844 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0);
1845 mutex_exit(&sc->sc_lock);
1846
1847 int err = usb_allocmem(&sc->sc_bus, UHCI_STD_SIZE * UHCI_STD_CHUNK,
1848 UHCI_TD_ALIGN, USBMALLOC_COHERENT, &dma);
1849 if (err)
1850 return NULL;
1851
1852 mutex_enter(&sc->sc_lock);
1853 for (i = 0; i < UHCI_STD_CHUNK; i++) {
1854 offs = i * UHCI_STD_SIZE;
1855 std = KERNADDR(&dma, offs);
1856 std->physaddr = DMAADDR(&dma, offs);
1857 std->dma = dma;
1858 std->offs = offs;
1859 std->link.std = sc->sc_freetds;
1860 sc->sc_freetds = std;
1861 }
1862 }
1863 std = sc->sc_freetds;
1864 sc->sc_freetds = std->link.std;
1865 mutex_exit(&sc->sc_lock);
1866
1867 memset(&std->td, 0, sizeof(uhci_td_t));
1868
1869 return std;
1870 }
1871
1872 #define TD_IS_FREE 0x12345678
1873
1874 void
1875 uhci_free_std_locked(uhci_softc_t *sc, uhci_soft_td_t *std)
1876 {
1877 KASSERT(mutex_owned(&sc->sc_lock));
1878
1879 #ifdef DIAGNOSTIC
1880 if (le32toh(std->td.td_token) == TD_IS_FREE) {
1881 printf("%s: freeing free TD %p\n", __func__, std);
1882 return;
1883 }
1884 std->td.td_token = htole32(TD_IS_FREE);
1885 #endif
1886
1887 std->link.std = sc->sc_freetds;
1888 sc->sc_freetds = std;
1889 }
1890
1891 void
1892 uhci_free_std(uhci_softc_t *sc, uhci_soft_td_t *std)
1893 {
1894 mutex_enter(&sc->sc_lock);
1895 uhci_free_std_locked(sc, std);
1896 mutex_exit(&sc->sc_lock);
1897 }
1898
1899 uhci_soft_qh_t *
1900 uhci_alloc_sqh(uhci_softc_t *sc)
1901 {
1902 uhci_soft_qh_t *sqh;
1903 int i, offs;
1904 usb_dma_t dma;
1905
1906 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1907
1908 mutex_enter(&sc->sc_lock);
1909 if (sc->sc_freeqhs == NULL) {
1910 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0);
1911 mutex_exit(&sc->sc_lock);
1912
1913 int err = usb_allocmem(&sc->sc_bus, UHCI_SQH_SIZE * UHCI_SQH_CHUNK,
1914 UHCI_QH_ALIGN, USBMALLOC_COHERENT, &dma);
1915 if (err)
1916 return NULL;
1917
1918 mutex_enter(&sc->sc_lock);
1919 for (i = 0; i < UHCI_SQH_CHUNK; i++) {
1920 offs = i * UHCI_SQH_SIZE;
1921 sqh = KERNADDR(&dma, offs);
1922 sqh->physaddr = DMAADDR(&dma, offs);
1923 sqh->dma = dma;
1924 sqh->offs = offs;
1925 sqh->hlink = sc->sc_freeqhs;
1926 sc->sc_freeqhs = sqh;
1927 }
1928 }
1929 sqh = sc->sc_freeqhs;
1930 sc->sc_freeqhs = sqh->hlink;
1931 mutex_exit(&sc->sc_lock);
1932
1933 memset(&sqh->qh, 0, sizeof(uhci_qh_t));
1934
1935 return sqh;
1936 }
1937
1938 void
1939 uhci_free_sqh(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1940 {
1941 KASSERT(mutex_owned(&sc->sc_lock));
1942
1943 sqh->hlink = sc->sc_freeqhs;
1944 sc->sc_freeqhs = sqh;
1945 }
1946
1947 #if 0
1948 void
1949 uhci_free_std_chain(uhci_softc_t *sc, uhci_soft_td_t *std,
1950 uhci_soft_td_t *stdend)
1951 {
1952 uhci_soft_td_t *p;
1953 uint32_t td_link;
1954
1955 /*
1956 * to avoid race condition with the controller which may be looking
1957 * at this chain, we need to first invalidate all links, and
1958 * then wait for the controller to move to another queue
1959 */
1960 for (p = std; p != stdend; p = p->link.std) {
1961 usb_syncmem(&p->dma,
1962 p->offs + offsetof(uhci_td_t, td_link),
1963 sizeof(p->td.td_link),
1964 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1965 td_link = le32toh(p->td.td_link);
1966 usb_syncmem(&p->dma,
1967 p->offs + offsetof(uhci_td_t, td_link),
1968 sizeof(p->td.td_link),
1969 BUS_DMASYNC_PREREAD);
1970 if ((td_link & UHCI_PTR_T) == 0) {
1971 p->td.td_link = htole32(UHCI_PTR_T);
1972 usb_syncmem(&p->dma,
1973 p->offs + offsetof(uhci_td_t, td_link),
1974 sizeof(p->td.td_link),
1975 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1976 }
1977 }
1978 delay(UHCI_QH_REMOVE_DELAY);
1979
1980 for (; std != stdend; std = p) {
1981 p = std->link.std;
1982 uhci_free_std(sc, std);
1983 }
1984 }
1985 #endif
1986
1987 int
1988 uhci_alloc_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer, int len,
1989 int rd, uhci_soft_td_t **sp)
1990 {
1991 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
1992 uint16_t flags = xfer->ux_flags;
1993 uhci_soft_td_t *p;
1994
1995 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1996
1997 DPRINTFN(8, "xfer=%#jx pipe=%#jx", (uintptr_t)xfer,
1998 (uintptr_t)xfer->ux_pipe, 0, 0);
1999
2000 ASSERT_SLEEPABLE();
2001 KASSERT(sp);
2002
2003 int maxp = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
2004 if (maxp == 0) {
2005 printf("%s: maxp=0\n", __func__);
2006 return EINVAL;
2007 }
2008 size_t ntd = howmany(len, maxp);
2009 /*
2010  * If the transfer is bigger than PAGE_SIZE and maxp does not evenly
2011  * divide PAGE_SIZE, then we will need an extra TD for each page spanned.
2012  */
2013 if (len > PAGE_SIZE && (PAGE_SIZE % maxp) != 0) {
2014 ntd += howmany(len, PAGE_SIZE);
2015 }
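/*
 * Illustrative example (hypothetical numbers): len = 8000, maxp = 48 and
 * PAGE_SIZE = 4096 gives howmany(8000, 48) = 167 TDs, plus
 * howmany(8000, 4096) = 2 extra TDs for the chunks that get shortened at
 * page boundaries, i.e. ntd = 169.
 */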
2016
2017 /*
2018  * Might need one more TD if we're writing a zero-length packet (ZLP).
2019  */
2020 if (!rd && (flags & USBD_FORCE_SHORT_XFER)) {
2021 ntd++;
2022 }
2023 DPRINTFN(10, "maxp=%jd ntd=%jd", maxp, ntd, 0, 0);
2024
2025 uxfer->ux_stds = NULL;
2026 uxfer->ux_nstd = ntd;
2027 if (ntd == 0) {
2028 *sp = NULL;
2029 DPRINTF("ntd=0", 0, 0, 0, 0);
2030 return 0;
2031 }
2032 uxfer->ux_stds = kmem_alloc(sizeof(uhci_soft_td_t *) * ntd,
2033 KM_SLEEP);
2034
2035 for (int i = 0; i < ntd; i++) {
2036 p = uhci_alloc_std(sc);
2037 if (p == NULL) {
2038 if (i != 0) {
2039 uxfer->ux_nstd = i;
2040 uhci_free_stds(sc, uxfer);
2041 }
2042 kmem_free(uxfer->ux_stds,
2043 sizeof(uhci_soft_td_t *) * ntd);
2044 return ENOMEM;
2045 }
2046 uxfer->ux_stds[i] = p;
2047 }
2048
2049 *sp = uxfer->ux_stds[0];
2050
2051 return 0;
2052 }
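/*
 * Design note: the chain is sized here, once, against the xfer's buffer
 * size; each submission then only rewrites the TDs for the actual transfer
 * length via uhci_reset_std_chain().  This is how the bulk, control and
 * interrupt init/start routines below use the two functions.
 */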
2053
2054 Static void
2055 uhci_free_stds(uhci_softc_t *sc, struct uhci_xfer *ux)
2056 {
2057 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2058
2059 DPRINTFN(8, "ux=%#jx", (uintptr_t)ux, 0, 0, 0);
2060
2061 mutex_enter(&sc->sc_lock);
2062 for (size_t i = 0; i < ux->ux_nstd; i++) {
2063 uhci_soft_td_t *std = ux->ux_stds[i];
2064 #ifdef DIAGNOSTIC
2065 if (le32toh(std->td.td_token) == TD_IS_FREE) {
2066 printf("%s: freeing free TD %p\n", __func__, std);
2067 continue;	/* don't return with sc_lock held */
2068 }
2069 std->td.td_token = htole32(TD_IS_FREE);
2070 #endif
2071 ux->ux_stds[i]->link.std = sc->sc_freetds;
2072 sc->sc_freetds = std;
2073 }
2074 mutex_exit(&sc->sc_lock);
2075 }
2076
2077
2078 Static void
2079 uhci_reset_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer,
2080 int length, int isread, int *toggle, uhci_soft_td_t **lstd)
2081 {
2082 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2083 struct usbd_pipe *pipe = xfer->ux_pipe;
2084 usb_dma_t *dma = &xfer->ux_dmabuf;
2085 uint16_t flags = xfer->ux_flags;
2086 uhci_soft_td_t *std, *prev;
2087 int len = length;
2088 int tog = *toggle;
2089 int maxp;
2090 uint32_t status;
2091 size_t i, offs;
2092
2093 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2094 DPRINTFN(8, "xfer=%#jx len %jd isread %jd toggle %jd", (uintptr_t)xfer,
2095 len, isread, *toggle);
2096
2097 KASSERT(len != 0 || (!isread && (flags & USBD_FORCE_SHORT_XFER)));
2098
2099 maxp = UGETW(pipe->up_endpoint->ue_edesc->wMaxPacketSize);
2100 KASSERT(maxp != 0);
2101
2102 int addr = xfer->ux_pipe->up_dev->ud_addr;
2103 int endpt = xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress;
2104
2105 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(3) | UHCI_TD_ACTIVE);
2106 if (pipe->up_dev->ud_speed == USB_SPEED_LOW)
2107 status |= UHCI_TD_LS;
2108 if (flags & USBD_SHORT_XFER_OK)
2109 status |= UHCI_TD_SPD;
2110 usb_syncmem(dma, 0, len,
2111 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2112 std = prev = NULL;
2113 for (offs = i = 0; len != 0 && i < uxfer->ux_nstd; i++, prev = std) {
2114 int l = len;
2115 std = uxfer->ux_stds[i];
2116
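/*
 * A TD carries a single physical buffer pointer, so keep each TD's data
 * within one page: if this chunk would cross a page boundary, shorten it
 * to end at the boundary and let the next TD continue from there
 * (presumably because the pages backing the xfer buffer need not be
 * physically contiguous).
 */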
2117 const bus_addr_t sbp = DMAADDR(dma, offs);
2118 const bus_addr_t ebp = DMAADDR(dma, offs + l - 1);
2119 if (((sbp ^ ebp) & ~PAGE_MASK) != 0)
2120 l = PAGE_SIZE - (DMAADDR(dma, offs) & PAGE_MASK);
2121
2122 if (l > maxp)
2123 l = maxp;
2124
2125 if (prev) {
2126 prev->link.std = std;
2127 prev->td.td_link = htole32(
2128 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD
2129 );
2130 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td),
2131 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2132 }
2133
2134 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2135 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2136
2137 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD);
2138 std->td.td_status = htole32(status);
2139 std->td.td_token = htole32(
2140 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) |
2141 UHCI_TD_SET_DEVADDR(addr) |
2142 UHCI_TD_SET_PID(isread ? UHCI_TD_PID_IN : UHCI_TD_PID_OUT) |
2143 UHCI_TD_SET_DT(tog) |
2144 UHCI_TD_SET_MAXLEN(l)
2145 );
2146 std->td.td_buffer = htole32(DMAADDR(dma, offs));
2147
2148 std->link.std = NULL;
2149
2150 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2151 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2152 tog ^= 1;
2153
2154 offs += l;
2155 len -= l;
2156 }
2157 KASSERTMSG(len == 0, "xfer %p alen %d len %d mps %d ux_nstd %zu i %zu",
2158 xfer, length, len, maxp, uxfer->ux_nstd, i);
2159
2160 if (!isread &&
2161 (flags & USBD_FORCE_SHORT_XFER) &&
2162 length % maxp == 0) {
2163 /* Force a 0 length transfer at the end. */
2164 KASSERTMSG(i < uxfer->ux_nstd, "i=%zu nstd=%zu", i,
2165 uxfer->ux_nstd);
2166 std = uxfer->ux_stds[i++];
2167
2168 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD);
2169 std->td.td_status = htole32(status);
2170 std->td.td_token = htole32(
2171 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) |
2172 UHCI_TD_SET_DEVADDR(addr) |
2173 UHCI_TD_SET_PID(UHCI_TD_PID_OUT) |
2174 UHCI_TD_SET_DT(tog) |
2175 UHCI_TD_SET_MAXLEN(0)
2176 );
2177 std->td.td_buffer = 0;
2178 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2179 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2180
2181 std->link.std = NULL;
2182 if (prev) {
2183 prev->link.std = std;
2184 prev->td.td_link = htole32(
2185 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD
2186 );
2187 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td),
2188 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2189 }
2190 tog ^= 1;
2191 }
2192 *lstd = std;
2193 *toggle = tog;
2194 }
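/*
 * Note: the caller's toggle (usually upipe->nexttoggle) is updated in
 * place above, so the DATA0/DATA1 sequence carries over from one transfer
 * to the next on the same pipe; a forced zero-length packet consumes a
 * toggle like any other TD.
 */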
2195
2196 void
2197 uhci_device_clear_toggle(struct usbd_pipe *pipe)
2198 {
2199 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2200 upipe->nexttoggle = 0;
2201 }
2202
2203 void
2204 uhci_noop(struct usbd_pipe *pipe)
2205 {
2206 }
2207
2208 int
2209 uhci_device_bulk_init(struct usbd_xfer *xfer)
2210 {
2211 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2212 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2213 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
2214 int endpt = ed->bEndpointAddress;
2215 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2216 int len = xfer->ux_bufsize;
2217 int err = 0;
2218
2219 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2220 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, len,
2221 xfer->ux_flags, 0);
2222
2223 if (sc->sc_dying)
2224 return USBD_IOERROR;
2225
2226 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2227
2228 uxfer->ux_type = UX_BULK;
2229 err = uhci_alloc_std_chain(sc, xfer, len, isread, &uxfer->ux_stdstart);
2230 if (err)
2231 return err;
2232
2233 #ifdef UHCI_DEBUG
2234 if (uhcidebug >= 10) {
2235 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2236 uhci_dump_tds(uxfer->ux_stdstart);
2237 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2238 }
2239 #endif
2240
2241 return 0;
2242 }
2243
2244 Static void
2245 uhci_device_bulk_fini(struct usbd_xfer *xfer)
2246 {
2247 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2248 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2249
2250 KASSERT(ux->ux_type == UX_BULK);
2251
2252 if (ux->ux_nstd) {
2253 uhci_free_stds(sc, ux);
2254 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2255 }
2256 }
2257
2258 usbd_status
2259 uhci_device_bulk_transfer(struct usbd_xfer *xfer)
2260 {
2261 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2262 usbd_status err;
2263
2264 /* Insert last in queue. */
2265 mutex_enter(&sc->sc_lock);
2266 err = usb_insert_transfer(xfer);
2267 mutex_exit(&sc->sc_lock);
2268 if (err)
2269 return err;
2270
2271 /*
2272 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
2273 * so start it first.
2274 */
2275 return uhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2276 }
2277
2278 usbd_status
2279 uhci_device_bulk_start(struct usbd_xfer *xfer)
2280 {
2281 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2282 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2283 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2284 uhci_soft_td_t *data, *dataend;
2285 uhci_soft_qh_t *sqh;
2286 const bool polling = sc->sc_bus.ub_usepolling;
2287 int len;
2288 int endpt;
2289 int isread;
2290
2291 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2292 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2293 xfer->ux_length, xfer->ux_flags, 0);
2294
2295 if (sc->sc_dying)
2296 return USBD_IOERROR;
2297
2298 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2299 KASSERT(xfer->ux_length <= xfer->ux_bufsize);
2300
2301 len = xfer->ux_length;
2302 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2303 isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2304 sqh = upipe->bulk.sqh;
2305
2306 /* Take lock here to protect nexttoggle */
2307 if (!polling)
2308 mutex_enter(&sc->sc_lock);
2309
2310 uhci_reset_std_chain(sc, xfer, len, isread, &upipe->nexttoggle,
2311 &dataend);
2312
2313 data = ux->ux_stdstart;
2314 ux->ux_stdend = dataend;
2315 dataend->td.td_status |= htole32(UHCI_TD_IOC);
2316 usb_syncmem(&dataend->dma,
2317 dataend->offs + offsetof(uhci_td_t, td_status),
2318 sizeof(dataend->td.td_status),
2319 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2320
2321 #ifdef UHCI_DEBUG
2322 if (uhcidebug >= 10) {
2323 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2324 DPRINTFN(10, "before transfer", 0, 0, 0, 0);
2325 uhci_dump_tds(data);
2326 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2327 }
2328 #endif
2329
2330 KASSERT(ux->ux_isdone);
2331 #ifdef DIAGNOSTIC
2332 ux->ux_isdone = false;
2333 #endif
2334
2335 sqh->elink = data;
2336 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD);
2337 /* uhci_add_bulk() will do usb_syncmem(sqh) */
2338
2339 uhci_add_bulk(sc, sqh);
2340 uhci_add_intr_list(sc, ux);
2341 usbd_xfer_schedule_timeout(xfer);
2342 xfer->ux_status = USBD_IN_PROGRESS;
2343 if (!polling)
2344 mutex_exit(&sc->sc_lock);
2345
2346 return USBD_IN_PROGRESS;
2347 }
2348
2349 /* Abort a device bulk request. */
2350 void
2351 uhci_device_bulk_abort(struct usbd_xfer *xfer)
2352 {
2353 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2354
2355 KASSERT(mutex_owned(&sc->sc_lock));
2356
2357 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2358
2359 usbd_xfer_abort(xfer);
2360 }
2361
2362 /*
2363 * To give the hardware time to notice the deactivated TDs, we simply wait.
2364 */
2365 Static void
2366 uhci_abortx(struct usbd_xfer *xfer)
2367 {
2368 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2369 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2370 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2371 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2372 uhci_soft_td_t *std;
2373
2374 DPRINTFN(1,"xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2375
2376 KASSERT(mutex_owned(&sc->sc_lock));
2377 ASSERT_SLEEPABLE();
2378
2379 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2380 xfer->ux_status == USBD_TIMEOUT),
2381 "bad abort status: %d", xfer->ux_status);
2382
2383 /*
2384 * If we're dying, skip the hardware action and just notify the
2385 * software that we're done.
2386 */
2387 if (sc->sc_dying) {
2388 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer,
2389 xfer->ux_status, 0, 0);
2390 goto dying;
2391 }
2392
2393 /*
2394 * HC Step 1: Make interrupt routine and hardware ignore xfer.
2395 */
2396 uhci_del_intr_list(sc, ux);
2397
2398 DPRINTF("stop ux=%#jx", (uintptr_t)ux, 0, 0, 0);
2399 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) {
2400 usb_syncmem(&std->dma,
2401 std->offs + offsetof(uhci_td_t, td_status),
2402 sizeof(std->td.td_status),
2403 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2404 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC));
2405 usb_syncmem(&std->dma,
2406 std->offs + offsetof(uhci_td_t, td_status),
2407 sizeof(std->td.td_status),
2408 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2409 }
2410
2411 /*
2412 * HC Step 2: Wait until we know hardware has finished any possible
2413 * use of the xfer.
2414 */
2415 /* Hardware finishes within 1ms; wait 2ms for margin. */
2416 usb_delay_ms_locked(upipe->pipe.up_dev->ud_bus, 2, &sc->sc_lock);
2417
2418 /*
2419 * HC Step 3: Notify completion to waiting xfers.
2420 */
2421 dying:
2422 #ifdef DIAGNOSTIC
2423 ux->ux_isdone = true;
2424 #endif
2425 usb_transfer_complete(xfer);
2426 DPRINTFN(14, "end", 0, 0, 0, 0);
2427
2428 KASSERT(mutex_owned(&sc->sc_lock));
2429 }
2430
2431 /* Close a device bulk pipe. */
2432 void
2433 uhci_device_bulk_close(struct usbd_pipe *pipe)
2434 {
2435 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2436 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2437
2438 KASSERT(mutex_owned(&sc->sc_lock));
2439
2440 uhci_free_sqh(sc, upipe->bulk.sqh);
2441
2442 pipe->up_endpoint->ue_toggle = upipe->nexttoggle;
2443 }
2444
2445 int
2446 uhci_device_ctrl_init(struct usbd_xfer *xfer)
2447 {
2448 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2449 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2450 usb_device_request_t *req = &xfer->ux_request;
2451 struct usbd_device *dev = upipe->pipe.up_dev;
2452 uhci_softc_t *sc = dev->ud_bus->ub_hcpriv;
2453 uhci_soft_td_t *data = NULL;
2454 int len;
2455 usbd_status err;
2456 int isread;
2457
2458 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2459 DPRINTFN(3, "xfer=%#jx len=%jd, addr=%jd, endpt=%jd",
2460 (uintptr_t)xfer, xfer->ux_bufsize, dev->ud_addr,
2461 upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress);
2462
2463 isread = req->bmRequestType & UT_READ;
2464 len = xfer->ux_bufsize;
2465
2466 uxfer->ux_type = UX_CTRL;
2467 /* Set up data transaction */
2468 if (len != 0) {
2469 err = uhci_alloc_std_chain(sc, xfer, len, isread, &data);
2470 if (err)
2471 return err;
2472 }
2473 /* Set up interrupt info. */
2474 uxfer->ux_setup = upipe->ctrl.setup;
2475 uxfer->ux_stat = upipe->ctrl.stat;
2476 uxfer->ux_data = data;
2477
2478 return 0;
2479 }
2480
2481 Static void
2482 uhci_device_ctrl_fini(struct usbd_xfer *xfer)
2483 {
2484 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2485 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2486
2487 KASSERT(ux->ux_type == UX_CTRL);
2488
2489 if (ux->ux_nstd) {
2490 uhci_free_stds(sc, ux);
2491 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2492 }
2493 }
2494
2495 usbd_status
2496 uhci_device_ctrl_transfer(struct usbd_xfer *xfer)
2497 {
2498 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2499 usbd_status err;
2500
2501 /* Insert last in queue. */
2502 mutex_enter(&sc->sc_lock);
2503 err = usb_insert_transfer(xfer);
2504 mutex_exit(&sc->sc_lock);
2505 if (err)
2506 return err;
2507
2508 /*
2509 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
2510 * so start it first.
2511 */
2512 return uhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2513 }
2514
2515 usbd_status
2516 uhci_device_ctrl_start(struct usbd_xfer *xfer)
2517 {
2518 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2519 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2520 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2521 usb_device_request_t *req = &xfer->ux_request;
2522 struct usbd_device *dev = upipe->pipe.up_dev;
2523 int addr = dev->ud_addr;
2524 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2525 uhci_soft_td_t *setup, *stat, *next, *dataend;
2526 uhci_soft_qh_t *sqh;
2527 const bool polling = sc->sc_bus.ub_usepolling;
2528 int len;
2529 int isread;
2530
2531 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2532
2533 if (sc->sc_dying)
2534 return USBD_IOERROR;
2535
2536 KASSERT(xfer->ux_rqflags & URQ_REQUEST);
2537
2538 DPRINTFN(3, "type=0x%02jx, request=0x%02jx, "
2539 "wValue=0x%04jx, wIndex=0x%04jx",
2540 req->bmRequestType, req->bRequest, UGETW(req->wValue),
2541 UGETW(req->wIndex));
2542 DPRINTFN(3, "len=%jd, addr=%jd, endpt=%jd",
2543 UGETW(req->wLength), dev->ud_addr, endpt, 0);
2544
2545 isread = req->bmRequestType & UT_READ;
2546 len = UGETW(req->wLength);
2547
2548 setup = upipe->ctrl.setup;
2549 stat = upipe->ctrl.stat;
2550 sqh = upipe->ctrl.sqh;
2551
2552 memcpy(KERNADDR(&upipe->ctrl.reqdma, 0), req, sizeof(*req));
2553 usb_syncmem(&upipe->ctrl.reqdma, 0, sizeof(*req), BUS_DMASYNC_PREWRITE);
2554
2555 if (!polling)
2556 mutex_enter(&sc->sc_lock);
2557
2558 /* Set up data transaction */
2559 if (len != 0) {
2560 upipe->nexttoggle = 1;
2561 next = uxfer->ux_data;
2562 uhci_reset_std_chain(sc, xfer, len, isread,
2563 &upipe->nexttoggle, &dataend);
2564 dataend->link.std = stat;
2565 dataend->td.td_link = htole32(stat->physaddr | UHCI_PTR_TD);
2566 usb_syncmem(&dataend->dma,
2567 dataend->offs + offsetof(uhci_td_t, td_link),
2568 sizeof(dataend->td.td_link),
2569 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2570 } else {
2571 next = stat;
2572 }
2573
2574 const uint32_t status = UHCI_TD_ZERO_ACTLEN(
2575 UHCI_TD_SET_ERRCNT(3) |
2576 UHCI_TD_ACTIVE |
2577 (dev->ud_speed == USB_SPEED_LOW ? UHCI_TD_LS : 0)
2578 );
2579 setup->link.std = next;
2580 setup->td.td_link = htole32(next->physaddr | UHCI_PTR_TD);
2581 setup->td.td_status = htole32(status);
2582 setup->td.td_token = htole32(UHCI_TD_SETUP(sizeof(*req), endpt, addr));
2583 setup->td.td_buffer = htole32(DMAADDR(&upipe->ctrl.reqdma, 0));
2584
2585 usb_syncmem(&setup->dma, setup->offs, sizeof(setup->td),
2586 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2587
2588 stat->link.std = NULL;
2589 stat->td.td_link = htole32(UHCI_PTR_T);
2590 stat->td.td_status = htole32(status | UHCI_TD_IOC);
2591 stat->td.td_token =
2592 htole32(isread ? UHCI_TD_OUT(0, endpt, addr, 1) :
2593 UHCI_TD_IN (0, endpt, addr, 1));
2594 stat->td.td_buffer = htole32(0);
2595 usb_syncmem(&stat->dma, stat->offs, sizeof(stat->td),
2596 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
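/*
 * The chain handed to the hardware is now:
 *   SETUP TD (8-byte request) -> data TDs (only if wLength != 0)
 *   -> STATUS TD (opposite direction to the data stage, DATA1, IOC set).
 */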
2597
2598 #ifdef UHCI_DEBUG
2599 if (uhcidebug >= 10) {
2600 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2601 DPRINTF("before transfer", 0, 0, 0, 0);
2602 uhci_dump_tds(setup);
2603 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2604 }
2605 #endif
2606
2607 /* Set up interrupt info. */
2608 uxfer->ux_setup = setup;
2609 uxfer->ux_stat = stat;
2610 KASSERT(uxfer->ux_isdone);
2611 #ifdef DIAGNOSTIC
2612 uxfer->ux_isdone = false;
2613 #endif
2614
2615 sqh->elink = setup;
2616 sqh->qh.qh_elink = htole32(setup->physaddr | UHCI_PTR_TD);
2617 /* uhci_add_?s_ctrl() will do usb_syncmem(sqh) */
2618
2619 if (dev->ud_speed == USB_SPEED_LOW)
2620 uhci_add_ls_ctrl(sc, sqh);
2621 else
2622 uhci_add_hs_ctrl(sc, sqh);
2623 uhci_add_intr_list(sc, uxfer);
2624 #ifdef UHCI_DEBUG
2625 if (uhcidebug >= 12) {
2626 uhci_soft_td_t *std;
2627 uhci_soft_qh_t *xqh;
2628 uhci_soft_qh_t *sxqh;
2629 int maxqh = 0;
2630 uhci_physaddr_t link;
2631 DPRINTFN(12, "--- dump start ---", 0, 0, 0, 0);
2632 DPRINTFN(12, "follow from [0]", 0, 0, 0, 0);
2633 for (std = sc->sc_vframes[0].htd, link = 0;
2634 (link & UHCI_PTR_QH) == 0;
2635 std = std->link.std) {
2636 link = le32toh(std->td.td_link);
2637 uhci_dump_td(std);
2638 }
2639 sxqh = (uhci_soft_qh_t *)std;
2640 uhci_dump_qh(sxqh);
2641 for (xqh = sxqh;
2642 xqh != NULL;
2643 xqh = (maxqh++ == 5 || xqh->hlink == sxqh ||
2644 xqh->hlink == xqh ? NULL : xqh->hlink)) {
2645 uhci_dump_qh(xqh);
2646 }
2647 DPRINTFN(12, "Enqueued QH:", 0, 0, 0, 0);
2648 uhci_dump_qh(sqh);
2649 uhci_dump_tds(sqh->elink);
2650 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2651 }
2652 #endif
2653 usbd_xfer_schedule_timeout(xfer);
2654 xfer->ux_status = USBD_IN_PROGRESS;
2655 if (!polling)
2656 mutex_exit(&sc->sc_lock);
2657
2658 return USBD_IN_PROGRESS;
2659 }
2660
2661 int
2662 uhci_device_intr_init(struct usbd_xfer *xfer)
2663 {
2664 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2665 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2666 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
2667 int endpt = ed->bEndpointAddress;
2668 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2669 int len = xfer->ux_bufsize;
2670 int err;
2671
2672 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2673
2674 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2675 xfer->ux_length, xfer->ux_flags, 0);
2676
2677 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2678 KASSERT(len != 0);
2679
2680 ux->ux_type = UX_INTR;
2681 ux->ux_nstd = 0;
2682 err = uhci_alloc_std_chain(sc, xfer, len, isread, &ux->ux_stdstart);
2683
2684 return err;
2685 }
2686
2687 Static void
2688 uhci_device_intr_fini(struct usbd_xfer *xfer)
2689 {
2690 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2691 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2692
2693 KASSERT(ux->ux_type == UX_INTR);
2694
2695 if (ux->ux_nstd) {
2696 uhci_free_stds(sc, ux);
2697 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2698 }
2699 }
2700
2701 usbd_status
2702 uhci_device_intr_transfer(struct usbd_xfer *xfer)
2703 {
2704 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2705 usbd_status err;
2706
2707 /* Insert last in queue. */
2708 mutex_enter(&sc->sc_lock);
2709 err = usb_insert_transfer(xfer);
2710 mutex_exit(&sc->sc_lock);
2711 if (err)
2712 return err;
2713
2714 /*
2715 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
2716 * so start it first.
2717 */
2718 return uhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2719 }
2720
2721 usbd_status
2722 uhci_device_intr_start(struct usbd_xfer *xfer)
2723 {
2724 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2725 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2726 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2727 uhci_soft_td_t *data, *dataend;
2728 uhci_soft_qh_t *sqh;
2729 const bool polling = sc->sc_bus.ub_usepolling;
2730 int isread, endpt;
2731 int i;
2732
2733 if (sc->sc_dying)
2734 return USBD_IOERROR;
2735
2736 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2737
2738 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2739 xfer->ux_length, xfer->ux_flags, 0);
2740
2741 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2742 KASSERT(xfer->ux_length <= xfer->ux_bufsize);
2743
2744 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2745 isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2746
2747 data = ux->ux_stdstart;
2748
2749 KASSERT(ux->ux_isdone);
2750 #ifdef DIAGNOSTIC
2751 ux->ux_isdone = false;
2752 #endif
2753
2754 /* Take lock to protect nexttoggle */
2755 if (!polling)
2756 mutex_enter(&sc->sc_lock);
2757 uhci_reset_std_chain(sc, xfer, xfer->ux_length, isread,
2758 &upipe->nexttoggle, &dataend);
2759
2760 dataend->td.td_status |= htole32(UHCI_TD_IOC);
2761 usb_syncmem(&dataend->dma,
2762 dataend->offs + offsetof(uhci_td_t, td_status),
2763 sizeof(dataend->td.td_status),
2764 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2765 ux->ux_stdend = dataend;
2766
2767 #ifdef UHCI_DEBUG
2768 if (uhcidebug >= 10) {
2769 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2770 uhci_dump_tds(data);
2771 uhci_dump_qh(upipe->intr.qhs[0]);
2772 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2773 }
2774 #endif
2775
2776 DPRINTFN(10, "qhs[0]=%#jx", (uintptr_t)upipe->intr.qhs[0], 0, 0, 0);
2777 for (i = 0; i < upipe->intr.npoll; i++) {
2778 sqh = upipe->intr.qhs[i];
2779 sqh->elink = data;
2780 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD);
2781 usb_syncmem(&sqh->dma,
2782 sqh->offs + offsetof(uhci_qh_t, qh_elink),
2783 sizeof(sqh->qh.qh_elink),
2784 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2785 }
2786 uhci_add_intr_list(sc, ux);
2787 xfer->ux_status = USBD_IN_PROGRESS;
2788 if (!polling)
2789 mutex_exit(&sc->sc_lock);
2790
2791 #ifdef UHCI_DEBUG
2792 if (uhcidebug >= 10) {
2793 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2794 uhci_dump_tds(data);
2795 uhci_dump_qh(upipe->intr.qhs[0]);
2796 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2797 }
2798 #endif
2799
2800 return USBD_IN_PROGRESS;
2801 }
2802
2803 /* Abort a device control request. */
2804 void
2805 uhci_device_ctrl_abort(struct usbd_xfer *xfer)
2806 {
2807 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2808
2809 KASSERT(mutex_owned(&sc->sc_lock));
2810
2811 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2812 usbd_xfer_abort(xfer);
2813 }
2814
2815 /* Close a device control pipe. */
2816 void
2817 uhci_device_ctrl_close(struct usbd_pipe *pipe)
2818 {
2819 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2820 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2821
2822 uhci_free_sqh(sc, upipe->ctrl.sqh);
2823 uhci_free_std_locked(sc, upipe->ctrl.setup);
2824 uhci_free_std_locked(sc, upipe->ctrl.stat);
2825
2826 usb_freemem(&sc->sc_bus, &upipe->ctrl.reqdma);
2827 }
2828
2829 /* Abort a device interrupt request. */
2830 void
2831 uhci_device_intr_abort(struct usbd_xfer *xfer)
2832 {
2833 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2834
2835 KASSERT(mutex_owned(&sc->sc_lock));
2836
2837 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2838 DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2839
2840 usbd_xfer_abort(xfer);
2841 }
2842
2843 /* Close a device interrupt pipe. */
2844 void
2845 uhci_device_intr_close(struct usbd_pipe *pipe)
2846 {
2847 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2848 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2849 int i, npoll;
2850
2851 KASSERT(mutex_owned(&sc->sc_lock));
2852
2853 /* Unlink descriptors from controller data structures. */
2854 npoll = upipe->intr.npoll;
2855 for (i = 0; i < npoll; i++)
2856 uhci_remove_intr(sc, upipe->intr.qhs[i]);
2857
2858 /*
2859 * We now have to wait for any activity on the physical
2860 * descriptors to stop.
2861 */
2862 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock);
2863
2864 for (i = 0; i < npoll; i++)
2865 uhci_free_sqh(sc, upipe->intr.qhs[i]);
2866 kmem_free(upipe->intr.qhs, npoll * sizeof(uhci_soft_qh_t *));
2867 }
2868
2869 int
2870 uhci_device_isoc_init(struct usbd_xfer *xfer)
2871 {
2872 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2873
2874 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2875 KASSERT(xfer->ux_nframes != 0);
2876 KASSERT(ux->ux_isdone);
2877
2878 ux->ux_type = UX_ISOC;
2879 return 0;
2880 }
2881
2882 Static void
2883 uhci_device_isoc_fini(struct usbd_xfer *xfer)
2884 {
2885 struct uhci_xfer *ux __diagused = UHCI_XFER2UXFER(xfer);
2886
2887 KASSERT(ux->ux_type == UX_ISOC);
2888 }
2889
2890 usbd_status
2891 uhci_device_isoc_transfer(struct usbd_xfer *xfer)
2892 {
2893 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2894 usbd_status err __diagused;
2895
2896 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2897 DPRINTFN(5, "xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2898
2899 /* Put it on our queue. */
2900 mutex_enter(&sc->sc_lock);
2901 err = usb_insert_transfer(xfer);
2902 mutex_exit(&sc->sc_lock);
2903
2904 KASSERT(err == USBD_NORMAL_COMPLETION);
2905
2906 /* Insert it into the schedule. */
2907
2908 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2909 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2910 struct isoc *isoc = &upipe->isoc;
2911 uhci_soft_td_t *std = NULL;
2912 uint32_t buf, len, status, offs;
2913 int i, next, nframes;
2914 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN;
2915
2916 DPRINTFN(5, "used=%jd next=%jd xfer=%#jx nframes=%jd",
2917 isoc->inuse, isoc->next, (uintptr_t)xfer, xfer->ux_nframes);
2918
2919 if (sc->sc_dying)
2920 return USBD_IOERROR;
2921
2922 if (xfer->ux_status == USBD_IN_PROGRESS) {
2923 /* This request has already been entered into the frame list */
2924 printf("%s: xfer=%p in frame list\n", __func__, xfer);
2925 /* XXX */
2926 }
2927
2928 #ifdef DIAGNOSTIC
2929 if (isoc->inuse >= UHCI_VFRAMELIST_COUNT)
2930 printf("%s: overflow!\n", __func__);
2931 #endif
2932
2933 KASSERT(xfer->ux_nframes != 0);
2934
2935 if (xfer->ux_length)
2936 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
2937 rd ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2938
2939 mutex_enter(&sc->sc_lock);
2940 next = isoc->next;
2941 if (next == -1) {
2942 /* Not in use yet, schedule it a few frames ahead. */
2943 next = (UREAD2(sc, UHCI_FRNUM) + 3) % UHCI_VFRAMELIST_COUNT;
2944 DPRINTFN(2, "start next=%jd", next, 0, 0, 0);
2945 }
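/*
 * Frames are consumed round-robin from isoc->next, wrapping at
 * UHCI_VFRAMELIST_COUNT; starting at FRNUM + 3 leaves a small margin
 * ahead of the frame the controller is currently executing.
 */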
2946
2947 xfer->ux_status = USBD_IN_PROGRESS;
2948 ux->ux_curframe = next;
2949
2950 offs = 0;
2951 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(0) |
2952 UHCI_TD_ACTIVE |
2953 UHCI_TD_IOS);
2954 nframes = xfer->ux_nframes;
2955 for (i = 0; i < nframes; i++) {
2956 buf = DMAADDR(&xfer->ux_dmabuf, offs);
2957 std = isoc->stds[next];
2958 if (++next >= UHCI_VFRAMELIST_COUNT)
2959 next = 0;
2960 len = xfer->ux_frlengths[i];
2961
2962 KASSERTMSG(len <= __SHIFTOUT_MASK(UHCI_TD_MAXLEN_MASK),
2963 "len %d", len);
2964 std->td.td_buffer = htole32(buf);
2965 usb_syncmem(&xfer->ux_dmabuf, offs, len,
2966 rd ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2967 if (i == nframes - 1)
2968 status |= UHCI_TD_IOC;
2969 std->td.td_status = htole32(status);
2970 std->td.td_token &= htole32(~UHCI_TD_MAXLEN_MASK);
2971 std->td.td_token |= htole32(UHCI_TD_SET_MAXLEN(len));
2972 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2973 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2974 #ifdef UHCI_DEBUG
2975 if (uhcidebug >= 5) {
2976 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2977 DPRINTF("TD %jd", i, 0, 0, 0);
2978 uhci_dump_td(std);
2979 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2980 }
2981 #endif
2982 offs += len;
2983 const bus_addr_t bend __diagused =
2984 DMAADDR(&xfer->ux_dmabuf, offs - 1);
2985
2986 KASSERT(((buf ^ bend) & ~PAGE_MASK) == 0);
2987 }
2988 isoc->next = next;
2989 isoc->inuse += xfer->ux_nframes;
2990
2991 /* Set up interrupt info. */
2992 ux->ux_stdstart = std;
2993 ux->ux_stdend = std;
2994
2995 KASSERT(ux->ux_isdone);
2996 #ifdef DIAGNOSTIC
2997 ux->ux_isdone = false;
2998 #endif
2999 uhci_add_intr_list(sc, ux);
3000
3001 mutex_exit(&sc->sc_lock);
3002
3003 return USBD_IN_PROGRESS;
3004 }
3005
3006 void
3007 uhci_device_isoc_abort(struct usbd_xfer *xfer)
3008 {
3009 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3010 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3011 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
3012 uhci_soft_td_t **stds = upipe->isoc.stds;
3013 uhci_soft_td_t *std;
3014 int i, n, nframes, maxlen, len;
3015
3016 KASSERT(mutex_owned(&sc->sc_lock));
3017
3018 /* Transfer is already done. */
3019 if (xfer->ux_status != USBD_NOT_STARTED &&
3020 xfer->ux_status != USBD_IN_PROGRESS) {
3021 return;
3022 }
3023
3024 /* Mark the xfer as cancelled. */
3025 xfer->ux_status = USBD_CANCELLED;
3026
3027 /* make hardware ignore it, */
3028 nframes = xfer->ux_nframes;
3029 n = ux->ux_curframe;
3030 maxlen = 0;
3031 for (i = 0; i < nframes; i++) {
3032 std = stds[n];
3033 usb_syncmem(&std->dma,
3034 std->offs + offsetof(uhci_td_t, td_status),
3035 sizeof(std->td.td_status),
3036 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3037 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC));
3038 usb_syncmem(&std->dma,
3039 std->offs + offsetof(uhci_td_t, td_status),
3040 sizeof(std->td.td_status),
3041 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3042 usb_syncmem(&std->dma,
3043 std->offs + offsetof(uhci_td_t, td_token),
3044 sizeof(std->td.td_token),
3045 BUS_DMASYNC_POSTWRITE);
3046 len = UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token));
3047 if (len > maxlen)
3048 maxlen = len;
3049 if (++n >= UHCI_VFRAMELIST_COUNT)
3050 n = 0;
3051 }
3052
3053 /* and wait until we are sure the hardware has finished. */
3054 delay(maxlen);
3055
3056 #ifdef DIAGNOSTIC
3057 ux->ux_isdone = true;
3058 #endif
3059 /* Remove from interrupt list. */
3060 uhci_del_intr_list(sc, ux);
3061
3062 /* Run callback. */
3063 usb_transfer_complete(xfer);
3064
3065 KASSERT(mutex_owned(&sc->sc_lock));
3066 }
3067
3068 void
3069 uhci_device_isoc_close(struct usbd_pipe *pipe)
3070 {
3071 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3072 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3073 uhci_soft_td_t *std, *vstd;
3074 struct isoc *isoc;
3075 int i;
3076
3077 KASSERT(mutex_owned(&sc->sc_lock));
3078
3079 /*
3080 * Make sure all TDs are marked as inactive.
3081 * Wait for completion.
3082 * Unschedule.
3083 * Deallocate.
3084 */
3085 isoc = &upipe->isoc;
3086
3087 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3088 std = isoc->stds[i];
3089 usb_syncmem(&std->dma,
3090 std->offs + offsetof(uhci_td_t, td_status),
3091 sizeof(std->td.td_status),
3092 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3093 std->td.td_status &= htole32(~UHCI_TD_ACTIVE);
3094 usb_syncmem(&std->dma,
3095 std->offs + offsetof(uhci_td_t, td_status),
3096 sizeof(std->td.td_status),
3097 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3098 }
3099 /* wait for completion */
3100 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock);
3101
3102 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3103 std = isoc->stds[i];
3104 for (vstd = sc->sc_vframes[i].htd;
3105 vstd != NULL && vstd->link.std != std;
3106 vstd = vstd->link.std)
3107 ;
3108 if (vstd == NULL) {
3109 /* should not happen; XXX panic? */
3110 printf("%s: %p not found\n", __func__, std);
3111 /* sc_lock is held by our caller; don't drop it here */
3112 return;
3113 }
3114 vstd->link = std->link;
3115 usb_syncmem(&std->dma,
3116 std->offs + offsetof(uhci_td_t, td_link),
3117 sizeof(std->td.td_link),
3118 BUS_DMASYNC_POSTWRITE);
3119 vstd->td.td_link = std->td.td_link;
3120 usb_syncmem(&vstd->dma,
3121 vstd->offs + offsetof(uhci_td_t, td_link),
3122 sizeof(vstd->td.td_link),
3123 BUS_DMASYNC_PREWRITE);
3124 uhci_free_std_locked(sc, std);
3125 }
3126
3127 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *));
3128 }
3129
3130 usbd_status
3131 uhci_setup_isoc(struct usbd_pipe *pipe)
3132 {
3133 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3134 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3135 int addr = upipe->pipe.up_dev->ud_addr;
3136 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
3137 int rd = UE_GET_DIR(endpt) == UE_DIR_IN;
3138 uhci_soft_td_t *std, *vstd;
3139 uint32_t token;
3140 struct isoc *isoc;
3141 int i;
3142
3143 isoc = &upipe->isoc;
3144
3145 isoc->stds = kmem_alloc(
3146 UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *), KM_SLEEP);
3147 if (isoc->stds == NULL)
3148 return USBD_NOMEM;
3149
3150 token = rd ? UHCI_TD_IN (0, endpt, addr, 0) :
3151 UHCI_TD_OUT(0, endpt, addr, 0);
3152
3153 /* Allocate the TDs and mark them as inactive. */
3154 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3155 std = uhci_alloc_std(sc);
3156 if (std == NULL)
3157 goto bad;
3158 std->td.td_status = htole32(UHCI_TD_IOS); /* iso, inactive */
3159 std->td.td_token = htole32(token);
3160 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
3161 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3162 isoc->stds[i] = std;
3163 }
3164
3165 mutex_enter(&sc->sc_lock);
3166
3167 /* Insert TDs into schedule. */
3168 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3169 std = isoc->stds[i];
3170 vstd = sc->sc_vframes[i].htd;
3171 usb_syncmem(&vstd->dma,
3172 vstd->offs + offsetof(uhci_td_t, td_link),
3173 sizeof(vstd->td.td_link),
3174 BUS_DMASYNC_POSTWRITE);
3175 std->link = vstd->link;
3176 std->td.td_link = vstd->td.td_link;
3177 usb_syncmem(&std->dma,
3178 std->offs + offsetof(uhci_td_t, td_link),
3179 sizeof(std->td.td_link),
3180 BUS_DMASYNC_PREWRITE);
3181 vstd->link.std = std;
3182 vstd->td.td_link = htole32(std->physaddr | UHCI_PTR_TD);
3183 usb_syncmem(&vstd->dma,
3184 vstd->offs + offsetof(uhci_td_t, td_link),
3185 sizeof(vstd->td.td_link),
3186 BUS_DMASYNC_PREWRITE);
3187 }
3188 mutex_exit(&sc->sc_lock);
3189
3190 isoc->next = -1;
3191 isoc->inuse = 0;
3192
3193 return USBD_NORMAL_COMPLETION;
3194
3195 bad:
3196 while (--i >= 0)
3197 uhci_free_std(sc, isoc->stds[i]);
3198 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *));
3199 return USBD_NOMEM;
3200 }
3201
3202 void
3203 uhci_device_isoc_done(struct usbd_xfer *xfer)
3204 {
3205 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
3206 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3207 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
3208 int i, offs;
3209 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN;
3210
3211 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3212 DPRINTFN(4, "length=%jd, ux_state=0x%08jx",
3213 xfer->ux_actlen, xfer->ux_state, 0, 0);
3214
3215 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3216
3217 #ifdef DIAGNOSTIC
3218 if (ux->ux_stdend == NULL) {
3219 printf("%s: xfer=%p stdend==NULL\n", __func__, xfer);
3220 #ifdef UHCI_DEBUG
3221 DPRINTF("--- dump start ---", 0, 0, 0, 0);
3222 uhci_dump_ii(ux);
3223 DPRINTF("--- dump end ---", 0, 0, 0, 0);
3224 #endif
3225 return;
3226 }
3227 #endif
3228
3229 /* Turn off the interrupt since it is active even if the TD is not. */
3230 usb_syncmem(&ux->ux_stdend->dma,
3231 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status),
3232 sizeof(ux->ux_stdend->td.td_status),
3233 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3234 ux->ux_stdend->td.td_status &= htole32(~UHCI_TD_IOC);
3235 usb_syncmem(&ux->ux_stdend->dma,
3236 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status),
3237 sizeof(ux->ux_stdend->td.td_status),
3238 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3239
3240 offs = 0;
3241 for (i = 0; i < xfer->ux_nframes; i++) {
3242 usb_syncmem(&xfer->ux_dmabuf, offs, xfer->ux_frlengths[i],
3243 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3244 offs += xfer->ux_frlengths[i];
3245 }
3246 }
3247
3248 void
3249 uhci_device_intr_done(struct usbd_xfer *xfer)
3250 {
3251 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
3252 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3253 uhci_soft_qh_t *sqh;
3254 int i, npoll;
3255
3256 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3257 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0);
3258
3259 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3260
3261 npoll = upipe->intr.npoll;
3262 for (i = 0; i < npoll; i++) {
3263 sqh = upipe->intr.qhs[i];
3264 sqh->elink = NULL;
3265 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3266 usb_syncmem(&sqh->dma,
3267 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3268 sizeof(sqh->qh.qh_elink),
3269 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3270 }
3271 const int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
3272 const bool isread = UE_GET_DIR(endpt) == UE_DIR_IN;
3273 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
3274 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3275 }
3276
3277 /* Finish a control request: remove its QH from the schedule and sync buffers. */
3278 void
3279 uhci_device_ctrl_done(struct usbd_xfer *xfer)
3280 {
3281 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3282 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3283 int len = UGETW(xfer->ux_request.wLength);
3284 int isread = (xfer->ux_request.bmRequestType & UT_READ);
3285
3286 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3287
3288 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3289 KASSERT(xfer->ux_rqflags & URQ_REQUEST);
3290
3291 /* XXXNH move to uhci_idone??? */
3292 if (upipe->pipe.up_dev->ud_speed == USB_SPEED_LOW)
3293 uhci_remove_ls_ctrl(sc, upipe->ctrl.sqh);
3294 else
3295 uhci_remove_hs_ctrl(sc, upipe->ctrl.sqh);
3296
3297 if (len) {
3298 usb_syncmem(&xfer->ux_dmabuf, 0, len,
3299 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3300 }
3301 usb_syncmem(&upipe->ctrl.reqdma, 0,
3302 sizeof(usb_device_request_t), BUS_DMASYNC_POSTWRITE);
3303
3304 DPRINTF("length=%jd", xfer->ux_actlen, 0, 0, 0);
3305 }
3306
3307 /* Finish a bulk request: remove its QH from the schedule and sync the buffer. */
3308 void
3309 uhci_device_bulk_done(struct usbd_xfer *xfer)
3310 {
3311 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3312 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3313 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
3314 int endpt = ed->bEndpointAddress;
3315 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
3316
3317 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3318 DPRINTFN(5, "xfer=%#jx sc=%#jx upipe=%#jx", (uintptr_t)xfer,
3319 (uintptr_t)sc, (uintptr_t)upipe, 0);
3320
3321 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3322
3323 uhci_remove_bulk(sc, upipe->bulk.sqh);
3324
3325 if (xfer->ux_length) {
3326 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
3327 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3328 }
3329
3330 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0);
3331 }
3332
3333 /* Add an interrupt QH; called with sc_lock held. */
3334 void
3335 uhci_add_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
3336 {
3337 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos];
3338 uhci_soft_qh_t *eqh;
3339
3340 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3341 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0);
3342
3343 eqh = vf->eqh;
3344 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
3345 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
3346 sqh->hlink = eqh->hlink;
3347 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
3348 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
3349 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
3350 eqh->hlink = sqh;
3351 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
3352 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
3353 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
3354 vf->eqh = sqh;
3355 vf->bandwidth++;
3356 }
3357
3358 /* Remove interrupt QH. */
3359 void
3360 uhci_remove_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
3361 {
3362 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos];
3363 uhci_soft_qh_t *pqh;
3364
3365 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3366 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0);
3367
3368 /* See comment in uhci_remove_ctrl() */
3369
3370 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
3371 sizeof(sqh->qh.qh_elink),
3372 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3373 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) {
3374 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3375 usb_syncmem(&sqh->dma,
3376 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3377 sizeof(sqh->qh.qh_elink),
3378 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3379 delay(UHCI_QH_REMOVE_DELAY);
3380 }
3381
3382 pqh = uhci_find_prev_qh(vf->hqh, sqh);
3383 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
3384 sizeof(sqh->qh.qh_hlink),
3385 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3386 pqh->hlink = sqh->hlink;
3387 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
3388 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
3389 sizeof(pqh->qh.qh_hlink),
3390 BUS_DMASYNC_PREWRITE);
3391 delay(UHCI_QH_REMOVE_DELAY);
3392 if (vf->eqh == sqh)
3393 vf->eqh = pqh;
3394 vf->bandwidth--;
3395 }
3396
3397 usbd_status
3398 uhci_device_setintr(uhci_softc_t *sc, struct uhci_pipe *upipe, int ival)
3399 {
3400 uhci_soft_qh_t *sqh;
3401 int i, npoll;
3402 u_int bestbw, bw, bestoffs, offs;
3403
3404 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3405 DPRINTFN(2, "pipe=%#jx", (uintptr_t)upipe, 0, 0, 0);
3406 if (ival == 0) {
3407 printf("%s: 0 interval\n", __func__);
3408 return USBD_INVAL;
3409 }
3410
3411 if (ival > UHCI_VFRAMELIST_COUNT)
3412 ival = UHCI_VFRAMELIST_COUNT;
3413 npoll = howmany(UHCI_VFRAMELIST_COUNT, ival);
3414 DPRINTF("ival=%jd npoll=%jd", ival, npoll, 0, 0);
3415
3416 upipe->intr.npoll = npoll;
3417 upipe->intr.qhs =
3418 kmem_alloc(npoll * sizeof(uhci_soft_qh_t *), KM_SLEEP);
3419
3420 /*
3421  * Figure out which offset in the schedule has the most
3422  * bandwidth left over.
3423  */
3424 #define MOD(i) ((i) & (UHCI_VFRAMELIST_COUNT-1))
3425 for (bestoffs = offs = 0, bestbw = ~0; offs < ival; offs++) {
3426 for (bw = i = 0; i < npoll; i++)
3427 bw += sc->sc_vframes[MOD(i * ival + offs)].bandwidth;
3428 if (bw < bestbw) {
3429 bestbw = bw;
3430 bestoffs = offs;
3431 }
3432 }
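/*
 * For example (illustrative numbers): with UHCI_VFRAMELIST_COUNT == 128
 * and ival == 32, npoll is 4 and each candidate offset 0..31 is scored by
 * summing the bandwidth already assigned to frames offs, offs + 32,
 * offs + 64 and offs + 96; the least-loaded offset wins.
 */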
3433 DPRINTF("bw=%jd offs=%jd", bestbw, bestoffs, 0, 0);
3434 for (i = 0; i < npoll; i++) {
3435 upipe->intr.qhs[i] = sqh = uhci_alloc_sqh(sc);
3436 sqh->elink = NULL;
3437 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3438 usb_syncmem(&sqh->dma,
3439 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3440 sizeof(sqh->qh.qh_elink),
3441 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3442 sqh->pos = MOD(i * ival + bestoffs);
3443 }
3444 #undef MOD
3445
3446 mutex_enter(&sc->sc_lock);
3447 /* Enter QHs into the controller data structures. */
3448 for (i = 0; i < npoll; i++)
3449 uhci_add_intr(sc, upipe->intr.qhs[i]);
3450 mutex_exit(&sc->sc_lock);
3451
3452 DPRINTFN(5, "returns %#jx", (uintptr_t)upipe, 0, 0, 0);
3453
3454 return USBD_NORMAL_COMPLETION;
3455 }
3456
3457 /* Open a new pipe. */
3458 usbd_status
3459 uhci_open(struct usbd_pipe *pipe)
3460 {
3461 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3462 struct usbd_bus *bus = pipe->up_dev->ud_bus;
3463 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3464 usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc;
3465 int ival;
3466
3467 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3468 DPRINTF("pipe=%#jx, addr=%jd, endpt=%jd (%jd)",
3469 (uintptr_t)pipe, pipe->up_dev->ud_addr, ed->bEndpointAddress,
3470 bus->ub_rhaddr);
3471
3472 if (sc->sc_dying)
3473 return USBD_IOERROR;
3474
3475 upipe->aborting = 0;
3476 /* toggle state needed for bulk endpoints */
3477 upipe->nexttoggle = pipe->up_endpoint->ue_toggle;
3478
3479 if (pipe->up_dev->ud_addr == bus->ub_rhaddr) {
3480 switch (ed->bEndpointAddress) {
3481 case USB_CONTROL_ENDPOINT:
3482 pipe->up_methods = &roothub_ctrl_methods;
3483 break;
3484 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
3485 pipe->up_methods = &uhci_root_intr_methods;
3486 break;
3487 default:
3488 return USBD_INVAL;
3489 }
3490 } else {
3491 switch (ed->bmAttributes & UE_XFERTYPE) {
3492 case UE_CONTROL:
3493 pipe->up_methods = &uhci_device_ctrl_methods;
3494 upipe->ctrl.sqh = uhci_alloc_sqh(sc);
3495 if (upipe->ctrl.sqh == NULL)
3496 goto bad;
3497 upipe->ctrl.setup = uhci_alloc_std(sc);
3498 if (upipe->ctrl.setup == NULL) {
3499 uhci_free_sqh(sc, upipe->ctrl.sqh);
3500 goto bad;
3501 }
3502 upipe->ctrl.stat = uhci_alloc_std(sc);
3503 if (upipe->ctrl.stat == NULL) {
3504 uhci_free_sqh(sc, upipe->ctrl.sqh);
3505 uhci_free_std(sc, upipe->ctrl.setup);
3506 goto bad;
3507 }
3508 int err = usb_allocmem(&sc->sc_bus,
3509 sizeof(usb_device_request_t), 0,
3510 USBMALLOC_COHERENT, &upipe->ctrl.reqdma);
3511 if (err) {
3512 uhci_free_sqh(sc, upipe->ctrl.sqh);
3513 uhci_free_std(sc, upipe->ctrl.setup);
3514 uhci_free_std(sc, upipe->ctrl.stat);
3515 goto bad;
3516 }
3517 break;
3518 case UE_INTERRUPT:
3519 pipe->up_methods = &uhci_device_intr_methods;
3520 ival = pipe->up_interval;
3521 if (ival == USBD_DEFAULT_INTERVAL)
3522 ival = ed->bInterval;
3523 return uhci_device_setintr(sc, upipe, ival);
3524 case UE_ISOCHRONOUS:
3525 pipe->up_serialise = false;
3526 pipe->up_methods = &uhci_device_isoc_methods;
3527 return uhci_setup_isoc(pipe);
3528 case UE_BULK:
3529 pipe->up_methods = &uhci_device_bulk_methods;
3530 upipe->bulk.sqh = uhci_alloc_sqh(sc);
3531 if (upipe->bulk.sqh == NULL)
3532 goto bad;
3533 break;
3534 }
3535 }
3536 return USBD_NORMAL_COMPLETION;
3537
3538 bad:
3539 return USBD_NOMEM;
3540 }
3541
3542 /*
3543 * Data structures and routines to emulate the root hub.
3544 */
3545 /*
3546 * The USB hub protocol requires that SET_FEATURE(PORT_RESET) also
3547 * enables the port, and also states that SET_FEATURE(PORT_ENABLE)
3548 * should not be used by the USB subsystem. As we cannot issue a
3549 * SET_FEATURE(PORT_ENABLE) externally, we must ensure that the port
3550 * will be enabled as part of the reset.
3551 *
3552 * On the VT83C572, the port cannot be successfully enabled until the
3553 * outstanding "port enable change" and "connection status change"
3554 * events have been reset.
3555 */
3556 Static usbd_status
3557 uhci_portreset(uhci_softc_t *sc, int index)
3558 {
3559 int lim, port, x;
3560 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3561
3562 if (index == 1)
3563 port = UHCI_PORTSC1;
3564 else if (index == 2)
3565 port = UHCI_PORTSC2;
3566 else
3567 return USBD_IOERROR;
3568
3569 x = URWMASK(UREAD2(sc, port));
3570 UWRITE2(sc, port, x | UHCI_PORTSC_PR);
3571
3572 usb_delay_ms(&sc->sc_bus, USB_PORT_ROOT_RESET_DELAY);
3573
3574 DPRINTF("uhci port %jd reset, status0 = 0x%04jx", index,
3575 UREAD2(sc, port), 0, 0);
3576
3577 x = URWMASK(UREAD2(sc, port));
3578 UWRITE2(sc, port, x & ~(UHCI_PORTSC_PR | UHCI_PORTSC_SUSP));
3579
3580 delay(100);
3581
3582 DPRINTF("uhci port %jd reset, status1 = 0x%04jx", index,
3583 UREAD2(sc, port), 0, 0);
3584
3585 x = URWMASK(UREAD2(sc, port));
3586 UWRITE2(sc, port, x | UHCI_PORTSC_PE);
3587
3588 for (lim = 10; --lim > 0;) {
3589 usb_delay_ms(&sc->sc_bus, USB_PORT_RESET_DELAY);
3590
3591 x = UREAD2(sc, port);
3592 DPRINTF("uhci port %jd iteration %ju, status = 0x%04jx", index,
3593 lim, x, 0);
3594
3595 if (!(x & UHCI_PORTSC_CCS)) {
3596 /*
3597 * No device is connected (or was disconnected
3598 * during reset). Consider the port reset.
3599 * The delay must be long enough to ensure on
3600 * the initial iteration that the device
3601 * connection will have been registered. 50ms
3602 * appears to be sufficient, but 20ms is not.
3603 */
3604 DPRINTFN(3, "uhci port %jd loop %ju, device detached",
3605 index, lim, 0, 0);
3606 break;
3607 }
3608
3609 if (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC)) {
3610 /*
3611 * Port enabled changed and/or connection
3612 * status changed were set. Reset either or
3613 * both raised flags (by writing a 1 to that
3614 * bit), and wait again for state to settle.
3615 */
3616 UWRITE2(sc, port, URWMASK(x) |
3617 (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC)));
3618 continue;
3619 }
3620
3621 if (x & UHCI_PORTSC_PE)
3622 /* Port is enabled */
3623 break;
3624
3625 UWRITE2(sc, port, URWMASK(x) | UHCI_PORTSC_PE);
3626 }
3627
3628 DPRINTFN(3, "uhci port %jd reset, status2 = 0x%04jx", index,
3629 UREAD2(sc, port), 0, 0);
3630
3631 if (lim <= 0) {
3632 DPRINTF("uhci port %jd reset timed out", index,
3633 0, 0, 0);
3634 return USBD_TIMEOUT;
3635 }
3636
3637 sc->sc_isreset = 1;
3638 return USBD_NORMAL_COMPLETION;
3639 }
3640
3641 Static int
3642 uhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
3643 void *buf, int buflen)
3644 {
3645 uhci_softc_t *sc = UHCI_BUS2SC(bus);
3646 int port, x;
3647 int status, change, totlen = 0;
3648 uint16_t len, value, index;
3649 usb_port_status_t ps;
3650 usbd_status err;
3651
3652 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3653
3654 if (sc->sc_dying)
3655 return -1;
3656
3657 DPRINTF("type=0x%02jx request=%02jx", req->bmRequestType,
3658 req->bRequest, 0, 0);
3659
3660 len = UGETW(req->wLength);
3661 value = UGETW(req->wValue);
3662 index = UGETW(req->wIndex);
3663
3664 #define C(x,y) ((x) | ((y) << 8))
3665 switch (C(req->bRequest, req->bmRequestType)) {
3666 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3667 DPRINTF("wValue=0x%04jx", value, 0, 0, 0);
3668 if (len == 0)
3669 break;
3670 switch (value) {
3671 #define sd ((usb_string_descriptor_t *)buf)
3672 case C(2, UDESC_STRING):
3673 /* Product */
3674 totlen = usb_makestrdesc(sd, len, "UHCI root hub");
3675 break;
3676 #undef sd
3677 default:
3678 /* default from usbroothub */
3679 return buflen;
3680 }
3681 break;
3682
3683 /* Hub requests */
3684 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
3685 break;
3686 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
3687 DPRINTF("UR_CLEAR_PORT_FEATURE port=%jd feature=%jd", index,
3688 value, 0, 0);
3689 if (index == 1)
3690 port = UHCI_PORTSC1;
3691 else if (index == 2)
3692 port = UHCI_PORTSC2;
3693 else {
3694 return -1;
3695 }
3696 switch(value) {
3697 case UHF_PORT_ENABLE:
3698 x = URWMASK(UREAD2(sc, port));
3699 UWRITE2(sc, port, x & ~UHCI_PORTSC_PE);
3700 break;
3701 case UHF_PORT_SUSPEND:
3702 x = URWMASK(UREAD2(sc, port));
3703 if (!(x & UHCI_PORTSC_SUSP)) /* not suspended */
3704 break;
3705 UWRITE2(sc, port, x | UHCI_PORTSC_RD);
3706 /* see USB2 spec ch. 7.1.7.7 */
3707 usb_delay_ms(&sc->sc_bus, 20);
3708 UWRITE2(sc, port, x & ~UHCI_PORTSC_SUSP);
3709 /* 10ms resume delay must be provided by caller */
3710 break;
3711 case UHF_PORT_RESET:
3712 x = URWMASK(UREAD2(sc, port));
3713 UWRITE2(sc, port, x & ~UHCI_PORTSC_PR);
3714 break;
3715 case UHF_C_PORT_CONNECTION:
3716 x = URWMASK(UREAD2(sc, port));
3717 UWRITE2(sc, port, x | UHCI_PORTSC_CSC);
3718 break;
3719 case UHF_C_PORT_ENABLE:
3720 x = URWMASK(UREAD2(sc, port));
3721 UWRITE2(sc, port, x | UHCI_PORTSC_POEDC);
3722 break;
3723 case UHF_C_PORT_OVER_CURRENT:
3724 x = URWMASK(UREAD2(sc, port));
3725 UWRITE2(sc, port, x | UHCI_PORTSC_OCIC);
3726 break;
3727 case UHF_C_PORT_RESET:
3728 sc->sc_isreset = 0;
3729 break;
3730 case UHF_PORT_CONNECTION:
3731 case UHF_PORT_OVER_CURRENT:
3732 case UHF_PORT_POWER:
3733 case UHF_PORT_LOW_SPEED:
3734 case UHF_C_PORT_SUSPEND:
3735 default:
3736 return -1;
3737 }
3738 break;
3739 case C(UR_GET_BUS_STATE, UT_READ_CLASS_OTHER):
3740 if (index == 1)
3741 port = UHCI_PORTSC1;
3742 else if (index == 2)
3743 port = UHCI_PORTSC2;
3744 else {
3745 return -1;
3746 }
3747 if (len > 0) {
3748 *(uint8_t *)buf =
3749 UHCI_PORTSC_GET_LS(UREAD2(sc, port));
3750 totlen = 1;
3751 }
3752 break;
3753 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
3754 if (len == 0)
3755 break;
3756 if ((value & 0xff) != 0) {
3757 return -1;
3758 }
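		/*
		 * buf already holds the default hub descriptor prepared by
		 * the generic usbroothub code; copy it out, override the
		 * port count for this controller's two root ports, and
		 * copy it back.
		 */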
3759 usb_hub_descriptor_t hubd;
3760
3761 totlen = uimin(buflen, sizeof(hubd));
3762 memcpy(&hubd, buf, totlen);
3763 hubd.bNbrPorts = 2;
3764 memcpy(buf, &hubd, totlen);
3765 break;
3766 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
3767 if (len != 4) {
3768 return -1;
3769 }
3770 memset(buf, 0, len);
3771 totlen = len;
3772 break;
3773 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
3774 if (index == 1)
3775 port = UHCI_PORTSC1;
3776 else if (index == 2)
3777 port = UHCI_PORTSC2;
3778 else {
3779 return -1;
3780 }
3781 if (len != 4) {
3782 return -1;
3783 }
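		/*
		 * Translate the raw PORTSC bits into the hub-class
		 * wPortStatus/wPortChange bitmap.  UHCI has no per-port
		 * power switching, so the port always reports power on, and
		 * reset completion is tracked in software via sc_isreset.
		 */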
3784 x = UREAD2(sc, port);
3785 status = change = 0;
3786 if (x & UHCI_PORTSC_CCS)
3787 status |= UPS_CURRENT_CONNECT_STATUS;
3788 if (x & UHCI_PORTSC_CSC)
3789 change |= UPS_C_CONNECT_STATUS;
3790 if (x & UHCI_PORTSC_PE)
3791 status |= UPS_PORT_ENABLED;
3792 if (x & UHCI_PORTSC_POEDC)
3793 change |= UPS_C_PORT_ENABLED;
3794 if (x & UHCI_PORTSC_OCI)
3795 status |= UPS_OVERCURRENT_INDICATOR;
3796 if (x & UHCI_PORTSC_OCIC)
3797 change |= UPS_C_OVERCURRENT_INDICATOR;
3798 if (x & UHCI_PORTSC_SUSP)
3799 status |= UPS_SUSPEND;
3800 if (x & UHCI_PORTSC_LSDA)
3801 status |= UPS_LOW_SPEED;
3802 status |= UPS_PORT_POWER;
3803 if (sc->sc_isreset)
3804 change |= UPS_C_PORT_RESET;
3805 USETW(ps.wPortStatus, status);
3806 USETW(ps.wPortChange, change);
3807 totlen = uimin(len, sizeof(ps));
3808 memcpy(buf, &ps, totlen);
3809 break;
3810 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
3811 return -1;
3812 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
3813 break;
3814 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
3815 if (index == 1)
3816 port = UHCI_PORTSC1;
3817 else if (index == 2)
3818 port = UHCI_PORTSC2;
3819 else {
3820 return -1;
3821 }
3822 		switch (value) {
3823 case UHF_PORT_ENABLE:
3824 x = URWMASK(UREAD2(sc, port));
3825 UWRITE2(sc, port, x | UHCI_PORTSC_PE);
3826 break;
3827 case UHF_PORT_SUSPEND:
3828 x = URWMASK(UREAD2(sc, port));
3829 UWRITE2(sc, port, x | UHCI_PORTSC_SUSP);
3830 break;
3831 case UHF_PORT_RESET:
3832 err = uhci_portreset(sc, index);
3833 if (err != USBD_NORMAL_COMPLETION)
3834 return -1;
3835 return 0;
3836 case UHF_PORT_POWER:
3837 /* Pretend we turned on power */
3838 return 0;
3839 case UHF_C_PORT_CONNECTION:
3840 case UHF_C_PORT_ENABLE:
3841 case UHF_C_PORT_OVER_CURRENT:
3842 case UHF_PORT_CONNECTION:
3843 case UHF_PORT_OVER_CURRENT:
3844 case UHF_PORT_LOW_SPEED:
3845 case UHF_C_PORT_SUSPEND:
3846 case UHF_C_PORT_RESET:
3847 default:
3848 return -1;
3849 }
3850 break;
3851 default:
3852 /* default from usbroothub */
3853 DPRINTF("returning %jd (usbroothub default)",
3854 buflen, 0, 0, 0);
3855 return buflen;
3856 }
3857
3858 DPRINTF("returning %jd", totlen, 0, 0, 0);
3859
3860 return totlen;
3861 }
3862
3863 /* Abort a root interrupt request. */
3864 void
3865 uhci_root_intr_abort(struct usbd_xfer *xfer)
3866 {
3867 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3868
3869 KASSERT(mutex_owned(&sc->sc_lock));
3870 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
3871
3872 	/*
3873 	 * Try to stop the callout before it fires.  If we got in too
3874 	 * late, too bad; but if the callout has yet to run and
3875 	 * complete the xfer, cancel the xfer ourselves.
3876 	 */
3877 callout_stop(&sc->sc_poll_handle);
3878 if (sc->sc_intr_xfer == NULL)
3879 return;
3880
3881 KASSERT(sc->sc_intr_xfer == xfer);
3882 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
3883 xfer->ux_status = USBD_CANCELLED;
3884 #ifdef DIAGNOSTIC
3885 UHCI_XFER2UXFER(xfer)->ux_isdone = true;
3886 #endif
3887 usb_transfer_complete(xfer);
3888 }
3889
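/* Queue a root interrupt transfer; start it now if the pipe was idle. */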
3890 usbd_status
3891 uhci_root_intr_transfer(struct usbd_xfer *xfer)
3892 {
3893 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3894 usbd_status err;
3895
3896 /* Insert last in queue. */
3897 mutex_enter(&sc->sc_lock);
3898 err = usb_insert_transfer(xfer);
3899 mutex_exit(&sc->sc_lock);
3900 if (err)
3901 return err;
3902
3903 	/*
3904 	 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
3905 	 * so start the first xfer on the queue.
3906 	 */
3907 return uhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
3908 }
3909
3910 /* Start a transfer on the root interrupt pipe */
3911 usbd_status
3912 uhci_root_intr_start(struct usbd_xfer *xfer)
3913 {
3914 struct usbd_pipe *pipe = xfer->ux_pipe;
3915 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3916 unsigned int ival;
3917 const bool polling = sc->sc_bus.ub_usepolling;
3918
3919 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3920 DPRINTF("xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, xfer->ux_length,
3921 xfer->ux_flags, 0);
3922
3923 if (sc->sc_dying)
3924 return USBD_IOERROR;
3925
3926 if (!polling)
3927 mutex_enter(&sc->sc_lock);
3928
3929 KASSERT(sc->sc_intr_xfer == NULL);
3930
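	/*
	 * The UHCI root hub does not interrupt on port status changes, so
	 * the root interrupt pipe is serviced by a periodic callout that
	 * polls the port registers.  The polling interval comes from the
	 * endpoint's bInterval (milliseconds for a full-speed interrupt
	 * endpoint), converted to ticks.
	 */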
3931 /* XXX temporary variable needed to avoid gcc3 warning */
3932 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
3933 sc->sc_ival = mstohz(ival);
3934 callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
3935 sc->sc_intr_xfer = xfer;
3936 xfer->ux_status = USBD_IN_PROGRESS;
3937
3938 if (!polling)
3939 mutex_exit(&sc->sc_lock);
3940
3941 return USBD_IN_PROGRESS;
3942 }
3943
3944 /* Close the root interrupt pipe. */
3945 void
3946 uhci_root_intr_close(struct usbd_pipe *pipe)
3947 {
3948 uhci_softc_t *sc __diagused = UHCI_PIPE2SC(pipe);
3949 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3950
3951 KASSERT(mutex_owned(&sc->sc_lock));
3952
3953 /*
3954 * The caller must arrange to have aborted the pipe already, so
3955 * there can be no intr xfer in progress. The callout may
3956 * still be pending from a prior intr xfer -- if it has already
3957 * fired, it will see there is nothing to do, and do nothing.
3958 */
3959 KASSERT(sc->sc_intr_xfer == NULL);
3960 KASSERT(!callout_pending(&sc->sc_poll_handle));
3961 }
3962