      1 /*	$NetBSD: uhci.c,v 1.305 2020/12/22 01:07:23 riastradh Exp $	*/
2
3 /*
4 * Copyright (c) 1998, 2004, 2011, 2012, 2016, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology, Jared D. McNeill (jmcneill (at) invisible.ca),
10 * Matthew R. Green (mrg (at) eterna.com.au) and Nick Hudson.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * USB Universal Host Controller driver.
36 * Handles e.g. PIIX3 and PIIX4.
37 *
38 * UHCI spec: http://www.intel.com/technology/usb/spec.htm
39 * USB spec: http://www.usb.org/developers/docs/
40 * PIIXn spec: ftp://download.intel.com/design/intarch/datashts/29055002.pdf
41 * ftp://download.intel.com/design/intarch/datashts/29056201.pdf
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: uhci.c,v 1.305 2020/12/22 01:07:23 riastradh Exp $");
46
47 #ifdef _KERNEL_OPT
48 #include "opt_usb.h"
49 #endif
50
51 #include <sys/param.h>
52
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/device.h>
56 #include <sys/kernel.h>
57 #include <sys/kmem.h>
58 #include <sys/mutex.h>
59 #include <sys/proc.h>
60 #include <sys/queue.h>
61 #include <sys/select.h>
62 #include <sys/sysctl.h>
63 #include <sys/systm.h>
64
65 #include <machine/endian.h>
66
67 #include <dev/usb/usb.h>
68 #include <dev/usb/usbdi.h>
69 #include <dev/usb/usbdivar.h>
70 #include <dev/usb/usb_mem.h>
71
72 #include <dev/usb/uhcireg.h>
73 #include <dev/usb/uhcivar.h>
74 #include <dev/usb/usbroothub.h>
75 #include <dev/usb/usbhist.h>
76
77 /* Use bandwidth reclamation for control transfers. Some devices choke on it. */
78 /*#define UHCI_CTL_LOOP */
79
80 #ifdef UHCI_DEBUG
81 uhci_softc_t *thesc;
82 int uhcinoloop = 0;
83 #endif
84
85 #ifdef USB_DEBUG
86 #ifndef UHCI_DEBUG
87 #define uhcidebug 0
88 #else
89 static int uhcidebug = 0;
90
91 SYSCTL_SETUP(sysctl_hw_uhci_setup, "sysctl hw.uhci setup")
92 {
93 int err;
94 const struct sysctlnode *rnode;
95 const struct sysctlnode *cnode;
96
97 err = sysctl_createv(clog, 0, NULL, &rnode,
98 CTLFLAG_PERMANENT, CTLTYPE_NODE, "uhci",
99 SYSCTL_DESCR("uhci global controls"),
100 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
101
102 if (err)
103 goto fail;
104
105 /* control debugging printfs */
106 err = sysctl_createv(clog, 0, &rnode, &cnode,
107 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
108 "debug", SYSCTL_DESCR("Enable debugging output"),
109 NULL, 0, &uhcidebug, sizeof(uhcidebug), CTL_CREATE, CTL_EOL);
110 if (err)
111 goto fail;
112
113 return;
114 fail:
115 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
116 }
117
118 #endif /* UHCI_DEBUG */
119 #endif /* USB_DEBUG */
120
121 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOGN(uhcidebug,1,FMT,A,B,C,D)
122 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(uhcidebug,N,FMT,A,B,C,D)
123 #define UHCIHIST_FUNC() USBHIST_FUNC()
124 #define UHCIHIST_CALLED(name) USBHIST_CALLED(uhcidebug)
125
126 /*
127 * The UHCI controller is little endian, so on big endian machines
128 * the data stored in memory needs to be swapped.
129 */
130
131 struct uhci_pipe {
132 struct usbd_pipe pipe;
133 int nexttoggle;
134
135 u_char aborting;
    136 	struct usbd_xfer *abortstart, *abortend;
137
138 /* Info needed for different pipe kinds. */
139 union {
140 /* Control pipe */
141 struct {
142 uhci_soft_qh_t *sqh;
143 usb_dma_t reqdma;
144 uhci_soft_td_t *setup;
145 uhci_soft_td_t *stat;
146 } ctrl;
147 /* Interrupt pipe */
148 struct {
149 int npoll;
150 uhci_soft_qh_t **qhs;
151 } intr;
152 /* Bulk pipe */
153 struct {
154 uhci_soft_qh_t *sqh;
155 } bulk;
156 /* Isochronous pipe */
157 struct isoc {
158 uhci_soft_td_t **stds;
159 int next, inuse;
160 } isoc;
161 };
162 };
163
164 typedef TAILQ_HEAD(ux_completeq, uhci_xfer) ux_completeq_t;
165
166 Static void uhci_globalreset(uhci_softc_t *);
167 Static usbd_status uhci_portreset(uhci_softc_t*, int);
168 Static void uhci_reset(uhci_softc_t *);
169 Static usbd_status uhci_run(uhci_softc_t *, int, int);
170 Static uhci_soft_td_t *uhci_alloc_std(uhci_softc_t *);
171 Static void uhci_free_std(uhci_softc_t *, uhci_soft_td_t *);
172 Static void uhci_free_std_locked(uhci_softc_t *, uhci_soft_td_t *);
173 Static uhci_soft_qh_t *uhci_alloc_sqh(uhci_softc_t *);
174 Static void uhci_free_sqh(uhci_softc_t *, uhci_soft_qh_t *);
175 #if 0
176 Static void uhci_enter_ctl_q(uhci_softc_t *, uhci_soft_qh_t *,
177 uhci_intr_info_t *);
178 Static void uhci_exit_ctl_q(uhci_softc_t *, uhci_soft_qh_t *);
179 #endif
180
181 #if 0
182 Static void uhci_free_std_chain(uhci_softc_t *, uhci_soft_td_t *,
183 uhci_soft_td_t *);
184 #endif
185 Static int uhci_alloc_std_chain(uhci_softc_t *, struct usbd_xfer *,
186 int, int, uhci_soft_td_t **);
187 Static void uhci_free_stds(uhci_softc_t *, struct uhci_xfer *);
188
189 Static void uhci_reset_std_chain(uhci_softc_t *, struct usbd_xfer *,
190 int, int, int *, uhci_soft_td_t **);
191
192 Static void uhci_poll_hub(void *);
193 Static void uhci_check_intr(uhci_softc_t *, struct uhci_xfer *,
194 ux_completeq_t *);
195 Static void uhci_idone(struct uhci_xfer *, ux_completeq_t *);
196
197 Static void uhci_abortx(struct usbd_xfer *);
198
199 Static void uhci_add_ls_ctrl(uhci_softc_t *, uhci_soft_qh_t *);
200 Static void uhci_add_hs_ctrl(uhci_softc_t *, uhci_soft_qh_t *);
201 Static void uhci_add_bulk(uhci_softc_t *, uhci_soft_qh_t *);
202 Static void uhci_remove_ls_ctrl(uhci_softc_t *,uhci_soft_qh_t *);
203 Static void uhci_remove_hs_ctrl(uhci_softc_t *,uhci_soft_qh_t *);
204 Static void uhci_remove_bulk(uhci_softc_t *,uhci_soft_qh_t *);
205 Static void uhci_add_loop(uhci_softc_t *);
206 Static void uhci_rem_loop(uhci_softc_t *);
207
208 Static usbd_status uhci_setup_isoc(struct usbd_pipe *);
209
210 Static struct usbd_xfer *
211 uhci_allocx(struct usbd_bus *, unsigned int);
212 Static void uhci_freex(struct usbd_bus *, struct usbd_xfer *);
213 Static bool uhci_dying(struct usbd_bus *);
214 Static void uhci_get_lock(struct usbd_bus *, kmutex_t **);
215 Static int uhci_roothub_ctrl(struct usbd_bus *,
216 usb_device_request_t *, void *, int);
217
218 Static int uhci_device_ctrl_init(struct usbd_xfer *);
219 Static void uhci_device_ctrl_fini(struct usbd_xfer *);
220 Static usbd_status uhci_device_ctrl_transfer(struct usbd_xfer *);
221 Static usbd_status uhci_device_ctrl_start(struct usbd_xfer *);
222 Static void uhci_device_ctrl_abort(struct usbd_xfer *);
223 Static void uhci_device_ctrl_close(struct usbd_pipe *);
224 Static void uhci_device_ctrl_done(struct usbd_xfer *);
225
226 Static int uhci_device_intr_init(struct usbd_xfer *);
227 Static void uhci_device_intr_fini(struct usbd_xfer *);
228 Static usbd_status uhci_device_intr_transfer(struct usbd_xfer *);
229 Static usbd_status uhci_device_intr_start(struct usbd_xfer *);
230 Static void uhci_device_intr_abort(struct usbd_xfer *);
231 Static void uhci_device_intr_close(struct usbd_pipe *);
232 Static void uhci_device_intr_done(struct usbd_xfer *);
233
234 Static int uhci_device_bulk_init(struct usbd_xfer *);
235 Static void uhci_device_bulk_fini(struct usbd_xfer *);
236 Static usbd_status uhci_device_bulk_transfer(struct usbd_xfer *);
237 Static usbd_status uhci_device_bulk_start(struct usbd_xfer *);
238 Static void uhci_device_bulk_abort(struct usbd_xfer *);
239 Static void uhci_device_bulk_close(struct usbd_pipe *);
240 Static void uhci_device_bulk_done(struct usbd_xfer *);
241
242 Static int uhci_device_isoc_init(struct usbd_xfer *);
243 Static void uhci_device_isoc_fini(struct usbd_xfer *);
244 Static usbd_status uhci_device_isoc_transfer(struct usbd_xfer *);
245 Static void uhci_device_isoc_abort(struct usbd_xfer *);
246 Static void uhci_device_isoc_close(struct usbd_pipe *);
247 Static void uhci_device_isoc_done(struct usbd_xfer *);
248
249 Static usbd_status uhci_root_intr_transfer(struct usbd_xfer *);
250 Static usbd_status uhci_root_intr_start(struct usbd_xfer *);
251 Static void uhci_root_intr_abort(struct usbd_xfer *);
252 Static void uhci_root_intr_close(struct usbd_pipe *);
253 Static void uhci_root_intr_done(struct usbd_xfer *);
254
255 Static usbd_status uhci_open(struct usbd_pipe *);
256 Static void uhci_poll(struct usbd_bus *);
257 Static void uhci_softintr(void *);
258
259 Static void uhci_add_intr(uhci_softc_t *, uhci_soft_qh_t *);
260 Static void uhci_remove_intr(uhci_softc_t *, uhci_soft_qh_t *);
261 Static usbd_status uhci_device_setintr(uhci_softc_t *,
262 struct uhci_pipe *, int);
263
264 Static void uhci_device_clear_toggle(struct usbd_pipe *);
265 Static void uhci_noop(struct usbd_pipe *);
266
267 static inline uhci_soft_qh_t *
268 uhci_find_prev_qh(uhci_soft_qh_t *, uhci_soft_qh_t *);
269
270 #ifdef UHCI_DEBUG
271 Static void uhci_dump_all(uhci_softc_t *);
272 Static void uhci_dumpregs(uhci_softc_t *);
273 Static void uhci_dump_qhs(uhci_soft_qh_t *);
274 Static void uhci_dump_qh(uhci_soft_qh_t *);
275 Static void uhci_dump_tds(uhci_soft_td_t *);
276 Static void uhci_dump_td(uhci_soft_td_t *);
277 Static void uhci_dump_ii(struct uhci_xfer *);
278 void uhci_dump(void);
279 #endif
280
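/*
 * Register access helpers.  Each read/write of a controller register is
 * preceded by a bus space barrier (UBARR) so that accesses are not
 * reordered with respect to one another.
 */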
281 #define UBARR(sc) bus_space_barrier((sc)->iot, (sc)->ioh, 0, (sc)->sc_size, \
282 BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE)
283 #define UWRITE1(sc, r, x) \
284 do { UBARR(sc); bus_space_write_1((sc)->iot, (sc)->ioh, (r), (x)); \
285 } while (/*CONSTCOND*/0)
286 #define UWRITE2(sc, r, x) \
287 do { UBARR(sc); bus_space_write_2((sc)->iot, (sc)->ioh, (r), (x)); \
288 } while (/*CONSTCOND*/0)
289 #define UWRITE4(sc, r, x) \
290 do { UBARR(sc); bus_space_write_4((sc)->iot, (sc)->ioh, (r), (x)); \
291 } while (/*CONSTCOND*/0)
292
293 static __inline uint8_t
294 UREAD1(uhci_softc_t *sc, bus_size_t r)
295 {
296
297 UBARR(sc);
298 return bus_space_read_1(sc->iot, sc->ioh, r);
299 }
300
301 static __inline uint16_t
302 UREAD2(uhci_softc_t *sc, bus_size_t r)
303 {
304
305 UBARR(sc);
306 return bus_space_read_2(sc->iot, sc->ioh, r);
307 }
308
309 #ifdef UHCI_DEBUG
310 static __inline uint32_t
311 UREAD4(uhci_softc_t *sc, bus_size_t r)
312 {
313
314 UBARR(sc);
315 return bus_space_read_4(sc->iot, sc->ioh, r);
316 }
317 #endif
318
319 #define UHCICMD(sc, cmd) UWRITE2(sc, UHCI_CMD, cmd)
320 #define UHCISTS(sc) UREAD2(sc, UHCI_STS)
321
322 #define UHCI_RESET_TIMEOUT 100 /* ms, reset timeout */
323
324 #define UHCI_CURFRAME(sc) (UREAD2(sc, UHCI_FRNUM) & UHCI_FRNUM_MASK)
325
326 const struct usbd_bus_methods uhci_bus_methods = {
327 .ubm_open = uhci_open,
328 .ubm_softint = uhci_softintr,
329 .ubm_dopoll = uhci_poll,
330 .ubm_allocx = uhci_allocx,
331 .ubm_freex = uhci_freex,
332 .ubm_abortx = uhci_abortx,
333 .ubm_dying = uhci_dying,
334 .ubm_getlock = uhci_get_lock,
335 .ubm_rhctrl = uhci_roothub_ctrl,
336 };
337
338 const struct usbd_pipe_methods uhci_root_intr_methods = {
339 .upm_transfer = uhci_root_intr_transfer,
340 .upm_start = uhci_root_intr_start,
341 .upm_abort = uhci_root_intr_abort,
342 .upm_close = uhci_root_intr_close,
343 .upm_cleartoggle = uhci_noop,
344 .upm_done = uhci_root_intr_done,
345 };
346
347 const struct usbd_pipe_methods uhci_device_ctrl_methods = {
348 .upm_init = uhci_device_ctrl_init,
349 .upm_fini = uhci_device_ctrl_fini,
350 .upm_transfer = uhci_device_ctrl_transfer,
351 .upm_start = uhci_device_ctrl_start,
352 .upm_abort = uhci_device_ctrl_abort,
353 .upm_close = uhci_device_ctrl_close,
354 .upm_cleartoggle = uhci_noop,
355 .upm_done = uhci_device_ctrl_done,
356 };
357
358 const struct usbd_pipe_methods uhci_device_intr_methods = {
359 .upm_init = uhci_device_intr_init,
360 .upm_fini = uhci_device_intr_fini,
361 .upm_transfer = uhci_device_intr_transfer,
362 .upm_start = uhci_device_intr_start,
363 .upm_abort = uhci_device_intr_abort,
364 .upm_close = uhci_device_intr_close,
365 .upm_cleartoggle = uhci_device_clear_toggle,
366 .upm_done = uhci_device_intr_done,
367 };
368
369 const struct usbd_pipe_methods uhci_device_bulk_methods = {
370 .upm_init = uhci_device_bulk_init,
371 .upm_fini = uhci_device_bulk_fini,
372 .upm_transfer = uhci_device_bulk_transfer,
373 .upm_start = uhci_device_bulk_start,
374 .upm_abort = uhci_device_bulk_abort,
375 .upm_close = uhci_device_bulk_close,
376 .upm_cleartoggle = uhci_device_clear_toggle,
377 .upm_done = uhci_device_bulk_done,
378 };
379
380 const struct usbd_pipe_methods uhci_device_isoc_methods = {
381 .upm_init = uhci_device_isoc_init,
382 .upm_fini = uhci_device_isoc_fini,
383 .upm_transfer = uhci_device_isoc_transfer,
384 .upm_abort = uhci_device_isoc_abort,
385 .upm_close = uhci_device_isoc_close,
386 .upm_cleartoggle = uhci_noop,
387 .upm_done = uhci_device_isoc_done,
388 };
389
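/*
 * Add/remove an xfer to/from the list of active transfers that
 * uhci_softintr() scans for completion.  The list is protected by
 * sc->sc_lock.
 */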
390 static inline void
391 uhci_add_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux)
392 {
393
394 TAILQ_INSERT_TAIL(&sc->sc_intrhead, ux, ux_list);
395 }
396
397 static inline void
398 uhci_del_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux)
399 {
400
401 TAILQ_REMOVE(&sc->sc_intrhead, ux, ux_list);
402 }
403
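/*
 * Walk the horizontal QH chain starting at pqh and return the QH whose
 * hlink points at sqh, i.e. sqh's predecessor in the schedule.
 */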
404 static inline uhci_soft_qh_t *
405 uhci_find_prev_qh(uhci_soft_qh_t *pqh, uhci_soft_qh_t *sqh)
406 {
407 UHCIHIST_FUNC(); UHCIHIST_CALLED();
408 DPRINTFN(15, "pqh=%#jx sqh=%#jx", (uintptr_t)pqh, (uintptr_t)sqh, 0, 0);
409
410 for (; pqh->hlink != sqh; pqh = pqh->hlink) {
411 #if defined(DIAGNOSTIC) || defined(UHCI_DEBUG)
412 usb_syncmem(&pqh->dma,
413 pqh->offs + offsetof(uhci_qh_t, qh_hlink),
414 sizeof(pqh->qh.qh_hlink),
415 BUS_DMASYNC_POSTWRITE);
416 if (le32toh(pqh->qh.qh_hlink) & UHCI_PTR_T) {
417 printf("%s: QH not found\n", __func__);
418 return NULL;
419 }
420 #endif
421 }
422 return pqh;
423 }
424
425 void
426 uhci_globalreset(uhci_softc_t *sc)
427 {
428 UHCICMD(sc, UHCI_CMD_GRESET); /* global reset */
429 usb_delay_ms(&sc->sc_bus, USB_BUS_RESET_DELAY); /* wait a little */
430 UHCICMD(sc, 0); /* do nothing */
431 }
432
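/*
 * Initialise the controller: allocate the physical frame list and the
 * dummy QHs/TD that make up the static schedule (last QH, bulk, high
 * speed control and low speed control), hook every virtual frame into
 * the frame list, and start the controller with interrupts enabled.
 */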
433 int
434 uhci_init(uhci_softc_t *sc)
435 {
436 usbd_status err;
437 int i, j;
438 uhci_soft_qh_t *clsqh, *chsqh, *bsqh, *sqh, *lsqh;
439 uhci_soft_td_t *std;
440
441 UHCIHIST_FUNC(); UHCIHIST_CALLED();
442
443 #ifdef UHCI_DEBUG
444 thesc = sc;
445
446 if (uhcidebug >= 2)
447 uhci_dumpregs(sc);
448 #endif
449
450 sc->sc_suspend = PWR_RESUME;
451
452 UWRITE2(sc, UHCI_INTR, 0); /* disable interrupts */
453 uhci_globalreset(sc); /* reset the controller */
454 uhci_reset(sc);
455
456 /* Allocate and initialize real frame array. */
457 err = usb_allocmem(&sc->sc_bus,
458 UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t),
459 UHCI_FRAMELIST_ALIGN, USBMALLOC_COHERENT, &sc->sc_dma);
460 if (err)
461 return err;
462 sc->sc_pframes = KERNADDR(&sc->sc_dma, 0);
463 /* set frame number to 0 */
464 UWRITE2(sc, UHCI_FRNUM, 0);
465 /* set frame list */
466 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0));
467
468 /* Initialise mutex early for uhci_alloc_* */
469 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
470 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
471
472 /*
473 * Allocate a TD, inactive, that hangs from the last QH.
474 * This is to avoid a bug in the PIIX that makes it run berserk
475 * otherwise.
476 */
477 std = uhci_alloc_std(sc);
478 if (std == NULL)
479 return ENOMEM;
480 std->link.std = NULL;
481 std->td.td_link = htole32(UHCI_PTR_T);
482 std->td.td_status = htole32(0); /* inactive */
483 std->td.td_token = htole32(0);
484 std->td.td_buffer = htole32(0);
485 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
486 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
487
488 /* Allocate the dummy QH marking the end and used for looping the QHs.*/
489 lsqh = uhci_alloc_sqh(sc);
490 if (lsqh == NULL)
491 goto fail1;
492 lsqh->hlink = NULL;
493 lsqh->qh.qh_hlink = htole32(UHCI_PTR_T); /* end of QH chain */
494 lsqh->elink = std;
495 lsqh->qh.qh_elink = htole32(std->physaddr | UHCI_PTR_TD);
496 sc->sc_last_qh = lsqh;
497 usb_syncmem(&lsqh->dma, lsqh->offs, sizeof(lsqh->qh),
498 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
499
500 /* Allocate the dummy QH where bulk traffic will be queued. */
501 bsqh = uhci_alloc_sqh(sc);
502 if (bsqh == NULL)
503 goto fail2;
504 bsqh->hlink = lsqh;
505 bsqh->qh.qh_hlink = htole32(lsqh->physaddr | UHCI_PTR_QH);
506 bsqh->elink = NULL;
507 bsqh->qh.qh_elink = htole32(UHCI_PTR_T);
508 sc->sc_bulk_start = sc->sc_bulk_end = bsqh;
509 usb_syncmem(&bsqh->dma, bsqh->offs, sizeof(bsqh->qh),
510 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
511
512 /* Allocate dummy QH where high speed control traffic will be queued. */
513 chsqh = uhci_alloc_sqh(sc);
514 if (chsqh == NULL)
515 goto fail3;
516 chsqh->hlink = bsqh;
517 chsqh->qh.qh_hlink = htole32(bsqh->physaddr | UHCI_PTR_QH);
518 chsqh->elink = NULL;
519 chsqh->qh.qh_elink = htole32(UHCI_PTR_T);
520 sc->sc_hctl_start = sc->sc_hctl_end = chsqh;
521 usb_syncmem(&chsqh->dma, chsqh->offs, sizeof(chsqh->qh),
522 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
523
524 /* Allocate dummy QH where control traffic will be queued. */
525 clsqh = uhci_alloc_sqh(sc);
526 if (clsqh == NULL)
527 goto fail4;
528 clsqh->hlink = chsqh;
529 clsqh->qh.qh_hlink = htole32(chsqh->physaddr | UHCI_PTR_QH);
530 clsqh->elink = NULL;
531 clsqh->qh.qh_elink = htole32(UHCI_PTR_T);
532 sc->sc_lctl_start = sc->sc_lctl_end = clsqh;
533 usb_syncmem(&clsqh->dma, clsqh->offs, sizeof(clsqh->qh),
534 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
535
536 /*
537 * Make all (virtual) frame list pointers point to the interrupt
538 * queue heads and the interrupt queue heads at the control
539 * queue head and point the physical frame list to the virtual.
540 */
541 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
542 std = uhci_alloc_std(sc);
543 sqh = uhci_alloc_sqh(sc);
544 if (std == NULL || sqh == NULL)
545 return USBD_NOMEM;
546 std->link.sqh = sqh;
547 std->td.td_link = htole32(sqh->physaddr | UHCI_PTR_QH);
548 std->td.td_status = htole32(UHCI_TD_IOS); /* iso, inactive */
549 std->td.td_token = htole32(0);
550 std->td.td_buffer = htole32(0);
551 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
552 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
553 sqh->hlink = clsqh;
554 sqh->qh.qh_hlink = htole32(clsqh->physaddr | UHCI_PTR_QH);
555 sqh->elink = NULL;
556 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
557 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
558 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
559 sc->sc_vframes[i].htd = std;
560 sc->sc_vframes[i].etd = std;
561 sc->sc_vframes[i].hqh = sqh;
562 sc->sc_vframes[i].eqh = sqh;
563 for (j = i;
564 j < UHCI_FRAMELIST_COUNT;
565 j += UHCI_VFRAMELIST_COUNT)
566 sc->sc_pframes[j] = htole32(std->physaddr);
567 }
568 usb_syncmem(&sc->sc_dma, 0,
569 UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t),
570 BUS_DMASYNC_PREWRITE);
571
572
573 TAILQ_INIT(&sc->sc_intrhead);
574
575 sc->sc_xferpool = pool_cache_init(sizeof(struct uhci_xfer), 0, 0, 0,
576 "uhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
577
578 callout_init(&sc->sc_poll_handle, CALLOUT_MPSAFE);
579 callout_setfunc(&sc->sc_poll_handle, uhci_poll_hub, sc);
580
581 /* Set up the bus struct. */
582 sc->sc_bus.ub_methods = &uhci_bus_methods;
583 sc->sc_bus.ub_pipesize = sizeof(struct uhci_pipe);
584 sc->sc_bus.ub_usedma = true;
585 sc->sc_bus.ub_dmaflags = USBMALLOC_MULTISEG;
586
587 UHCICMD(sc, UHCI_CMD_MAXP); /* Assume 64 byte packets at frame end */
588
589 DPRINTF("Enabling...", 0, 0, 0, 0);
590
591 err = uhci_run(sc, 1, 0); /* and here we go... */
592 UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE | UHCI_INTR_RIE |
593 UHCI_INTR_IOCE | UHCI_INTR_SPIE); /* enable interrupts */
594 return err;
595
596 fail4:
597 uhci_free_sqh(sc, chsqh);
598 fail3:
    599 	uhci_free_sqh(sc, bsqh);
600 fail2:
601 uhci_free_sqh(sc, lsqh);
602 fail1:
603 uhci_free_std(sc, std);
604
605 return ENOMEM;
606 }
607
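/*
 * Autoconfiguration glue: deactivation, child detachment and detaching
 * the controller itself.
 */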
608 int
609 uhci_activate(device_t self, enum devact act)
610 {
611 struct uhci_softc *sc = device_private(self);
612
613 switch (act) {
614 case DVACT_DEACTIVATE:
615 sc->sc_dying = 1;
616 return 0;
617 default:
618 return EOPNOTSUPP;
619 }
620 }
621
622 void
623 uhci_childdet(device_t self, device_t child)
624 {
625 struct uhci_softc *sc = device_private(self);
626
627 KASSERT(sc->sc_child == child);
628 sc->sc_child = NULL;
629 }
630
631 int
632 uhci_detach(struct uhci_softc *sc, int flags)
633 {
634 int rv = 0;
635
636 if (sc->sc_child != NULL)
637 rv = config_detach(sc->sc_child, flags);
638
639 if (rv != 0)
640 return rv;
641
642 callout_halt(&sc->sc_poll_handle, NULL);
643 callout_destroy(&sc->sc_poll_handle);
644
645 mutex_destroy(&sc->sc_lock);
646 mutex_destroy(&sc->sc_intr_lock);
647
648 pool_cache_destroy(sc->sc_xferpool);
649
650 /* XXX free other data structures XXX */
651
652 return rv;
653 }
654
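/*
 * Allocate and free xfer structures (struct uhci_xfer) from the
 * per-controller pool cache.
 */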
655 struct usbd_xfer *
656 uhci_allocx(struct usbd_bus *bus, unsigned int nframes)
657 {
658 struct uhci_softc *sc = UHCI_BUS2SC(bus);
659 struct usbd_xfer *xfer;
660
661 xfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
662 if (xfer != NULL) {
663 memset(xfer, 0, sizeof(struct uhci_xfer));
664
665 #ifdef DIAGNOSTIC
666 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
667 uxfer->ux_isdone = true;
668 xfer->ux_state = XFER_BUSY;
669 #endif
670 }
671 return xfer;
672 }
673
674 void
675 uhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
676 {
677 struct uhci_softc *sc = UHCI_BUS2SC(bus);
678 struct uhci_xfer *uxfer __diagused = UHCI_XFER2UXFER(xfer);
679
680 KASSERTMSG(xfer->ux_state == XFER_BUSY ||
681 xfer->ux_status == USBD_NOT_STARTED,
682 "xfer %p state %d\n", xfer, xfer->ux_state);
683 KASSERTMSG(uxfer->ux_isdone || xfer->ux_status == USBD_NOT_STARTED,
684 "xfer %p not done\n", xfer);
685 #ifdef DIAGNOSTIC
686 xfer->ux_state = XFER_FREE;
687 #endif
688 pool_cache_put(sc->sc_xferpool, xfer);
689 }
690
691 Static bool
692 uhci_dying(struct usbd_bus *bus)
693 {
694 struct uhci_softc *sc = UHCI_BUS2SC(bus);
695
696 return sc->sc_dying;
697 }
698
699 Static void
700 uhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
701 {
702 struct uhci_softc *sc = UHCI_BUS2SC(bus);
703
704 *lock = &sc->sc_lock;
705 }
706
707
708 /*
709 * Handle suspend/resume.
710 *
711 * We need to switch to polling mode here, because this routine is
712 * called from an interrupt context. This is all right since we
713 * are almost suspended anyway.
714 */
715 bool
716 uhci_resume(device_t dv, const pmf_qual_t *qual)
717 {
718 uhci_softc_t *sc = device_private(dv);
719 int cmd;
720
721 mutex_spin_enter(&sc->sc_intr_lock);
722
723 cmd = UREAD2(sc, UHCI_CMD);
724 sc->sc_bus.ub_usepolling++;
725 UWRITE2(sc, UHCI_INTR, 0);
726 uhci_globalreset(sc);
727 uhci_reset(sc);
728 if (cmd & UHCI_CMD_RS)
729 uhci_run(sc, 0, 1);
730
731 /* restore saved state */
732 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0));
733 UWRITE2(sc, UHCI_FRNUM, sc->sc_saved_frnum);
734 UWRITE1(sc, UHCI_SOF, sc->sc_saved_sof);
735
736 UHCICMD(sc, cmd | UHCI_CMD_FGR); /* force resume */
737 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_DELAY, &sc->sc_intr_lock);
738 UHCICMD(sc, cmd & ~UHCI_CMD_EGSM); /* back to normal */
739 UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE |
740 UHCI_INTR_RIE | UHCI_INTR_IOCE | UHCI_INTR_SPIE);
741 UHCICMD(sc, UHCI_CMD_MAXP);
742 uhci_run(sc, 1, 1); /* and start traffic again */
743 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_RECOVERY, &sc->sc_intr_lock);
744 sc->sc_bus.ub_usepolling--;
745 if (sc->sc_intr_xfer != NULL)
746 callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
747 #ifdef UHCI_DEBUG
748 if (uhcidebug >= 2)
749 uhci_dumpregs(sc);
750 #endif
751
752 sc->sc_suspend = PWR_RESUME;
753 mutex_spin_exit(&sc->sc_intr_lock);
754
755 return true;
756 }
757
758 bool
759 uhci_suspend(device_t dv, const pmf_qual_t *qual)
760 {
761 uhci_softc_t *sc = device_private(dv);
762 int cmd;
763
764 mutex_spin_enter(&sc->sc_intr_lock);
765
766 cmd = UREAD2(sc, UHCI_CMD);
767
768 #ifdef UHCI_DEBUG
769 if (uhcidebug >= 2)
770 uhci_dumpregs(sc);
771 #endif
772 sc->sc_suspend = PWR_SUSPEND;
773 if (sc->sc_intr_xfer != NULL)
774 callout_halt(&sc->sc_poll_handle, &sc->sc_intr_lock);
775 sc->sc_bus.ub_usepolling++;
776
777 uhci_run(sc, 0, 1); /* stop the controller */
778 cmd &= ~UHCI_CMD_RS;
779
780 /* save some state if BIOS doesn't */
781 sc->sc_saved_frnum = UREAD2(sc, UHCI_FRNUM);
782 sc->sc_saved_sof = UREAD1(sc, UHCI_SOF);
783
784 UWRITE2(sc, UHCI_INTR, 0); /* disable intrs */
785
786 UHCICMD(sc, cmd | UHCI_CMD_EGSM); /* enter suspend */
787 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_WAIT, &sc->sc_intr_lock);
788 sc->sc_bus.ub_usepolling--;
789
790 mutex_spin_exit(&sc->sc_intr_lock);
791
792 return true;
793 }
794
795 #ifdef UHCI_DEBUG
796 Static void
797 uhci_dumpregs(uhci_softc_t *sc)
798 {
799 UHCIHIST_FUNC(); UHCIHIST_CALLED();
800 DPRINTF("cmd =%04jx sts =%04jx intr =%04jx frnum =%04jx",
801 UREAD2(sc, UHCI_CMD), UREAD2(sc, UHCI_STS),
802 UREAD2(sc, UHCI_INTR), UREAD2(sc, UHCI_FRNUM));
803 DPRINTF("sof =%04jx portsc1=%04jx portsc2=%04jx flbase=%08jx",
804 UREAD1(sc, UHCI_SOF), UREAD2(sc, UHCI_PORTSC1),
805 UREAD2(sc, UHCI_PORTSC2), UREAD4(sc, UHCI_FLBASEADDR));
806 }
807
808 void
809 uhci_dump_td(uhci_soft_td_t *p)
810 {
811 UHCIHIST_FUNC(); UHCIHIST_CALLED();
812
813 usb_syncmem(&p->dma, p->offs, sizeof(p->td),
814 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
815
816 DPRINTF("TD(%#jx) at 0x%08jx", (uintptr_t)p, p->physaddr, 0, 0);
817 DPRINTF(" link=0x%08jx status=0x%08jx "
818 "token=0x%08x buffer=0x%08x",
819 le32toh(p->td.td_link),
820 le32toh(p->td.td_status),
821 le32toh(p->td.td_token),
822 le32toh(p->td.td_buffer));
823
824 DPRINTF("bitstuff=%jd crcto =%jd nak =%jd babble =%jd",
825 !!(le32toh(p->td.td_status) & UHCI_TD_BITSTUFF),
826 !!(le32toh(p->td.td_status) & UHCI_TD_CRCTO),
827 !!(le32toh(p->td.td_status) & UHCI_TD_NAK),
828 !!(le32toh(p->td.td_status) & UHCI_TD_BABBLE));
829 DPRINTF("dbuffer =%jd stalled =%jd active =%jd ioc =%jd",
830 !!(le32toh(p->td.td_status) & UHCI_TD_DBUFFER),
831 !!(le32toh(p->td.td_status) & UHCI_TD_STALLED),
832 !!(le32toh(p->td.td_status) & UHCI_TD_ACTIVE),
833 !!(le32toh(p->td.td_status) & UHCI_TD_IOC));
834 DPRINTF("ios =%jd ls =%jd spd =%jd",
835 !!(le32toh(p->td.td_status) & UHCI_TD_IOS),
836 !!(le32toh(p->td.td_status) & UHCI_TD_LS),
837 !!(le32toh(p->td.td_status) & UHCI_TD_SPD), 0);
838 DPRINTF("errcnt =%d actlen =%d pid=%02x",
839 UHCI_TD_GET_ERRCNT(le32toh(p->td.td_status)),
840 UHCI_TD_GET_ACTLEN(le32toh(p->td.td_status)),
841 UHCI_TD_GET_PID(le32toh(p->td.td_token)), 0);
842 DPRINTF("addr=%jd endpt=%jd D=%jd maxlen=%jd,",
843 UHCI_TD_GET_DEVADDR(le32toh(p->td.td_token)),
844 UHCI_TD_GET_ENDPT(le32toh(p->td.td_token)),
845 UHCI_TD_GET_DT(le32toh(p->td.td_token)),
846 UHCI_TD_GET_MAXLEN(le32toh(p->td.td_token)));
847 }
848
849 void
850 uhci_dump_qh(uhci_soft_qh_t *sqh)
851 {
852 UHCIHIST_FUNC(); UHCIHIST_CALLED();
853
854 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
855 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
856
857 DPRINTF("QH(%#jx) at 0x%08jx: hlink=%08jx elink=%08jx", (uintptr_t)sqh,
858 (int)sqh->physaddr, le32toh(sqh->qh.qh_hlink),
859 le32toh(sqh->qh.qh_elink));
860
861 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD);
862 }
863
864
865 #if 1
866 void
867 uhci_dump(void)
868 {
869 uhci_dump_all(thesc);
870 }
871 #endif
872
873 void
874 uhci_dump_all(uhci_softc_t *sc)
875 {
876 uhci_dumpregs(sc);
877 /*printf("framelist[i].link = %08x\n", sc->sc_framelist[0].link);*/
878 uhci_dump_qhs(sc->sc_lctl_start);
879 }
880
881
882 void
883 uhci_dump_qhs(uhci_soft_qh_t *sqh)
884 {
885 UHCIHIST_FUNC(); UHCIHIST_CALLED();
886
887 uhci_dump_qh(sqh);
888
889 /*
    890 	 * uhci_dump_qhs displays all the QHs and TDs from the given QH onwards.
891 * Traverses sideways first, then down.
892 *
893 * QH1
894 * QH2
895 * No QH
896 * TD2.1
897 * TD2.2
898 * TD1.1
899 * etc.
900 *
    901 	 * TD2.x being the TDs queued at QH2, and QH2 being referenced from QH1.
902 */
903
904 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
905 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
906 if (sqh->hlink != NULL && !(le32toh(sqh->qh.qh_hlink) & UHCI_PTR_T))
907 uhci_dump_qhs(sqh->hlink);
908 else
909 DPRINTF("No QH", 0, 0, 0, 0);
910 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD);
911
912 if (sqh->elink != NULL && !(le32toh(sqh->qh.qh_elink) & UHCI_PTR_T))
913 uhci_dump_tds(sqh->elink);
914 else
915 DPRINTF("No QH", 0, 0, 0, 0);
916 }
917
918 void
919 uhci_dump_tds(uhci_soft_td_t *std)
920 {
921 uhci_soft_td_t *td;
922 int stop;
923
924 for (td = std; td != NULL; td = td->link.std) {
925 uhci_dump_td(td);
926
927 /*
928 * Check whether the link pointer in this TD marks
929 * the link pointer as end of queue. This avoids
930 * printing the free list in case the queue/TD has
931 * already been moved there (seatbelt).
932 */
933 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link),
934 sizeof(td->td.td_link),
935 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
936 stop = (le32toh(td->td.td_link) & UHCI_PTR_T ||
937 le32toh(td->td.td_link) == 0);
938 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link),
939 sizeof(td->td.td_link), BUS_DMASYNC_PREREAD);
940 if (stop)
941 break;
942 }
943 }
944
945 Static void
946 uhci_dump_ii(struct uhci_xfer *ux)
947 {
948 struct usbd_pipe *pipe;
949 usb_endpoint_descriptor_t *ed;
950 struct usbd_device *dev;
951
952 if (ux == NULL) {
953 printf("ux NULL\n");
954 return;
955 }
956 pipe = ux->ux_xfer.ux_pipe;
957 if (pipe == NULL) {
958 printf("ux %p: done=%d pipe=NULL\n", ux, ux->ux_isdone);
959 return;
960 }
961 if (pipe->up_endpoint == NULL) {
962 printf("ux %p: done=%d pipe=%p pipe->up_endpoint=NULL\n",
963 ux, ux->ux_isdone, pipe);
964 return;
965 }
966 if (pipe->up_dev == NULL) {
967 printf("ux %p: done=%d pipe=%p pipe->up_dev=NULL\n",
968 ux, ux->ux_isdone, pipe);
969 return;
970 }
971 ed = pipe->up_endpoint->ue_edesc;
972 dev = pipe->up_dev;
973 printf("ux %p: done=%d dev=%p vid=0x%04x pid=0x%04x addr=%d pipe=%p ep=0x%02x attr=0x%02x\n",
974 ux, ux->ux_isdone, dev,
975 UGETW(dev->ud_ddesc.idVendor),
976 UGETW(dev->ud_ddesc.idProduct),
977 dev->ud_addr, pipe,
978 ed->bEndpointAddress, ed->bmAttributes);
979 }
980
981 void uhci_dump_iis(struct uhci_softc *sc);
982 void
983 uhci_dump_iis(struct uhci_softc *sc)
984 {
985 struct uhci_xfer *ux;
986
987 printf("interrupt list:\n");
988 TAILQ_FOREACH(ux, &sc->sc_intrhead, ux_list)
989 uhci_dump_ii(ux);
990 }
991
992 void iidump(void);
993 void iidump(void) { uhci_dump_iis(thesc); }
994
995 #endif
996
997 /*
998 * This routine is executed periodically and simulates interrupts
999 * from the root controller interrupt pipe for port status change.
1000 */
1001 void
1002 uhci_poll_hub(void *addr)
1003 {
1004 struct uhci_softc *sc = addr;
1005 struct usbd_xfer *xfer;
1006 u_char *p;
1007
1008 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1009
1010 mutex_enter(&sc->sc_lock);
1011
1012 /*
1013 * If the intr xfer has completed or been synchronously
1014 * aborted, we have nothing to do.
1015 */
1016 xfer = sc->sc_intr_xfer;
1017 if (xfer == NULL)
1018 goto out;
1019 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
1020
1021 /*
1022 * If the intr xfer for which we were scheduled is done, and
1023 * another intr xfer has been submitted, let that one be dealt
1024 * with when the callout fires again.
1025 *
   1026 	 * The call to callout_pending is racy, but the transition
1027 * from pending to invoking happens atomically. The
1028 * callout_ack ensures callout_invoking does not return true
1029 * due to this invocation of the callout; the lock ensures the
1030 * next invocation of the callout cannot callout_ack (unless it
1031 * had already run to completion and nulled sc->sc_intr_xfer,
   1032 	 * in which case we would have bailed out already).
1033 */
1034 callout_ack(&sc->sc_poll_handle);
1035 if (callout_pending(&sc->sc_poll_handle) ||
1036 callout_invoking(&sc->sc_poll_handle))
1037 goto out;
1038
1039 /*
1040 * Check flags for the two interrupt ports, and set them in the
   1041 	 * buffer if an interrupt arrived; otherwise arrange to poll again later.
1042 */
1043 p = xfer->ux_buf;
1044 p[0] = 0;
1045 if (UREAD2(sc, UHCI_PORTSC1) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC))
1046 p[0] |= 1<<1;
1047 if (UREAD2(sc, UHCI_PORTSC2) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC))
1048 p[0] |= 1<<2;
1049 if (p[0] == 0) {
1050 /*
1051 * No change -- try again in a while, unless we're
1052 * suspending, in which case we'll try again after
1053 * resume.
1054 */
1055 if (sc->sc_suspend != PWR_SUSPEND)
1056 callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
1057 goto out;
1058 }
1059
1060 /*
1061 * Interrupt completed, and the xfer has not been completed or
1062 * synchronously aborted. Complete the xfer now.
1063 */
1064 xfer->ux_actlen = 1;
1065 xfer->ux_status = USBD_NORMAL_COMPLETION;
1066 #ifdef DIAGNOSTIC
1067 UHCI_XFER2UXFER(xfer)->ux_isdone = true;
1068 #endif
1069 usb_transfer_complete(xfer);
1070
1071 out: mutex_exit(&sc->sc_lock);
1072 }
1073
1074 void
1075 uhci_root_intr_done(struct usbd_xfer *xfer)
1076 {
1077 struct uhci_softc *sc = UHCI_XFER2SC(xfer);
1078
1079 KASSERT(mutex_owned(&sc->sc_lock));
1080
1081 /* Claim the xfer so it doesn't get completed again. */
1082 KASSERT(sc->sc_intr_xfer == xfer);
1083 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
1084 sc->sc_intr_xfer = NULL;
1085 }
1086
1087 /*
1088 * Let the last QH loop back to the high speed control transfer QH.
   1089 	 * This is what Intel calls "bandwidth reclamation" and improves
1090 * USB performance a lot for some devices.
1091 * If we are already looping, just count it.
1092 */
1093 void
1094 uhci_add_loop(uhci_softc_t *sc)
1095 {
1096 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1097
1098 #ifdef UHCI_DEBUG
1099 if (uhcinoloop)
1100 return;
1101 #endif
1102 if (++sc->sc_loops == 1) {
1103 DPRINTFN(5, "add loop", 0, 0, 0, 0);
1104 /* Note, we don't loop back the soft pointer. */
1105 sc->sc_last_qh->qh.qh_hlink =
1106 htole32(sc->sc_hctl_start->physaddr | UHCI_PTR_QH);
1107 usb_syncmem(&sc->sc_last_qh->dma,
1108 sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink),
1109 sizeof(sc->sc_last_qh->qh.qh_hlink),
1110 BUS_DMASYNC_PREWRITE);
1111 }
1112 }
1113
1114 void
1115 uhci_rem_loop(uhci_softc_t *sc)
1116 {
1117 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1118
1119 #ifdef UHCI_DEBUG
1120 if (uhcinoloop)
1121 return;
1122 #endif
1123 if (--sc->sc_loops == 0) {
1124 DPRINTFN(5, "remove loop", 0, 0, 0, 0);
1125 sc->sc_last_qh->qh.qh_hlink = htole32(UHCI_PTR_T);
1126 usb_syncmem(&sc->sc_last_qh->dma,
1127 sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink),
1128 sizeof(sc->sc_last_qh->qh.qh_hlink),
1129 BUS_DMASYNC_PREWRITE);
1130 }
1131 }
1132
1133 /* Add high speed control QH, called with lock held. */
1134 void
1135 uhci_add_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1136 {
1137 uhci_soft_qh_t *eqh;
1138
1139 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1140
1141 KASSERT(mutex_owned(&sc->sc_lock));
1142
1143 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1144 eqh = sc->sc_hctl_end;
1145 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1146 sizeof(eqh->qh.qh_hlink),
1147 BUS_DMASYNC_POSTWRITE);
1148 sqh->hlink = eqh->hlink;
1149 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1150 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1151 BUS_DMASYNC_PREWRITE);
1152 eqh->hlink = sqh;
1153 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1154 sc->sc_hctl_end = sqh;
1155 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1156 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1157 #ifdef UHCI_CTL_LOOP
1158 uhci_add_loop(sc);
1159 #endif
1160 }
1161
1162 /* Remove high speed control QH, called with lock held. */
1163 void
1164 uhci_remove_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1165 {
1166 uhci_soft_qh_t *pqh;
1167 uint32_t elink;
1168
1169 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1170
1171 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1172 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1173 #ifdef UHCI_CTL_LOOP
1174 uhci_rem_loop(sc);
1175 #endif
1176 /*
1177 * The T bit should be set in the elink of the QH so that the HC
1178 * doesn't follow the pointer. This condition may fail if the
   1179 	 * transferred packet was short so that the QH still points
1180 * at the last used TD.
1181 * In this case we set the T bit and wait a little for the HC
1182 * to stop looking at the TD.
1183 * Note that if the TD chain is large enough, the controller
1184 * may still be looking at the chain at the end of this function.
1185 * uhci_free_std_chain() will make sure the controller stops
1186 * looking at it quickly, but until then we should not change
1187 * sqh->hlink.
1188 */
1189 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1190 sizeof(sqh->qh.qh_elink),
1191 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1192 elink = le32toh(sqh->qh.qh_elink);
1193 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1194 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD);
1195 if (!(elink & UHCI_PTR_T)) {
1196 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1197 usb_syncmem(&sqh->dma,
1198 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1199 sizeof(sqh->qh.qh_elink),
1200 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1201 delay(UHCI_QH_REMOVE_DELAY);
1202 }
1203
1204 pqh = uhci_find_prev_qh(sc->sc_hctl_start, sqh);
1205 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1206 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1207 pqh->hlink = sqh->hlink;
1208 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1209 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1210 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1211 delay(UHCI_QH_REMOVE_DELAY);
1212 if (sc->sc_hctl_end == sqh)
1213 sc->sc_hctl_end = pqh;
1214 }
1215
1216 /* Add low speed control QH, called with lock held. */
1217 void
1218 uhci_add_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1219 {
1220 uhci_soft_qh_t *eqh;
1221
1222 KASSERT(mutex_owned(&sc->sc_lock));
1223
1224 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1225 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1226
1227 eqh = sc->sc_lctl_end;
1228 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1229 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1230 sqh->hlink = eqh->hlink;
1231 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1232 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1233 BUS_DMASYNC_PREWRITE);
1234 eqh->hlink = sqh;
1235 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1236 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1237 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1238 sc->sc_lctl_end = sqh;
1239 }
1240
1241 /* Remove low speed control QH, called with lock held. */
1242 void
1243 uhci_remove_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1244 {
1245 uhci_soft_qh_t *pqh;
1246 uint32_t elink;
1247
1248 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1249
1250 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1251 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1252
1253 /* See comment in uhci_remove_hs_ctrl() */
1254 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1255 sizeof(sqh->qh.qh_elink),
1256 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1257 elink = le32toh(sqh->qh.qh_elink);
1258 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1259 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD);
1260 if (!(elink & UHCI_PTR_T)) {
1261 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1262 usb_syncmem(&sqh->dma,
1263 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1264 sizeof(sqh->qh.qh_elink),
1265 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1266 delay(UHCI_QH_REMOVE_DELAY);
1267 }
1268 pqh = uhci_find_prev_qh(sc->sc_lctl_start, sqh);
1269 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1270 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1271 pqh->hlink = sqh->hlink;
1272 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1273 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1274 sizeof(pqh->qh.qh_hlink),
1275 BUS_DMASYNC_PREWRITE);
1276 delay(UHCI_QH_REMOVE_DELAY);
1277 if (sc->sc_lctl_end == sqh)
1278 sc->sc_lctl_end = pqh;
1279 }
1280
1281 /* Add bulk QH, called with lock held. */
1282 void
1283 uhci_add_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1284 {
1285 uhci_soft_qh_t *eqh;
1286
1287 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1288
1289 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1290 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1291
1292 eqh = sc->sc_bulk_end;
1293 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1294 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1295 sqh->hlink = eqh->hlink;
1296 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1297 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1298 BUS_DMASYNC_PREWRITE);
1299 eqh->hlink = sqh;
1300 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1301 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1302 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1303 sc->sc_bulk_end = sqh;
1304 uhci_add_loop(sc);
1305 }
1306
1307 /* Remove bulk QH, called with lock held. */
1308 void
1309 uhci_remove_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1310 {
1311 uhci_soft_qh_t *pqh;
1312
1313 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1314
1315 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1316 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1317
1318 uhci_rem_loop(sc);
1319 /* See comment in uhci_remove_hs_ctrl() */
1320 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1321 sizeof(sqh->qh.qh_elink),
1322 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1323 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) {
1324 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1325 usb_syncmem(&sqh->dma,
1326 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1327 sizeof(sqh->qh.qh_elink),
1328 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1329 delay(UHCI_QH_REMOVE_DELAY);
1330 }
1331 pqh = uhci_find_prev_qh(sc->sc_bulk_start, sqh);
1332 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1333 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1334 pqh->hlink = sqh->hlink;
1335 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1336 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1337 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1338 delay(UHCI_QH_REMOVE_DELAY);
1339 if (sc->sc_bulk_end == sqh)
1340 sc->sc_bulk_end = pqh;
1341 }
1342
1343 Static int uhci_intr1(uhci_softc_t *);
1344
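/*
 * Hardware interrupt handler.  The interrupt is ignored if the
 * controller is dying, has no power, is being polled, or has all
 * interrupt sources disabled; otherwise the work is done in
 * uhci_intr1().
 */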
1345 int
1346 uhci_intr(void *arg)
1347 {
1348 uhci_softc_t *sc = arg;
1349 int ret = 0;
1350
1351 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1352
1353 mutex_spin_enter(&sc->sc_intr_lock);
1354
1355 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1356 goto done;
1357
1358 if (sc->sc_bus.ub_usepolling || UREAD2(sc, UHCI_INTR) == 0) {
1359 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1360 goto done;
1361 }
1362
1363 ret = uhci_intr1(sc);
1364
1365 done:
1366 mutex_spin_exit(&sc->sc_intr_lock);
1367 return ret;
1368 }
1369
1370 int
1371 uhci_intr1(uhci_softc_t *sc)
1372 {
1373 int status;
1374 int ack;
1375
1376 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1377
1378 #ifdef UHCI_DEBUG
1379 if (uhcidebug >= 15) {
1380 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0);
1381 uhci_dumpregs(sc);
1382 }
1383 #endif
1384
1385 KASSERT(mutex_owned(&sc->sc_intr_lock));
1386
1387 status = UREAD2(sc, UHCI_STS) & UHCI_STS_ALLINTRS;
1388 /* Check if the interrupt was for us. */
1389 if (status == 0)
1390 return 0;
1391
1392 if (sc->sc_suspend != PWR_RESUME) {
1393 #ifdef DIAGNOSTIC
1394 printf("%s: interrupt while not operating ignored\n",
1395 device_xname(sc->sc_dev));
1396 #endif
1397 UWRITE2(sc, UHCI_STS, status); /* acknowledge the ints */
1398 return 0;
1399 }
1400
1401 ack = 0;
1402 if (status & UHCI_STS_USBINT)
1403 ack |= UHCI_STS_USBINT;
1404 if (status & UHCI_STS_USBEI)
1405 ack |= UHCI_STS_USBEI;
1406 if (status & UHCI_STS_RD) {
1407 ack |= UHCI_STS_RD;
1408 #ifdef UHCI_DEBUG
1409 printf("%s: resume detect\n", device_xname(sc->sc_dev));
1410 #endif
1411 }
1412 if (status & UHCI_STS_HSE) {
1413 ack |= UHCI_STS_HSE;
1414 printf("%s: host system error\n", device_xname(sc->sc_dev));
1415 }
1416 if (status & UHCI_STS_HCPE) {
1417 ack |= UHCI_STS_HCPE;
1418 printf("%s: host controller process error\n",
1419 device_xname(sc->sc_dev));
1420 }
1421
   1422 	/* When HCHalted=1 and Run/Stop=0, it is normal */
1423 if ((status & UHCI_STS_HCH) && (UREAD2(sc, UHCI_CMD) & UHCI_CMD_RS)) {
1424 /* no acknowledge needed */
1425 if (!sc->sc_dying) {
1426 printf("%s: host controller halted\n",
1427 device_xname(sc->sc_dev));
1428 #ifdef UHCI_DEBUG
1429 uhci_dump_all(sc);
1430 #endif
1431 }
1432 sc->sc_dying = 1;
1433 }
1434
1435 if (!ack)
1436 return 0; /* nothing to acknowledge */
1437 UWRITE2(sc, UHCI_STS, ack); /* acknowledge the ints */
1438
1439 usb_schedsoftintr(&sc->sc_bus);
1440
1441 DPRINTFN(15, "sc %#jx done", (uintptr_t)sc, 0, 0, 0);
1442
1443 return 1;
1444 }
1445
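/*
 * Soft interrupt handler: scan every transfer on the interrupt list for
 * completion and complete the finished ones outside of hard interrupt
 * context.
 */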
1446 void
1447 uhci_softintr(void *v)
1448 {
1449 struct usbd_bus *bus = v;
1450 uhci_softc_t *sc = UHCI_BUS2SC(bus);
1451 struct uhci_xfer *ux, *nextux;
1452 ux_completeq_t cq;
1453
1454 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1455 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0);
1456
1457 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1458
1459 TAILQ_INIT(&cq);
1460 /*
1461 * Interrupts on UHCI really suck. When the host controller
1462 * interrupts because a transfer is completed there is no
1463 * way of knowing which transfer it was. You can scan down
1464 * the TDs and QHs of the previous frame to limit the search,
1465 * but that assumes that the interrupt was not delayed by more
1466 * than 1 ms, which may not always be true (e.g. after debug
1467 * output on a slow console).
1468 * We scan all interrupt descriptors to see if any have
1469 * completed.
1470 */
1471 TAILQ_FOREACH_SAFE(ux, &sc->sc_intrhead, ux_list, nextux) {
1472 uhci_check_intr(sc, ux, &cq);
1473 }
1474
1475 /*
1476 * We abuse ux_list for the interrupt and complete lists and
1477 * interrupt transfers will get re-added here so use
1478 * the _SAFE version of TAILQ_FOREACH.
1479 */
1480 TAILQ_FOREACH_SAFE(ux, &cq, ux_list, nextux) {
1481 DPRINTF("ux %#jx", (uintptr_t)ux, 0, 0, 0);
1482 usb_transfer_complete(&ux->ux_xfer);
1483 }
1484
1485 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1486 }
1487
1488 /* Check for an interrupt. */
1489 void
1490 uhci_check_intr(uhci_softc_t *sc, struct uhci_xfer *ux, ux_completeq_t *cqp)
1491 {
1492 uhci_soft_td_t *std, *fstd = NULL, *lstd = NULL;
1493 uint32_t status;
1494
1495 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1496 DPRINTFN(15, "ux %#jx", (uintptr_t)ux, 0, 0, 0);
1497
1498 KASSERT(ux != NULL);
1499
1500 struct usbd_xfer *xfer = &ux->ux_xfer;
1501 if (xfer->ux_status == USBD_CANCELLED ||
1502 xfer->ux_status == USBD_TIMEOUT) {
1503 DPRINTF("aborted xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
1504 return;
1505 }
1506
1507 switch (ux->ux_type) {
1508 case UX_CTRL:
1509 fstd = ux->ux_setup;
1510 lstd = ux->ux_stat;
1511 break;
1512 case UX_BULK:
1513 case UX_INTR:
1514 case UX_ISOC:
1515 fstd = ux->ux_stdstart;
1516 lstd = ux->ux_stdend;
1517 break;
1518 default:
1519 KASSERT(false);
1520 break;
1521 }
1522 if (fstd == NULL)
1523 return;
1524
1525 KASSERT(lstd != NULL);
1526
1527 usb_syncmem(&lstd->dma,
1528 lstd->offs + offsetof(uhci_td_t, td_status),
1529 sizeof(lstd->td.td_status),
1530 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1531 status = le32toh(lstd->td.td_status);
1532 usb_syncmem(&lstd->dma,
1533 lstd->offs + offsetof(uhci_td_t, td_status),
1534 sizeof(lstd->td.td_status),
1535 BUS_DMASYNC_PREREAD);
1536
1537 /* If the last TD is not marked active we can complete */
1538 if (!(status & UHCI_TD_ACTIVE)) {
1539 done:
1540 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0);
1541 uhci_idone(ux, cqp);
1542 return;
1543 }
1544
1545 /*
1546 * If the last TD is still active we need to check whether there
1547 * is an error somewhere in the middle, or whether there was a
1548 * short packet (SPD and not ACTIVE).
1549 */
1550 DPRINTFN(12, "active ux=%#jx", (uintptr_t)ux, 0, 0, 0);
1551 for (std = fstd; std != lstd; std = std->link.std) {
1552 usb_syncmem(&std->dma,
1553 std->offs + offsetof(uhci_td_t, td_status),
1554 sizeof(std->td.td_status),
1555 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1556 status = le32toh(std->td.td_status);
1557 usb_syncmem(&std->dma,
1558 std->offs + offsetof(uhci_td_t, td_status),
1559 sizeof(std->td.td_status), BUS_DMASYNC_PREREAD);
1560
1561 /* If there's an active TD the xfer isn't done. */
1562 if (status & UHCI_TD_ACTIVE) {
1563 DPRINTFN(12, "ux=%#jx std=%#jx still active",
1564 (uintptr_t)ux, (uintptr_t)std, 0, 0);
1565 return;
1566 }
1567
1568 /* Any kind of error makes the xfer done. */
1569 if (status & UHCI_TD_STALLED)
1570 goto done;
1571
1572 /*
1573 * If the data phase of a control transfer is short, we need
1574 * to complete the status stage
1575 */
1576
1577 if ((status & UHCI_TD_SPD) && ux->ux_type == UX_CTRL) {
1578 struct uhci_pipe *upipe =
1579 UHCI_PIPE2UPIPE(xfer->ux_pipe);
1580 uhci_soft_qh_t *sqh = upipe->ctrl.sqh;
1581 uhci_soft_td_t *stat = upipe->ctrl.stat;
1582
1583 DPRINTFN(12, "ux=%#jx std=%#jx control status"
1584 "phase needs completion", (uintptr_t)ux,
1585 (uintptr_t)ux->ux_stdstart, 0, 0);
1586
1587 sqh->qh.qh_elink =
1588 htole32(stat->physaddr | UHCI_PTR_TD);
1589 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1590 BUS_DMASYNC_PREWRITE);
1591 break;
1592 }
1593
1594 /* We want short packets, and it is short: it's done */
1595 usb_syncmem(&std->dma,
1596 std->offs + offsetof(uhci_td_t, td_token),
1597 sizeof(std->td.td_token),
1598 BUS_DMASYNC_POSTWRITE);
1599
1600 if ((status & UHCI_TD_SPD) &&
1601 UHCI_TD_GET_ACTLEN(status) <
1602 UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token))) {
1603 goto done;
1604 }
1605 }
1606 }
1607
1608 /* Called with USB lock held. */
1609 void
1610 uhci_idone(struct uhci_xfer *ux, ux_completeq_t *cqp)
1611 {
1612 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1613 struct usbd_xfer *xfer = &ux->ux_xfer;
1614 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
1615 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
1616 uhci_soft_td_t *std;
1617 uint32_t status = 0, nstatus;
1618 int actlen;
1619
1620 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1621
1622 DPRINTFN(12, "ux=%#jx", (uintptr_t)ux, 0, 0, 0);
1623
1624 /*
1625 * Try to claim this xfer for completion. If it has already
1626 * completed or aborted, drop it on the floor.
1627 */
1628 if (!usbd_xfer_trycomplete(xfer))
1629 return;
1630
1631 #ifdef DIAGNOSTIC
1632 #ifdef UHCI_DEBUG
1633 if (ux->ux_isdone) {
1634 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1635 uhci_dump_ii(ux);
1636 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1637 }
1638 #endif
1640 KASSERTMSG(!ux->ux_isdone, "xfer %p type %d status %d", xfer,
1641 ux->ux_type, xfer->ux_status);
1642 ux->ux_isdone = true;
1643 #endif
1644
1645 if (xfer->ux_nframes != 0) {
1646 /* Isoc transfer, do things differently. */
1647 uhci_soft_td_t **stds = upipe->isoc.stds;
1648 int i, n, nframes, len;
1649
1650 DPRINTFN(5, "ux=%#jx isoc ready", (uintptr_t)ux, 0, 0, 0);
1651
1652 nframes = xfer->ux_nframes;
1653 actlen = 0;
1654 n = ux->ux_curframe;
1655 for (i = 0; i < nframes; i++) {
1656 std = stds[n];
1657 #ifdef UHCI_DEBUG
1658 if (uhcidebug >= 5) {
1659 DPRINTF("isoc TD %jd", i, 0, 0, 0);
1660 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1661 uhci_dump_td(std);
1662 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1663 }
1664 #endif
1665 if (++n >= UHCI_VFRAMELIST_COUNT)
1666 n = 0;
1667 usb_syncmem(&std->dma,
1668 std->offs + offsetof(uhci_td_t, td_status),
1669 sizeof(std->td.td_status),
1670 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1671 status = le32toh(std->td.td_status);
1672 len = UHCI_TD_GET_ACTLEN(status);
1673 xfer->ux_frlengths[i] = len;
1674 actlen += len;
1675 }
1676 upipe->isoc.inuse -= nframes;
1677 xfer->ux_actlen = actlen;
1678 xfer->ux_status = USBD_NORMAL_COMPLETION;
1679 goto end;
1680 }
1681
1682 #ifdef UHCI_DEBUG
1683 DPRINTFN(10, "ux=%#jx, xfer=%#jx, pipe=%#jx ready", (uintptr_t)ux,
1684 (uintptr_t)xfer, (uintptr_t)upipe, 0);
1685 if (uhcidebug >= 10) {
1686 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1687 uhci_dump_tds(ux->ux_stdstart);
1688 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1689 }
1690 #endif
1691
1692 /* The transfer is done, compute actual length and status. */
1693 actlen = 0;
1694 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) {
1695 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
1696 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1697 nstatus = le32toh(std->td.td_status);
1698 if (nstatus & UHCI_TD_ACTIVE)
1699 break;
1700
1701 status = nstatus;
1702 if (UHCI_TD_GET_PID(le32toh(std->td.td_token)) !=
1703 UHCI_TD_PID_SETUP)
1704 actlen += UHCI_TD_GET_ACTLEN(status);
1705 else {
1706 /*
1707 * UHCI will report CRCTO in addition to a STALL or NAK
1708 * for a SETUP transaction. See section 3.2.2, "TD
1709 * CONTROL AND STATUS".
1710 */
1711 if (status & (UHCI_TD_STALLED | UHCI_TD_NAK))
1712 status &= ~UHCI_TD_CRCTO;
1713 }
1714 }
1715 /* If there are left over TDs we need to update the toggle. */
1716 if (std != NULL)
1717 upipe->nexttoggle = UHCI_TD_GET_DT(le32toh(std->td.td_token));
1718
1719 status &= UHCI_TD_ERROR;
1720 DPRINTFN(10, "actlen=%jd, status=%#jx", actlen, status, 0, 0);
1721 xfer->ux_actlen = actlen;
1722 if (status != 0) {
1723
1724 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1725 "error, addr=%jd, endpt=0x%02jx",
1726 xfer->ux_pipe->up_dev->ud_addr,
1727 xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress,
1728 0, 0);
1729 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1730 "bitstuff=%jd crcto =%jd nak =%jd babble =%jd",
1731 !!(status & UHCI_TD_BITSTUFF),
1732 !!(status & UHCI_TD_CRCTO),
1733 !!(status & UHCI_TD_NAK),
1734 !!(status & UHCI_TD_BABBLE));
1735 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1736 "dbuffer =%jd stalled =%jd active =%jd",
1737 !!(status & UHCI_TD_DBUFFER),
1738 !!(status & UHCI_TD_STALLED),
1739 !!(status & UHCI_TD_ACTIVE),
1740 0);
1741
1742 if (status == UHCI_TD_STALLED)
1743 xfer->ux_status = USBD_STALLED;
1744 else
1745 xfer->ux_status = USBD_IOERROR; /* more info XXX */
1746 } else {
1747 xfer->ux_status = USBD_NORMAL_COMPLETION;
1748 }
1749
1750 end:
1751 uhci_del_intr_list(sc, ux);
1752 if (cqp)
1753 TAILQ_INSERT_TAIL(cqp, ux, ux_list);
1754
1755 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1756 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0);
1757 }
1758
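/* Poll the controller in lieu of hardware interrupts. */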
1759 void
1760 uhci_poll(struct usbd_bus *bus)
1761 {
1762 uhci_softc_t *sc = UHCI_BUS2SC(bus);
1763
1764 if (UREAD2(sc, UHCI_STS) & UHCI_STS_USBINT) {
1765 mutex_spin_enter(&sc->sc_intr_lock);
1766 uhci_intr1(sc);
1767 mutex_spin_exit(&sc->sc_intr_lock);
1768 }
1769 }
1770
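/*
 * Reset the host controller and wait up to UHCI_RESET_TIMEOUT ms for
 * the HCRESET bit to clear.
 */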
1771 void
1772 uhci_reset(uhci_softc_t *sc)
1773 {
1774 int n;
1775
1776 UHCICMD(sc, UHCI_CMD_HCRESET);
1777 /* The reset bit goes low when the controller is done. */
1778 for (n = 0; n < UHCI_RESET_TIMEOUT &&
1779 (UREAD2(sc, UHCI_CMD) & UHCI_CMD_HCRESET); n++)
1780 usb_delay_ms(&sc->sc_bus, 1);
1781 if (n >= UHCI_RESET_TIMEOUT)
1782 printf("%s: controller did not reset\n",
1783 device_xname(sc->sc_dev));
1784 }
1785
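/*
 * Start or stop the controller and wait for the HCHalted status bit to
 * reflect the requested state.
 */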
1786 usbd_status
1787 uhci_run(uhci_softc_t *sc, int run, int locked)
1788 {
1789 int n, running;
1790 uint16_t cmd;
1791
1792 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1793
1794 run = run != 0;
1795 if (!locked)
1796 mutex_spin_enter(&sc->sc_intr_lock);
1797
1798 DPRINTF("setting run=%jd", run, 0, 0, 0);
1799 cmd = UREAD2(sc, UHCI_CMD);
1800 if (run)
1801 cmd |= UHCI_CMD_RS;
1802 else
1803 cmd &= ~UHCI_CMD_RS;
1804 UHCICMD(sc, cmd);
1805 for (n = 0; n < 10; n++) {
1806 running = !(UREAD2(sc, UHCI_STS) & UHCI_STS_HCH);
1807 /* return when we've entered the state we want */
1808 if (run == running) {
1809 if (!locked)
1810 mutex_spin_exit(&sc->sc_intr_lock);
1811 DPRINTF("done cmd=%#jx sts=%#jx",
1812 UREAD2(sc, UHCI_CMD), UREAD2(sc, UHCI_STS), 0, 0);
1813 return USBD_NORMAL_COMPLETION;
1814 }
1815 usb_delay_ms_locked(&sc->sc_bus, 1, &sc->sc_intr_lock);
1816 }
1817 if (!locked)
1818 mutex_spin_exit(&sc->sc_intr_lock);
1819 printf("%s: cannot %s\n", device_xname(sc->sc_dev),
1820 run ? "start" : "stop");
1821 return USBD_IOERROR;
1822 }
1823
1824 /*
1825 * Memory management routines.
1826 * uhci_alloc_std allocates TDs
1827 * uhci_alloc_sqh allocates QHs
1828  * These two routines do their own free-list management, partly for
1829  * speed and partly because DMA-able memory is allocated with page-size
1830  * granularity, so much memory would be wasted if only one TD/QH
1831  * (32 bytes) were placed in each allocated chunk.
1832 */
1833
1834 uhci_soft_td_t *
1835 uhci_alloc_std(uhci_softc_t *sc)
1836 {
1837 uhci_soft_td_t *std;
1838 usbd_status err;
1839 int i, offs;
1840 usb_dma_t dma;
1841
1842 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1843
1844 mutex_enter(&sc->sc_lock);
1845 if (sc->sc_freetds == NULL) {
1846 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0);
1847 mutex_exit(&sc->sc_lock);
1848
1849 err = usb_allocmem(&sc->sc_bus, UHCI_STD_SIZE * UHCI_STD_CHUNK,
1850 UHCI_TD_ALIGN, USBMALLOC_COHERENT, &dma);
1851 if (err)
1852 return NULL;
1853
1854 mutex_enter(&sc->sc_lock);
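		/* Carve the new chunk into TDs and prepend them to the free list. */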
1855 for (i = 0; i < UHCI_STD_CHUNK; i++) {
1856 offs = i * UHCI_STD_SIZE;
1857 std = KERNADDR(&dma, offs);
1858 std->physaddr = DMAADDR(&dma, offs);
1859 std->dma = dma;
1860 std->offs = offs;
1861 std->link.std = sc->sc_freetds;
1862 sc->sc_freetds = std;
1863 }
1864 }
1865 std = sc->sc_freetds;
1866 sc->sc_freetds = std->link.std;
1867 mutex_exit(&sc->sc_lock);
1868
1869 memset(&std->td, 0, sizeof(uhci_td_t));
1870
1871 return std;
1872 }
1873
1874 #define TD_IS_FREE 0x12345678
1875
1876 void
1877 uhci_free_std_locked(uhci_softc_t *sc, uhci_soft_td_t *std)
1878 {
1879 KASSERT(mutex_owned(&sc->sc_lock));
1880
1881 #ifdef DIAGNOSTIC
1882 if (le32toh(std->td.td_token) == TD_IS_FREE) {
1883 printf("%s: freeing free TD %p\n", __func__, std);
1884 return;
1885 }
1886 std->td.td_token = htole32(TD_IS_FREE);
1887 #endif
1888
1889 std->link.std = sc->sc_freetds;
1890 sc->sc_freetds = std;
1891 }
1892
1893 void
1894 uhci_free_std(uhci_softc_t *sc, uhci_soft_td_t *std)
1895 {
1896 mutex_enter(&sc->sc_lock);
1897 uhci_free_std_locked(sc, std);
1898 mutex_exit(&sc->sc_lock);
1899 }
1900
1901 uhci_soft_qh_t *
1902 uhci_alloc_sqh(uhci_softc_t *sc)
1903 {
1904 uhci_soft_qh_t *sqh;
1905 usbd_status err;
1906 int i, offs;
1907 usb_dma_t dma;
1908
1909 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1910
1911 mutex_enter(&sc->sc_lock);
1912 if (sc->sc_freeqhs == NULL) {
1913 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0);
1914 mutex_exit(&sc->sc_lock);
1915
1916 err = usb_allocmem(&sc->sc_bus, UHCI_SQH_SIZE * UHCI_SQH_CHUNK,
1917 UHCI_QH_ALIGN, USBMALLOC_COHERENT, &dma);
1918 if (err)
1919 return NULL;
1920
1921 mutex_enter(&sc->sc_lock);
1922 for (i = 0; i < UHCI_SQH_CHUNK; i++) {
1923 offs = i * UHCI_SQH_SIZE;
1924 sqh = KERNADDR(&dma, offs);
1925 sqh->physaddr = DMAADDR(&dma, offs);
1926 sqh->dma = dma;
1927 sqh->offs = offs;
1928 sqh->hlink = sc->sc_freeqhs;
1929 sc->sc_freeqhs = sqh;
1930 }
1931 }
1932 sqh = sc->sc_freeqhs;
1933 sc->sc_freeqhs = sqh->hlink;
1934 mutex_exit(&sc->sc_lock);
1935
1936 memset(&sqh->qh, 0, sizeof(uhci_qh_t));
1937
1938 return sqh;
1939 }
1940
1941 void
1942 uhci_free_sqh(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1943 {
1944 KASSERT(mutex_owned(&sc->sc_lock));
1945
1946 sqh->hlink = sc->sc_freeqhs;
1947 sc->sc_freeqhs = sqh;
1948 }
1949
1950 #if 0
1951 void
1952 uhci_free_std_chain(uhci_softc_t *sc, uhci_soft_td_t *std,
1953 uhci_soft_td_t *stdend)
1954 {
1955 uhci_soft_td_t *p;
1956 uint32_t td_link;
1957
1958 	/*
1959 	 * To avoid a race with the controller, which may still be looking
1960 	 * at this chain, we first invalidate all the links and then wait
1961 	 * for the controller to move on to another queue.
1962 	 */
1963 for (p = std; p != stdend; p = p->link.std) {
1964 usb_syncmem(&p->dma,
1965 p->offs + offsetof(uhci_td_t, td_link),
1966 sizeof(p->td.td_link),
1967 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1968 td_link = le32toh(p->td.td_link);
1969 usb_syncmem(&p->dma,
1970 p->offs + offsetof(uhci_td_t, td_link),
1971 sizeof(p->td.td_link),
1972 BUS_DMASYNC_PREREAD);
1973 if ((td_link & UHCI_PTR_T) == 0) {
1974 p->td.td_link = htole32(UHCI_PTR_T);
1975 usb_syncmem(&p->dma,
1976 p->offs + offsetof(uhci_td_t, td_link),
1977 sizeof(p->td.td_link),
1978 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1979 }
1980 }
1981 delay(UHCI_QH_REMOVE_DELAY);
1982
1983 for (; std != stdend; std = p) {
1984 p = std->link.std;
1985 uhci_free_std(sc, std);
1986 }
1987 }
1988 #endif
1989
1990 int
1991 uhci_alloc_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer, int len,
1992 int rd, uhci_soft_td_t **sp)
1993 {
1994 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
1995 uint16_t flags = xfer->ux_flags;
1996 uhci_soft_td_t *p;
1997
1998 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1999
2000 DPRINTFN(8, "xfer=%#jx pipe=%#jx", (uintptr_t)xfer,
2001 (uintptr_t)xfer->ux_pipe, 0, 0);
2002
2003 ASSERT_SLEEPABLE();
2004 KASSERT(sp);
2005
2006 int maxp = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
2007 if (maxp == 0) {
2008 printf("%s: maxp=0\n", __func__);
2009 return EINVAL;
2010 }
2011 size_t ntd = howmany(len, maxp);
2012 	/*
2013 	 * If the transfer is bigger than PAGE_SIZE and maxp is not a factor
2014 	 * of PAGE_SIZE, then we will need another TD per page.
2015 	 */
2016 if (len > PAGE_SIZE && (PAGE_SIZE % maxp) != 0) {
2017 ntd += howmany(len, PAGE_SIZE);
2018 }
2019
2020 	/*
2021 	 * We might need one more TD if we are writing a zero-length packet (ZLP).
2022 	 */
2023 if (!rd && (flags & USBD_FORCE_SHORT_XFER)) {
2024 ntd++;
2025 }
2026 DPRINTFN(10, "maxp=%jd ntd=%jd", maxp, ntd, 0, 0);
2027
2028 uxfer->ux_stds = NULL;
2029 uxfer->ux_nstd = ntd;
2030 if (ntd == 0) {
2031 *sp = NULL;
2032 DPRINTF("ntd=0", 0, 0, 0, 0);
2033 return 0;
2034 }
2035 uxfer->ux_stds = kmem_alloc(sizeof(uhci_soft_td_t *) * ntd,
2036 KM_SLEEP);
2037
2038 for (int i = 0; i < ntd; i++) {
2039 p = uhci_alloc_std(sc);
2040 if (p == NULL) {
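			/* Unwind: free the TDs allocated so far, then the pointer array. */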
2041 if (i != 0) {
2042 uxfer->ux_nstd = i;
2043 uhci_free_stds(sc, uxfer);
2044 }
2045 kmem_free(uxfer->ux_stds,
2046 sizeof(uhci_soft_td_t *) * ntd);
2047 return ENOMEM;
2048 }
2049 uxfer->ux_stds[i] = p;
2050 }
2051
2052 *sp = uxfer->ux_stds[0];
2053
2054 return 0;
2055 }
2056
2057 Static void
2058 uhci_free_stds(uhci_softc_t *sc, struct uhci_xfer *ux)
2059 {
2060 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2061
2062 DPRINTFN(8, "ux=%#jx", (uintptr_t)ux, 0, 0, 0);
2063
2064 mutex_enter(&sc->sc_lock);
2065 for (size_t i = 0; i < ux->ux_nstd; i++) {
2066 uhci_soft_td_t *std = ux->ux_stds[i];
2067 #ifdef DIAGNOSTIC
2068 if (le32toh(std->td.td_token) == TD_IS_FREE) {
2069 printf("%s: freeing free TD %p\n", __func__, std);
2070 return;
2071 }
2072 std->td.td_token = htole32(TD_IS_FREE);
2073 #endif
2074 ux->ux_stds[i]->link.std = sc->sc_freetds;
2075 sc->sc_freetds = std;
2076 }
2077 mutex_exit(&sc->sc_lock);
2078 }
2079
2080
2081 Static void
2082 uhci_reset_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer,
2083 int length, int isread, int *toggle, uhci_soft_td_t **lstd)
2084 {
2085 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2086 struct usbd_pipe *pipe = xfer->ux_pipe;
2087 usb_dma_t *dma = &xfer->ux_dmabuf;
2088 uint16_t flags = xfer->ux_flags;
2089 uhci_soft_td_t *std, *prev;
2090 int len = length;
2091 int tog = *toggle;
2092 int maxp;
2093 uint32_t status;
2094 size_t i, offs;
2095
2096 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2097 DPRINTFN(8, "xfer=%#jx len %jd isread %jd toggle %jd", (uintptr_t)xfer,
2098 len, isread, *toggle);
2099
2100 KASSERT(len != 0 || (!isread && (flags & USBD_FORCE_SHORT_XFER)));
2101
2102 maxp = UGETW(pipe->up_endpoint->ue_edesc->wMaxPacketSize);
2103 KASSERT(maxp != 0);
2104
2105 int addr = xfer->ux_pipe->up_dev->ud_addr;
2106 int endpt = xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress;
2107
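	/* Allow up to three errors before the controller gives up on a TD. */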
2108 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(3) | UHCI_TD_ACTIVE);
2109 if (pipe->up_dev->ud_speed == USB_SPEED_LOW)
2110 status |= UHCI_TD_LS;
2111 if (flags & USBD_SHORT_XFER_OK)
2112 status |= UHCI_TD_SPD;
2113 usb_syncmem(dma, 0, len,
2114 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2115 std = prev = NULL;
2116 for (offs = i = 0; len != 0 && i < uxfer->ux_nstd; i++, prev = std) {
2117 int l = len;
2118 std = uxfer->ux_stds[i];
2119
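		/*
		 * Clamp this TD so that its buffer does not cross a page
		 * boundary of the DMA map.
		 */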
2120 const bus_addr_t sbp = DMAADDR(dma, offs);
2121 const bus_addr_t ebp = DMAADDR(dma, offs + l - 1);
2122 if (((sbp ^ ebp) & ~PAGE_MASK) != 0)
2123 l = PAGE_SIZE - (DMAADDR(dma, offs) & PAGE_MASK);
2124
2125 if (l > maxp)
2126 l = maxp;
2127
2128 if (prev) {
2129 prev->link.std = std;
2130 prev->td.td_link = htole32(
2131 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD
2132 );
2133 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td),
2134 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2135 }
2136
2137 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2138 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2139
2140 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD);
2141 std->td.td_status = htole32(status);
2142 std->td.td_token = htole32(
2143 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) |
2144 UHCI_TD_SET_DEVADDR(addr) |
2145 UHCI_TD_SET_PID(isread ? UHCI_TD_PID_IN : UHCI_TD_PID_OUT) |
2146 UHCI_TD_SET_DT(tog) |
2147 UHCI_TD_SET_MAXLEN(l)
2148 );
2149 std->td.td_buffer = htole32(DMAADDR(dma, offs));
2150
2151 std->link.std = NULL;
2152
2153 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2154 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2155 tog ^= 1;
2156
2157 offs += l;
2158 len -= l;
2159 }
2160 	KASSERTMSG(len == 0, "xfer %p alen %d len %d mps %d ux_nstd %zu i %zu",
2161 xfer, length, len, maxp, uxfer->ux_nstd, i);
2162
2163 if (!isread &&
2164 (flags & USBD_FORCE_SHORT_XFER) &&
2165 length % maxp == 0) {
2166 /* Force a 0 length transfer at the end. */
2167 KASSERTMSG(i < uxfer->ux_nstd, "i=%zu nstd=%zu", i,
2168 uxfer->ux_nstd);
2169 std = uxfer->ux_stds[i++];
2170
2171 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD);
2172 std->td.td_status = htole32(status);
2173 std->td.td_token = htole32(
2174 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) |
2175 UHCI_TD_SET_DEVADDR(addr) |
2176 UHCI_TD_SET_PID(UHCI_TD_PID_OUT) |
2177 UHCI_TD_SET_DT(tog) |
2178 UHCI_TD_SET_MAXLEN(0)
2179 );
2180 std->td.td_buffer = 0;
2181 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2182 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2183
2184 std->link.std = NULL;
2185 if (prev) {
2186 prev->link.std = std;
2187 prev->td.td_link = htole32(
2188 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD
2189 );
2190 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td),
2191 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2192 }
2193 tog ^= 1;
2194 }
2195 *lstd = std;
2196 *toggle = tog;
2197 }
2198
2199 void
2200 uhci_device_clear_toggle(struct usbd_pipe *pipe)
2201 {
2202 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2203 upipe->nexttoggle = 0;
2204 }
2205
2206 void
2207 uhci_noop(struct usbd_pipe *pipe)
2208 {
2209 }
2210
2211 int
2212 uhci_device_bulk_init(struct usbd_xfer *xfer)
2213 {
2214 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2215 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2216 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
2217 int endpt = ed->bEndpointAddress;
2218 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2219 int len = xfer->ux_bufsize;
2220 int err = 0;
2221
2222 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2223 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, len,
2224 xfer->ux_flags, 0);
2225
2226 if (sc->sc_dying)
2227 return USBD_IOERROR;
2228
2229 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2230
2231 uxfer->ux_type = UX_BULK;
2232 err = uhci_alloc_std_chain(sc, xfer, len, isread, &uxfer->ux_stdstart);
2233 if (err)
2234 return err;
2235
2236 #ifdef UHCI_DEBUG
2237 if (uhcidebug >= 10) {
2238 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2239 uhci_dump_tds(uxfer->ux_stdstart);
2240 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2241 }
2242 #endif
2243
2244 return 0;
2245 }
2246
2247 Static void
2248 uhci_device_bulk_fini(struct usbd_xfer *xfer)
2249 {
2250 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2251 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2252
2253 KASSERT(ux->ux_type == UX_BULK);
2254
2255 if (ux->ux_nstd) {
2256 uhci_free_stds(sc, ux);
2257 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2258 }
2259 }
2260
2261 usbd_status
2262 uhci_device_bulk_transfer(struct usbd_xfer *xfer)
2263 {
2264 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2265 usbd_status err;
2266
2267 /* Insert last in queue. */
2268 mutex_enter(&sc->sc_lock);
2269 err = usb_insert_transfer(xfer);
2270 mutex_exit(&sc->sc_lock);
2271 if (err)
2272 return err;
2273
2274 /*
2275 	 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
2276 * so start it first.
2277 */
2278 return uhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2279 }
2280
2281 usbd_status
2282 uhci_device_bulk_start(struct usbd_xfer *xfer)
2283 {
2284 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2285 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2286 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2287 uhci_soft_td_t *data, *dataend;
2288 uhci_soft_qh_t *sqh;
2289 const bool polling = sc->sc_bus.ub_usepolling;
2290 int len;
2291 int endpt;
2292 int isread;
2293
2294 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2295 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2296 xfer->ux_length, xfer->ux_flags, 0);
2297
2298 if (sc->sc_dying)
2299 return USBD_IOERROR;
2300
2301 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2302 KASSERT(xfer->ux_length <= xfer->ux_bufsize);
2303
2304 len = xfer->ux_length;
2305 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2306 isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2307 sqh = upipe->bulk.sqh;
2308
2309 /* Take lock here to protect nexttoggle */
2310 if (!polling)
2311 mutex_enter(&sc->sc_lock);
2312
2313 uhci_reset_std_chain(sc, xfer, len, isread, &upipe->nexttoggle,
2314 &dataend);
2315
2316 data = ux->ux_stdstart;
2317 ux->ux_stdend = dataend;
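	/* Request an interrupt on completion of the final TD only. */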
2318 dataend->td.td_status |= htole32(UHCI_TD_IOC);
2319 usb_syncmem(&dataend->dma,
2320 dataend->offs + offsetof(uhci_td_t, td_status),
2321 sizeof(dataend->td.td_status),
2322 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2323
2324 #ifdef UHCI_DEBUG
2325 if (uhcidebug >= 10) {
2326 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2327 DPRINTFN(10, "before transfer", 0, 0, 0, 0);
2328 uhci_dump_tds(data);
2329 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2330 }
2331 #endif
2332
2333 KASSERT(ux->ux_isdone);
2334 #ifdef DIAGNOSTIC
2335 ux->ux_isdone = false;
2336 #endif
2337
2338 sqh->elink = data;
2339 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD);
2340 /* uhci_add_bulk() will do usb_syncmem(sqh) */
2341
2342 uhci_add_bulk(sc, sqh);
2343 uhci_add_intr_list(sc, ux);
2344 usbd_xfer_schedule_timeout(xfer);
2345 xfer->ux_status = USBD_IN_PROGRESS;
2346 if (!polling)
2347 mutex_exit(&sc->sc_lock);
2348
2349 return USBD_IN_PROGRESS;
2350 }
2351
2352 /* Abort a device bulk request. */
2353 void
2354 uhci_device_bulk_abort(struct usbd_xfer *xfer)
2355 {
2356 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2357
2358 KASSERT(mutex_owned(&sc->sc_lock));
2359
2360 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2361
2362 usbd_xfer_abort(xfer);
2363 }
2364
2365 /*
2366  * To give the hardware time to notice the change, we simply wait.
2367 */
2368 Static void
2369 uhci_abortx(struct usbd_xfer *xfer)
2370 {
2371 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2372 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2373 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2374 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2375 uhci_soft_td_t *std;
2376
2377 DPRINTFN(1,"xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2378
2379 KASSERT(mutex_owned(&sc->sc_lock));
2380 ASSERT_SLEEPABLE();
2381
2382 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2383 xfer->ux_status == USBD_TIMEOUT),
2384 "bad abort status: %d", xfer->ux_status);
2385
2386 /*
2387 * If we're dying, skip the hardware action and just notify the
2388 * software that we're done.
2389 */
2390 if (sc->sc_dying) {
2391 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer,
2392 xfer->ux_status, 0, 0);
2393 goto dying;
2394 }
2395
2396 /*
2397 * HC Step 1: Make interrupt routine and hardware ignore xfer.
2398 */
2399 uhci_del_intr_list(sc, ux);
2400
2401 DPRINTF("stop ux=%#jx", (uintptr_t)ux, 0, 0, 0);
2402 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) {
2403 usb_syncmem(&std->dma,
2404 std->offs + offsetof(uhci_td_t, td_status),
2405 sizeof(std->td.td_status),
2406 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2407 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC));
2408 usb_syncmem(&std->dma,
2409 std->offs + offsetof(uhci_td_t, td_status),
2410 sizeof(std->td.td_status),
2411 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2412 }
2413
2414 /*
2415 * HC Step 2: Wait until we know hardware has finished any possible
2416 * use of the xfer.
2417 */
2418 	/* The hardware finishes within 1 ms; wait a little longer to be sure. */
2419 usb_delay_ms_locked(upipe->pipe.up_dev->ud_bus, 2, &sc->sc_lock);
2420
2421 /*
2422 * HC Step 3: Notify completion to waiting xfers.
2423 */
2424 dying:
2425 #ifdef DIAGNOSTIC
2426 ux->ux_isdone = true;
2427 #endif
2428 usb_transfer_complete(xfer);
2429 DPRINTFN(14, "end", 0, 0, 0, 0);
2430
2431 KASSERT(mutex_owned(&sc->sc_lock));
2432 }
2433
2434 /* Close a device bulk pipe. */
2435 void
2436 uhci_device_bulk_close(struct usbd_pipe *pipe)
2437 {
2438 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2439 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2440
2441 KASSERT(mutex_owned(&sc->sc_lock));
2442
2443 uhci_free_sqh(sc, upipe->bulk.sqh);
2444
2445 pipe->up_endpoint->ue_toggle = upipe->nexttoggle;
2446 }
2447
2448 int
2449 uhci_device_ctrl_init(struct usbd_xfer *xfer)
2450 {
2451 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2452 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2453 usb_device_request_t *req = &xfer->ux_request;
2454 struct usbd_device *dev = upipe->pipe.up_dev;
2455 uhci_softc_t *sc = dev->ud_bus->ub_hcpriv;
2456 uhci_soft_td_t *data = NULL;
2457 int len;
2458 usbd_status err;
2459 int isread;
2460
2461 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2462 DPRINTFN(3, "xfer=%#jx len=%jd, addr=%jd, endpt=%jd",
2463 (uintptr_t)xfer, xfer->ux_bufsize, dev->ud_addr,
2464 upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress);
2465
2466 isread = req->bmRequestType & UT_READ;
2467 len = xfer->ux_bufsize;
2468
2469 uxfer->ux_type = UX_CTRL;
2470 /* Set up data transaction */
2471 if (len != 0) {
2472 err = uhci_alloc_std_chain(sc, xfer, len, isread, &data);
2473 if (err)
2474 return err;
2475 }
2476 /* Set up interrupt info. */
2477 uxfer->ux_setup = upipe->ctrl.setup;
2478 uxfer->ux_stat = upipe->ctrl.stat;
2479 uxfer->ux_data = data;
2480
2481 return 0;
2482 }
2483
2484 Static void
2485 uhci_device_ctrl_fini(struct usbd_xfer *xfer)
2486 {
2487 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2488 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2489
2490 KASSERT(ux->ux_type == UX_CTRL);
2491
2492 if (ux->ux_nstd) {
2493 uhci_free_stds(sc, ux);
2494 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2495 }
2496 }
2497
2498 usbd_status
2499 uhci_device_ctrl_transfer(struct usbd_xfer *xfer)
2500 {
2501 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2502 usbd_status err;
2503
2504 /* Insert last in queue. */
2505 mutex_enter(&sc->sc_lock);
2506 err = usb_insert_transfer(xfer);
2507 mutex_exit(&sc->sc_lock);
2508 if (err)
2509 return err;
2510
2511 /*
2512 	 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
2513 * so start it first.
2514 */
2515 return uhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2516 }
2517
2518 usbd_status
2519 uhci_device_ctrl_start(struct usbd_xfer *xfer)
2520 {
2521 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2522 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2523 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2524 usb_device_request_t *req = &xfer->ux_request;
2525 struct usbd_device *dev = upipe->pipe.up_dev;
2526 int addr = dev->ud_addr;
2527 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2528 uhci_soft_td_t *setup, *stat, *next, *dataend;
2529 uhci_soft_qh_t *sqh;
2530 const bool polling = sc->sc_bus.ub_usepolling;
2531 int len;
2532 int isread;
2533
2534 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2535
2536 if (sc->sc_dying)
2537 return USBD_IOERROR;
2538
2539 KASSERT(xfer->ux_rqflags & URQ_REQUEST);
2540
2541 DPRINTFN(3, "type=0x%02jx, request=0x%02jx, "
2542 "wValue=0x%04jx, wIndex=0x%04jx",
2543 req->bmRequestType, req->bRequest, UGETW(req->wValue),
2544 UGETW(req->wIndex));
2545 DPRINTFN(3, "len=%jd, addr=%jd, endpt=%jd",
2546 UGETW(req->wLength), dev->ud_addr, endpt, 0);
2547
2548 isread = req->bmRequestType & UT_READ;
2549 len = UGETW(req->wLength);
2550
2551 setup = upipe->ctrl.setup;
2552 stat = upipe->ctrl.stat;
2553 sqh = upipe->ctrl.sqh;
2554
2555 memcpy(KERNADDR(&upipe->ctrl.reqdma, 0), req, sizeof(*req));
2556 usb_syncmem(&upipe->ctrl.reqdma, 0, sizeof(*req), BUS_DMASYNC_PREWRITE);
2557
2558 if (!polling)
2559 mutex_enter(&sc->sc_lock);
2560
2561 /* Set up data transaction */
2562 if (len != 0) {
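		/* The data stage of a control transfer always begins with DATA1. */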
2563 upipe->nexttoggle = 1;
2564 next = uxfer->ux_data;
2565 uhci_reset_std_chain(sc, xfer, len, isread,
2566 &upipe->nexttoggle, &dataend);
2567 dataend->link.std = stat;
2568 dataend->td.td_link = htole32(stat->physaddr | UHCI_PTR_TD);
2569 usb_syncmem(&dataend->dma,
2570 dataend->offs + offsetof(uhci_td_t, td_link),
2571 sizeof(dataend->td.td_link),
2572 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2573 } else {
2574 next = stat;
2575 }
2576
2577 const uint32_t status = UHCI_TD_ZERO_ACTLEN(
2578 UHCI_TD_SET_ERRCNT(3) |
2579 UHCI_TD_ACTIVE |
2580 (dev->ud_speed == USB_SPEED_LOW ? UHCI_TD_LS : 0)
2581 );
2582 setup->link.std = next;
2583 setup->td.td_link = htole32(next->physaddr | UHCI_PTR_TD);
2584 setup->td.td_status = htole32(status);
2585 setup->td.td_token = htole32(UHCI_TD_SETUP(sizeof(*req), endpt, addr));
2586 setup->td.td_buffer = htole32(DMAADDR(&upipe->ctrl.reqdma, 0));
2587
2588 usb_syncmem(&setup->dma, setup->offs, sizeof(setup->td),
2589 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2590
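	/*
	 * Status stage: a zero-length transaction in the direction opposite
	 * to the data stage, always using DATA1.
	 */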
2591 stat->link.std = NULL;
2592 stat->td.td_link = htole32(UHCI_PTR_T);
2593 stat->td.td_status = htole32(status | UHCI_TD_IOC);
2594 stat->td.td_token =
2595 htole32(isread ? UHCI_TD_OUT(0, endpt, addr, 1) :
2596 UHCI_TD_IN (0, endpt, addr, 1));
2597 stat->td.td_buffer = htole32(0);
2598 usb_syncmem(&stat->dma, stat->offs, sizeof(stat->td),
2599 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2600
2601 #ifdef UHCI_DEBUG
2602 if (uhcidebug >= 10) {
2603 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2604 DPRINTF("before transfer", 0, 0, 0, 0);
2605 uhci_dump_tds(setup);
2606 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2607 }
2608 #endif
2609
2610 /* Set up interrupt info. */
2611 uxfer->ux_setup = setup;
2612 uxfer->ux_stat = stat;
2613 KASSERT(uxfer->ux_isdone);
2614 #ifdef DIAGNOSTIC
2615 uxfer->ux_isdone = false;
2616 #endif
2617
2618 sqh->elink = setup;
2619 sqh->qh.qh_elink = htole32(setup->physaddr | UHCI_PTR_TD);
2620 /* uhci_add_?s_ctrl() will do usb_syncmem(sqh) */
2621
2622 if (dev->ud_speed == USB_SPEED_LOW)
2623 uhci_add_ls_ctrl(sc, sqh);
2624 else
2625 uhci_add_hs_ctrl(sc, sqh);
2626 uhci_add_intr_list(sc, uxfer);
2627 #ifdef UHCI_DEBUG
2628 if (uhcidebug >= 12) {
2629 uhci_soft_td_t *std;
2630 uhci_soft_qh_t *xqh;
2631 uhci_soft_qh_t *sxqh;
2632 int maxqh = 0;
2633 uhci_physaddr_t link;
2634 DPRINTFN(12, "--- dump start ---", 0, 0, 0, 0);
2635 DPRINTFN(12, "follow from [0]", 0, 0, 0, 0);
2636 for (std = sc->sc_vframes[0].htd, link = 0;
2637 (link & UHCI_PTR_QH) == 0;
2638 std = std->link.std) {
2639 link = le32toh(std->td.td_link);
2640 uhci_dump_td(std);
2641 }
2642 sxqh = (uhci_soft_qh_t *)std;
2643 uhci_dump_qh(sxqh);
2644 for (xqh = sxqh;
2645 xqh != NULL;
2646 xqh = (maxqh++ == 5 || xqh->hlink == sxqh ||
2647 xqh->hlink == xqh ? NULL : xqh->hlink)) {
2648 uhci_dump_qh(xqh);
2649 }
2650 DPRINTFN(12, "Enqueued QH:", 0, 0, 0, 0);
2651 uhci_dump_qh(sqh);
2652 uhci_dump_tds(sqh->elink);
2653 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2654 }
2655 #endif
2656 usbd_xfer_schedule_timeout(xfer);
2657 xfer->ux_status = USBD_IN_PROGRESS;
2658 if (!polling)
2659 mutex_exit(&sc->sc_lock);
2660
2661 return USBD_IN_PROGRESS;
2662 }
2663
2664 int
2665 uhci_device_intr_init(struct usbd_xfer *xfer)
2666 {
2667 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2668 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2669 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
2670 int endpt = ed->bEndpointAddress;
2671 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2672 int len = xfer->ux_bufsize;
2673 int err;
2674
2675 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2676
2677 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2678 xfer->ux_length, xfer->ux_flags, 0);
2679
2680 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2681 KASSERT(len != 0);
2682
2683 ux->ux_type = UX_INTR;
2684 ux->ux_nstd = 0;
2685 err = uhci_alloc_std_chain(sc, xfer, len, isread, &ux->ux_stdstart);
2686
2687 return err;
2688 }
2689
2690 Static void
2691 uhci_device_intr_fini(struct usbd_xfer *xfer)
2692 {
2693 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2694 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2695
2696 KASSERT(ux->ux_type == UX_INTR);
2697
2698 if (ux->ux_nstd) {
2699 uhci_free_stds(sc, ux);
2700 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2701 }
2702 }
2703
2704 usbd_status
2705 uhci_device_intr_transfer(struct usbd_xfer *xfer)
2706 {
2707 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2708 usbd_status err;
2709
2710 /* Insert last in queue. */
2711 mutex_enter(&sc->sc_lock);
2712 err = usb_insert_transfer(xfer);
2713 mutex_exit(&sc->sc_lock);
2714 if (err)
2715 return err;
2716
2717 /*
2718 	 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
2719 * so start it first.
2720 */
2721 return uhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2722 }
2723
2724 usbd_status
2725 uhci_device_intr_start(struct usbd_xfer *xfer)
2726 {
2727 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2728 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2729 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2730 uhci_soft_td_t *data, *dataend;
2731 uhci_soft_qh_t *sqh;
2732 const bool polling = sc->sc_bus.ub_usepolling;
2733 int isread, endpt;
2734 int i;
2735
2736 if (sc->sc_dying)
2737 return USBD_IOERROR;
2738
2739 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2740
2741 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2742 xfer->ux_length, xfer->ux_flags, 0);
2743
2744 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2745 KASSERT(xfer->ux_length <= xfer->ux_bufsize);
2746
2747 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2748 isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2749
2750 data = ux->ux_stdstart;
2751
2752 KASSERT(ux->ux_isdone);
2753 #ifdef DIAGNOSTIC
2754 ux->ux_isdone = false;
2755 #endif
2756
2757 /* Take lock to protect nexttoggle */
2758 if (!polling)
2759 mutex_enter(&sc->sc_lock);
2760 uhci_reset_std_chain(sc, xfer, xfer->ux_length, isread,
2761 &upipe->nexttoggle, &dataend);
2762
2763 dataend->td.td_status |= htole32(UHCI_TD_IOC);
2764 usb_syncmem(&dataend->dma,
2765 dataend->offs + offsetof(uhci_td_t, td_status),
2766 sizeof(dataend->td.td_status),
2767 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2768 ux->ux_stdend = dataend;
2769
2770 #ifdef UHCI_DEBUG
2771 if (uhcidebug >= 10) {
2772 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2773 uhci_dump_tds(data);
2774 uhci_dump_qh(upipe->intr.qhs[0]);
2775 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2776 }
2777 #endif
2778
2779 DPRINTFN(10, "qhs[0]=%#jx", (uintptr_t)upipe->intr.qhs[0], 0, 0, 0);
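	/* Hang the TD chain off every QH this pipe is polled from. */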
2780 for (i = 0; i < upipe->intr.npoll; i++) {
2781 sqh = upipe->intr.qhs[i];
2782 sqh->elink = data;
2783 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD);
2784 usb_syncmem(&sqh->dma,
2785 sqh->offs + offsetof(uhci_qh_t, qh_elink),
2786 sizeof(sqh->qh.qh_elink),
2787 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2788 }
2789 uhci_add_intr_list(sc, ux);
2790 xfer->ux_status = USBD_IN_PROGRESS;
2791 if (!polling)
2792 mutex_exit(&sc->sc_lock);
2793
2794 #ifdef UHCI_DEBUG
2795 if (uhcidebug >= 10) {
2796 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2797 uhci_dump_tds(data);
2798 uhci_dump_qh(upipe->intr.qhs[0]);
2799 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2800 }
2801 #endif
2802
2803 return USBD_IN_PROGRESS;
2804 }
2805
2806 /* Abort a device control request. */
2807 void
2808 uhci_device_ctrl_abort(struct usbd_xfer *xfer)
2809 {
2810 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2811
2812 KASSERT(mutex_owned(&sc->sc_lock));
2813
2814 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2815 usbd_xfer_abort(xfer);
2816 }
2817
2818 /* Close a device control pipe. */
2819 void
2820 uhci_device_ctrl_close(struct usbd_pipe *pipe)
2821 {
2822 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2823 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2824
2825 uhci_free_sqh(sc, upipe->ctrl.sqh);
2826 uhci_free_std_locked(sc, upipe->ctrl.setup);
2827 uhci_free_std_locked(sc, upipe->ctrl.stat);
2828
2829 usb_freemem(&sc->sc_bus, &upipe->ctrl.reqdma);
2830 }
2831
2832 /* Abort a device interrupt request. */
2833 void
2834 uhci_device_intr_abort(struct usbd_xfer *xfer)
2835 {
2836 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2837
2838 KASSERT(mutex_owned(&sc->sc_lock));
2839
2840 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2841 DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2842
2843 usbd_xfer_abort(xfer);
2844 }
2845
2846 /* Close a device interrupt pipe. */
2847 void
2848 uhci_device_intr_close(struct usbd_pipe *pipe)
2849 {
2850 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2851 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2852 int i, npoll;
2853
2854 KASSERT(mutex_owned(&sc->sc_lock));
2855
2856 /* Unlink descriptors from controller data structures. */
2857 npoll = upipe->intr.npoll;
2858 for (i = 0; i < npoll; i++)
2859 uhci_remove_intr(sc, upipe->intr.qhs[i]);
2860
2861 /*
2862 * We now have to wait for any activity on the physical
2863 * descriptors to stop.
2864 */
2865 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock);
2866
2867 for (i = 0; i < npoll; i++)
2868 uhci_free_sqh(sc, upipe->intr.qhs[i]);
2869 kmem_free(upipe->intr.qhs, npoll * sizeof(uhci_soft_qh_t *));
2870 }
2871
2872 int
2873 uhci_device_isoc_init(struct usbd_xfer *xfer)
2874 {
2875 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2876
2877 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2878 KASSERT(xfer->ux_nframes != 0);
2879 KASSERT(ux->ux_isdone);
2880
2881 ux->ux_type = UX_ISOC;
2882 return 0;
2883 }
2884
2885 Static void
2886 uhci_device_isoc_fini(struct usbd_xfer *xfer)
2887 {
2888 struct uhci_xfer *ux __diagused = UHCI_XFER2UXFER(xfer);
2889
2890 KASSERT(ux->ux_type == UX_ISOC);
2891 }
2892
2893 usbd_status
2894 uhci_device_isoc_transfer(struct usbd_xfer *xfer)
2895 {
2896 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2897 usbd_status err __diagused;
2898
2899 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2900 DPRINTFN(5, "xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2901
2902 /* Put it on our queue, */
2903 mutex_enter(&sc->sc_lock);
2904 err = usb_insert_transfer(xfer);
2905 mutex_exit(&sc->sc_lock);
2906
2907 KASSERT(err == USBD_NORMAL_COMPLETION);
2908
2909 /* insert into schedule, */
2910
2911 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2912 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2913 struct isoc *isoc = &upipe->isoc;
2914 uhci_soft_td_t *std = NULL;
2915 uint32_t buf, len, status, offs;
2916 int i, next, nframes;
2917 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN;
2918
2919 DPRINTFN(5, "used=%jd next=%jd xfer=%#jx nframes=%jd",
2920 isoc->inuse, isoc->next, (uintptr_t)xfer, xfer->ux_nframes);
2921
2922 if (sc->sc_dying)
2923 return USBD_IOERROR;
2924
2925 if (xfer->ux_status == USBD_IN_PROGRESS) {
2926 /* This request has already been entered into the frame list */
2927 printf("%s: xfer=%p in frame list\n", __func__, xfer);
2928 /* XXX */
2929 }
2930
2931 #ifdef DIAGNOSTIC
2932 if (isoc->inuse >= UHCI_VFRAMELIST_COUNT)
2933 printf("%s: overflow!\n", __func__);
2934 #endif
2935
2936 KASSERT(xfer->ux_nframes != 0);
2937
2938 if (xfer->ux_length)
2939 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
2940 rd ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2941
2942 mutex_enter(&sc->sc_lock);
2943 next = isoc->next;
2944 if (next == -1) {
2945 /* Not in use yet, schedule it a few frames ahead. */
2946 next = (UREAD2(sc, UHCI_FRNUM) + 3) % UHCI_VFRAMELIST_COUNT;
2947 DPRINTFN(2, "start next=%jd", next, 0, 0, 0);
2948 }
2949
2950 xfer->ux_status = USBD_IN_PROGRESS;
2951 ux->ux_curframe = next;
2952
2953 offs = 0;
2954 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(0) |
2955 UHCI_TD_ACTIVE |
2956 UHCI_TD_IOS);
2957 nframes = xfer->ux_nframes;
2958 for (i = 0; i < nframes; i++) {
2959 buf = DMAADDR(&xfer->ux_dmabuf, offs);
2960 std = isoc->stds[next];
2961 if (++next >= UHCI_VFRAMELIST_COUNT)
2962 next = 0;
2963 len = xfer->ux_frlengths[i];
2964
2965 KASSERTMSG(len <= __SHIFTOUT_MASK(UHCI_TD_MAXLEN_MASK),
2966 "len %d", len);
2967 std->td.td_buffer = htole32(buf);
2968 usb_syncmem(&xfer->ux_dmabuf, offs, len,
2969 rd ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2970 if (i == nframes - 1)
2971 status |= UHCI_TD_IOC;
2972 std->td.td_status = htole32(status);
2973 std->td.td_token &= htole32(~UHCI_TD_MAXLEN_MASK);
2974 std->td.td_token |= htole32(UHCI_TD_SET_MAXLEN(len));
2975 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2976 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2977 #ifdef UHCI_DEBUG
2978 if (uhcidebug >= 5) {
2979 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2980 DPRINTF("TD %jd", i, 0, 0, 0);
2981 uhci_dump_td(std);
2982 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2983 }
2984 #endif
2985 offs += len;
2986 const bus_addr_t bend __diagused =
2987 DMAADDR(&xfer->ux_dmabuf, offs - 1);
2988
2989 KASSERT(((buf ^ bend) & ~PAGE_MASK) == 0);
2990 }
2991 isoc->next = next;
2992 isoc->inuse += xfer->ux_nframes;
2993
2994 /* Set up interrupt info. */
2995 ux->ux_stdstart = std;
2996 ux->ux_stdend = std;
2997
2998 KASSERT(ux->ux_isdone);
2999 #ifdef DIAGNOSTIC
3000 ux->ux_isdone = false;
3001 #endif
3002 uhci_add_intr_list(sc, ux);
3003
3004 mutex_exit(&sc->sc_lock);
3005
3006 return USBD_IN_PROGRESS;
3007 }
3008
3009 void
3010 uhci_device_isoc_abort(struct usbd_xfer *xfer)
3011 {
3012 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3013 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3014 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
3015 uhci_soft_td_t **stds = upipe->isoc.stds;
3016 uhci_soft_td_t *std;
3017 int i, n, nframes, maxlen, len;
3018
3019 KASSERT(mutex_owned(&sc->sc_lock));
3020
3021 /* Transfer is already done. */
3022 if (xfer->ux_status != USBD_NOT_STARTED &&
3023 xfer->ux_status != USBD_IN_PROGRESS) {
3024 return;
3025 }
3026
3027 /* Give xfer the requested abort code. */
3028 xfer->ux_status = USBD_CANCELLED;
3029
3030 /* make hardware ignore it, */
3031 nframes = xfer->ux_nframes;
3032 n = ux->ux_curframe;
3033 maxlen = 0;
3034 for (i = 0; i < nframes; i++) {
3035 std = stds[n];
3036 usb_syncmem(&std->dma,
3037 std->offs + offsetof(uhci_td_t, td_status),
3038 sizeof(std->td.td_status),
3039 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3040 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC));
3041 usb_syncmem(&std->dma,
3042 std->offs + offsetof(uhci_td_t, td_status),
3043 sizeof(std->td.td_status),
3044 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3045 usb_syncmem(&std->dma,
3046 std->offs + offsetof(uhci_td_t, td_token),
3047 sizeof(std->td.td_token),
3048 BUS_DMASYNC_POSTWRITE);
3049 len = UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token));
3050 if (len > maxlen)
3051 maxlen = len;
3052 if (++n >= UHCI_VFRAMELIST_COUNT)
3053 n = 0;
3054 }
3055
3056 /* and wait until we are sure the hardware has finished. */
3057 delay(maxlen);
3058
3059 #ifdef DIAGNOSTIC
3060 ux->ux_isdone = true;
3061 #endif
3062 /* Remove from interrupt list. */
3063 uhci_del_intr_list(sc, ux);
3064
3065 /* Run callback. */
3066 usb_transfer_complete(xfer);
3067
3068 KASSERT(mutex_owned(&sc->sc_lock));
3069 }
3070
3071 void
3072 uhci_device_isoc_close(struct usbd_pipe *pipe)
3073 {
3074 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3075 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3076 uhci_soft_td_t *std, *vstd;
3077 struct isoc *isoc;
3078 int i;
3079
3080 KASSERT(mutex_owned(&sc->sc_lock));
3081
3082 /*
3083 * Make sure all TDs are marked as inactive.
3084 * Wait for completion.
3085 * Unschedule.
3086 * Deallocate.
3087 */
3088 isoc = &upipe->isoc;
3089
3090 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3091 std = isoc->stds[i];
3092 usb_syncmem(&std->dma,
3093 std->offs + offsetof(uhci_td_t, td_status),
3094 sizeof(std->td.td_status),
3095 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3096 std->td.td_status &= htole32(~UHCI_TD_ACTIVE);
3097 usb_syncmem(&std->dma,
3098 std->offs + offsetof(uhci_td_t, td_status),
3099 sizeof(std->td.td_status),
3100 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3101 }
3102 /* wait for completion */
3103 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock);
3104
3105 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3106 std = isoc->stds[i];
3107 for (vstd = sc->sc_vframes[i].htd;
3108 vstd != NULL && vstd->link.std != std;
3109 vstd = vstd->link.std)
3110 ;
3111 if (vstd == NULL) {
3112 			/* XXX should this panic? */
3113 printf("%s: %p not found\n", __func__, std);
3114 mutex_exit(&sc->sc_lock);
3115 return;
3116 }
3117 vstd->link = std->link;
3118 usb_syncmem(&std->dma,
3119 std->offs + offsetof(uhci_td_t, td_link),
3120 sizeof(std->td.td_link),
3121 BUS_DMASYNC_POSTWRITE);
3122 vstd->td.td_link = std->td.td_link;
3123 usb_syncmem(&vstd->dma,
3124 vstd->offs + offsetof(uhci_td_t, td_link),
3125 sizeof(vstd->td.td_link),
3126 BUS_DMASYNC_PREWRITE);
3127 uhci_free_std_locked(sc, std);
3128 }
3129
3130 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *));
3131 }
3132
3133 usbd_status
3134 uhci_setup_isoc(struct usbd_pipe *pipe)
3135 {
3136 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3137 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3138 int addr = upipe->pipe.up_dev->ud_addr;
3139 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
3140 int rd = UE_GET_DIR(endpt) == UE_DIR_IN;
3141 uhci_soft_td_t *std, *vstd;
3142 uint32_t token;
3143 struct isoc *isoc;
3144 int i;
3145
3146 isoc = &upipe->isoc;
3147
3148 isoc->stds = kmem_alloc(
3149 UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *), KM_SLEEP);
3150 if (isoc->stds == NULL)
3151 return USBD_NOMEM;
3152
3153 token = rd ? UHCI_TD_IN (0, endpt, addr, 0) :
3154 UHCI_TD_OUT(0, endpt, addr, 0);
3155
3156 	/* Allocate the TDs and mark them as inactive. */
3157 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3158 std = uhci_alloc_std(sc);
3159 		if (std == NULL)
3160 goto bad;
3161 std->td.td_status = htole32(UHCI_TD_IOS); /* iso, inactive */
3162 std->td.td_token = htole32(token);
3163 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
3164 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3165 isoc->stds[i] = std;
3166 }
3167
3168 mutex_enter(&sc->sc_lock);
3169
3170 /* Insert TDs into schedule. */
3171 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3172 std = isoc->stds[i];
3173 vstd = sc->sc_vframes[i].htd;
3174 usb_syncmem(&vstd->dma,
3175 vstd->offs + offsetof(uhci_td_t, td_link),
3176 sizeof(vstd->td.td_link),
3177 BUS_DMASYNC_POSTWRITE);
3178 std->link = vstd->link;
3179 std->td.td_link = vstd->td.td_link;
3180 usb_syncmem(&std->dma,
3181 std->offs + offsetof(uhci_td_t, td_link),
3182 sizeof(std->td.td_link),
3183 BUS_DMASYNC_PREWRITE);
3184 vstd->link.std = std;
3185 vstd->td.td_link = htole32(std->physaddr | UHCI_PTR_TD);
3186 usb_syncmem(&vstd->dma,
3187 vstd->offs + offsetof(uhci_td_t, td_link),
3188 sizeof(vstd->td.td_link),
3189 BUS_DMASYNC_PREWRITE);
3190 }
3191 mutex_exit(&sc->sc_lock);
3192
3193 isoc->next = -1;
3194 isoc->inuse = 0;
3195
3196 return USBD_NORMAL_COMPLETION;
3197
3198 bad:
3199 while (--i >= 0)
3200 uhci_free_std(sc, isoc->stds[i]);
3201 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *));
3202 return USBD_NOMEM;
3203 }
3204
3205 void
3206 uhci_device_isoc_done(struct usbd_xfer *xfer)
3207 {
3208 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
3209 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3210 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
3211 int i, offs;
3212 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN;
3213
3214 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3215 DPRINTFN(4, "length=%jd, ux_state=0x%08jx",
3216 xfer->ux_actlen, xfer->ux_state, 0, 0);
3217
3218 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3219
3220 #ifdef DIAGNOSTIC
3221 if (ux->ux_stdend == NULL) {
3222 printf("%s: xfer=%p stdend==NULL\n", __func__, xfer);
3223 #ifdef UHCI_DEBUG
3224 DPRINTF("--- dump start ---", 0, 0, 0, 0);
3225 uhci_dump_ii(ux);
3226 DPRINTF("--- dump end ---", 0, 0, 0, 0);
3227 #endif
3228 return;
3229 }
3230 #endif
3231
3232 /* Turn off the interrupt since it is active even if the TD is not. */
3233 usb_syncmem(&ux->ux_stdend->dma,
3234 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status),
3235 sizeof(ux->ux_stdend->td.td_status),
3236 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3237 ux->ux_stdend->td.td_status &= htole32(~UHCI_TD_IOC);
3238 usb_syncmem(&ux->ux_stdend->dma,
3239 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status),
3240 sizeof(ux->ux_stdend->td.td_status),
3241 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3242
3243 offs = 0;
3244 for (i = 0; i < xfer->ux_nframes; i++) {
3245 usb_syncmem(&xfer->ux_dmabuf, offs, xfer->ux_frlengths[i],
3246 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3247 offs += xfer->ux_frlengths[i];
3248 }
3249 }
3250
3251 void
3252 uhci_device_intr_done(struct usbd_xfer *xfer)
3253 {
3254 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
3255 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3256 uhci_soft_qh_t *sqh;
3257 int i, npoll;
3258
3259 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3260 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0);
3261
3262 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3263
3264 npoll = upipe->intr.npoll;
3265 for (i = 0; i < npoll; i++) {
3266 sqh = upipe->intr.qhs[i];
3267 sqh->elink = NULL;
3268 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3269 usb_syncmem(&sqh->dma,
3270 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3271 sizeof(sqh->qh.qh_elink),
3272 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3273 }
3274 const int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
3275 const bool isread = UE_GET_DIR(endpt) == UE_DIR_IN;
3276 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
3277 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3278 }
3279
3280 /* A control request has completed: unschedule its QH and sync buffers. */
3281 void
3282 uhci_device_ctrl_done(struct usbd_xfer *xfer)
3283 {
3284 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3285 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3286 int len = UGETW(xfer->ux_request.wLength);
3287 int isread = (xfer->ux_request.bmRequestType & UT_READ);
3288
3289 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3290
3291 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3292 KASSERT(xfer->ux_rqflags & URQ_REQUEST);
3293
3294 /* XXXNH move to uhci_idone??? */
3295 if (upipe->pipe.up_dev->ud_speed == USB_SPEED_LOW)
3296 uhci_remove_ls_ctrl(sc, upipe->ctrl.sqh);
3297 else
3298 uhci_remove_hs_ctrl(sc, upipe->ctrl.sqh);
3299
3300 if (len) {
3301 usb_syncmem(&xfer->ux_dmabuf, 0, len,
3302 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3303 }
3304 usb_syncmem(&upipe->ctrl.reqdma, 0,
3305 sizeof(usb_device_request_t), BUS_DMASYNC_POSTWRITE);
3306
3307 DPRINTF("length=%jd", xfer->ux_actlen, 0, 0, 0);
3308 }
3309
3310 /* A bulk transfer has completed: unschedule its QH and sync the buffer. */
3311 void
3312 uhci_device_bulk_done(struct usbd_xfer *xfer)
3313 {
3314 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3315 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3316 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
3317 int endpt = ed->bEndpointAddress;
3318 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
3319
3320 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3321 DPRINTFN(5, "xfer=%#jx sc=%#jx upipe=%#jx", (uintptr_t)xfer,
3322 (uintptr_t)sc, (uintptr_t)upipe, 0);
3323
3324 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3325
3326 uhci_remove_bulk(sc, upipe->bulk.sqh);
3327
3328 if (xfer->ux_length) {
3329 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
3330 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3331 }
3332
3333 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0);
3334 }
3335
3336 /* Add interrupt QH, called with vflock. */
3337 void
3338 uhci_add_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
3339 {
3340 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos];
3341 uhci_soft_qh_t *eqh;
3342
3343 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3344 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0);
3345
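	/*
	 * Link the new QH after the current last QH of this virtual frame
	 * and make it the new end of the chain.
	 */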
3346 eqh = vf->eqh;
3347 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
3348 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
3349 sqh->hlink = eqh->hlink;
3350 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
3351 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
3352 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
3353 eqh->hlink = sqh;
3354 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
3355 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
3356 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
3357 vf->eqh = sqh;
3358 vf->bandwidth++;
3359 }
3360
3361 /* Remove interrupt QH. */
3362 void
3363 uhci_remove_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
3364 {
3365 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos];
3366 uhci_soft_qh_t *pqh;
3367
3368 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3369 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0);
3370
3371 /* See comment in uhci_remove_ctrl() */
3372
3373 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
3374 sizeof(sqh->qh.qh_elink),
3375 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3376 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) {
3377 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3378 usb_syncmem(&sqh->dma,
3379 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3380 sizeof(sqh->qh.qh_elink),
3381 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3382 delay(UHCI_QH_REMOVE_DELAY);
3383 }
3384
3385 pqh = uhci_find_prev_qh(vf->hqh, sqh);
3386 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
3387 sizeof(sqh->qh.qh_hlink),
3388 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3389 pqh->hlink = sqh->hlink;
3390 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
3391 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
3392 sizeof(pqh->qh.qh_hlink),
3393 BUS_DMASYNC_PREWRITE);
3394 delay(UHCI_QH_REMOVE_DELAY);
3395 if (vf->eqh == sqh)
3396 vf->eqh = pqh;
3397 vf->bandwidth--;
3398 }
3399
3400 usbd_status
3401 uhci_device_setintr(uhci_softc_t *sc, struct uhci_pipe *upipe, int ival)
3402 {
3403 uhci_soft_qh_t *sqh;
3404 int i, npoll;
3405 u_int bestbw, bw, bestoffs, offs;
3406
3407 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3408 DPRINTFN(2, "pipe=%#jx", (uintptr_t)upipe, 0, 0, 0);
3409 if (ival == 0) {
3410 printf("%s: 0 interval\n", __func__);
3411 return USBD_INVAL;
3412 }
3413
3414 if (ival > UHCI_VFRAMELIST_COUNT)
3415 ival = UHCI_VFRAMELIST_COUNT;
3416 npoll = howmany(UHCI_VFRAMELIST_COUNT, ival);
3417 DPRINTF("ival=%jd npoll=%jd", ival, npoll, 0, 0);
3418
3419 upipe->intr.npoll = npoll;
3420 upipe->intr.qhs =
3421 kmem_alloc(npoll * sizeof(uhci_soft_qh_t *), KM_SLEEP);
3422
3423 /*
3424 	 * Figure out which offset in the schedule has the most
3425 	 * bandwidth left over.
3426 */
3427 #define MOD(i) ((i) & (UHCI_VFRAMELIST_COUNT-1))
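	/*
	 * Try each possible starting offset and keep the one whose frames
	 * currently carry the least interrupt bandwidth.
	 */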
3428 for (bestoffs = offs = 0, bestbw = ~0; offs < ival; offs++) {
3429 for (bw = i = 0; i < npoll; i++)
3430 bw += sc->sc_vframes[MOD(i * ival + offs)].bandwidth;
3431 if (bw < bestbw) {
3432 bestbw = bw;
3433 bestoffs = offs;
3434 }
3435 }
3436 DPRINTF("bw=%jd offs=%jd", bestbw, bestoffs, 0, 0);
3437 for (i = 0; i < npoll; i++) {
3438 upipe->intr.qhs[i] = sqh = uhci_alloc_sqh(sc);
3439 sqh->elink = NULL;
3440 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3441 usb_syncmem(&sqh->dma,
3442 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3443 sizeof(sqh->qh.qh_elink),
3444 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3445 sqh->pos = MOD(i * ival + bestoffs);
3446 }
3447 #undef MOD
3448
3449 mutex_enter(&sc->sc_lock);
3450 /* Enter QHs into the controller data structures. */
3451 for (i = 0; i < npoll; i++)
3452 uhci_add_intr(sc, upipe->intr.qhs[i]);
3453 mutex_exit(&sc->sc_lock);
3454
3455 DPRINTFN(5, "returns %#jx", (uintptr_t)upipe, 0, 0, 0);
3456
3457 return USBD_NORMAL_COMPLETION;
3458 }
3459
3460 /* Open a new pipe. */
3461 usbd_status
3462 uhci_open(struct usbd_pipe *pipe)
3463 {
3464 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3465 struct usbd_bus *bus = pipe->up_dev->ud_bus;
3466 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3467 usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc;
3468 usbd_status err = USBD_NOMEM;
3469 int ival;
3470
3471 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3472 DPRINTF("pipe=%#jx, addr=%jd, endpt=%jd (%jd)",
3473 (uintptr_t)pipe, pipe->up_dev->ud_addr, ed->bEndpointAddress,
3474 bus->ub_rhaddr);
3475
3476 if (sc->sc_dying)
3477 return USBD_IOERROR;
3478
3479 upipe->aborting = 0;
3480 /* toggle state needed for bulk endpoints */
3481 upipe->nexttoggle = pipe->up_endpoint->ue_toggle;
3482
3483 if (pipe->up_dev->ud_addr == bus->ub_rhaddr) {
3484 switch (ed->bEndpointAddress) {
3485 case USB_CONTROL_ENDPOINT:
3486 pipe->up_methods = &roothub_ctrl_methods;
3487 break;
3488 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
3489 pipe->up_methods = &uhci_root_intr_methods;
3490 break;
3491 default:
3492 return USBD_INVAL;
3493 }
3494 } else {
3495 switch (ed->bmAttributes & UE_XFERTYPE) {
3496 case UE_CONTROL:
3497 pipe->up_methods = &uhci_device_ctrl_methods;
3498 upipe->ctrl.sqh = uhci_alloc_sqh(sc);
3499 if (upipe->ctrl.sqh == NULL)
3500 goto bad;
3501 upipe->ctrl.setup = uhci_alloc_std(sc);
3502 if (upipe->ctrl.setup == NULL) {
3503 uhci_free_sqh(sc, upipe->ctrl.sqh);
3504 goto bad;
3505 }
3506 upipe->ctrl.stat = uhci_alloc_std(sc);
3507 if (upipe->ctrl.stat == NULL) {
3508 uhci_free_sqh(sc, upipe->ctrl.sqh);
3509 uhci_free_std(sc, upipe->ctrl.setup);
3510 goto bad;
3511 }
3512 err = usb_allocmem(&sc->sc_bus,
3513 sizeof(usb_device_request_t), 0,
3514 USBMALLOC_COHERENT, &upipe->ctrl.reqdma);
3515 if (err) {
3516 uhci_free_sqh(sc, upipe->ctrl.sqh);
3517 uhci_free_std(sc, upipe->ctrl.setup);
3518 uhci_free_std(sc, upipe->ctrl.stat);
3519 goto bad;
3520 }
3521 break;
3522 case UE_INTERRUPT:
3523 pipe->up_methods = &uhci_device_intr_methods;
3524 ival = pipe->up_interval;
3525 if (ival == USBD_DEFAULT_INTERVAL)
3526 ival = ed->bInterval;
3527 return uhci_device_setintr(sc, upipe, ival);
3528 case UE_ISOCHRONOUS:
3529 pipe->up_serialise = false;
3530 pipe->up_methods = &uhci_device_isoc_methods;
3531 return uhci_setup_isoc(pipe);
3532 case UE_BULK:
3533 pipe->up_methods = &uhci_device_bulk_methods;
3534 upipe->bulk.sqh = uhci_alloc_sqh(sc);
3535 if (upipe->bulk.sqh == NULL)
3536 goto bad;
3537 break;
3538 }
3539 }
3540 return USBD_NORMAL_COMPLETION;
3541
3542 bad:
3543 return USBD_NOMEM;
3544 }
3545
3546 /*
3547 * Data structures and routines to emulate the root hub.
3548 */
3549 /*
3550 * The USB hub protocol requires that SET_FEATURE(PORT_RESET) also
3551 * enables the port, and also states that SET_FEATURE(PORT_ENABLE)
3552 * should not be used by the USB subsystem. As we cannot issue a
3553 * SET_FEATURE(PORT_ENABLE) externally, we must ensure that the port
3554 * will be enabled as part of the reset.
3555 *
3556 * On the VT83C572, the port cannot be successfully enabled until the
3557 * outstanding "port enable change" and "connection status change"
3558 * events have been reset.
3559 */
3560 Static usbd_status
3561 uhci_portreset(uhci_softc_t *sc, int index)
3562 {
3563 int lim, port, x;
3564 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3565
3566 if (index == 1)
3567 port = UHCI_PORTSC1;
3568 else if (index == 2)
3569 port = UHCI_PORTSC2;
3570 else
3571 return USBD_IOERROR;
3572
3573 x = URWMASK(UREAD2(sc, port));
3574 UWRITE2(sc, port, x | UHCI_PORTSC_PR);
3575
3576 usb_delay_ms(&sc->sc_bus, USB_PORT_ROOT_RESET_DELAY);
3577
3578 DPRINTF("uhci port %jd reset, status0 = 0x%04jx", index,
3579 UREAD2(sc, port), 0, 0);
3580
3581 x = URWMASK(UREAD2(sc, port));
3582 UWRITE2(sc, port, x & ~(UHCI_PORTSC_PR | UHCI_PORTSC_SUSP));
3583
3584 delay(100);
3585
3586 DPRINTF("uhci port %jd reset, status1 = 0x%04jx", index,
3587 UREAD2(sc, port), 0, 0);
3588
3589 x = URWMASK(UREAD2(sc, port));
3590 UWRITE2(sc, port, x | UHCI_PORTSC_PE);
3591
3592 for (lim = 10; --lim > 0;) {
3593 usb_delay_ms(&sc->sc_bus, USB_PORT_RESET_DELAY);
3594
3595 x = UREAD2(sc, port);
3596 DPRINTF("uhci port %jd iteration %ju, status = 0x%04jx", index,
3597 lim, x, 0);
3598
3599 if (!(x & UHCI_PORTSC_CCS)) {
3600 /*
3601 * No device is connected (or was disconnected
3602 * during reset). Consider the port reset.
3603 * The delay must be long enough to ensure on
3604 * the initial iteration that the device
3605 * connection will have been registered. 50ms
3606 * appears to be sufficient, but 20ms is not.
3607 */
3608 DPRINTFN(3, "uhci port %jd loop %ju, device detached",
3609 index, lim, 0, 0);
3610 break;
3611 }
3612
3613 if (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC)) {
3614 /*
3615 			 * The port-enable-changed and/or connection-
3616 			 * status-changed bits were set. Clear whichever
3617 			 * flags are raised (by writing a 1 to the bit)
3618 			 * and wait again for the state to settle.
3619 */
3620 UWRITE2(sc, port, URWMASK(x) |
3621 (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC)));
3622 continue;
3623 }
3624
3625 if (x & UHCI_PORTSC_PE)
3626 /* Port is enabled */
3627 break;
3628
3629 UWRITE2(sc, port, URWMASK(x) | UHCI_PORTSC_PE);
3630 }
3631
3632 DPRINTFN(3, "uhci port %jd reset, status2 = 0x%04jx", index,
3633 UREAD2(sc, port), 0, 0);
3634
3635 if (lim <= 0) {
3636 DPRINTF("uhci port %jd reset timed out", index,
3637 0, 0, 0);
3638 return USBD_TIMEOUT;
3639 }
3640
3641 sc->sc_isreset = 1;
3642 return USBD_NORMAL_COMPLETION;
3643 }
3644
3645 Static int
3646 uhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
3647 void *buf, int buflen)
3648 {
3649 uhci_softc_t *sc = UHCI_BUS2SC(bus);
3650 int port, x;
3651 int status, change, totlen = 0;
3652 uint16_t len, value, index;
3653 usb_port_status_t ps;
3654 usbd_status err;
3655
3656 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3657
3658 if (sc->sc_dying)
3659 return -1;
3660
3661 DPRINTF("type=0x%02jx request=%02jx", req->bmRequestType,
3662 req->bRequest, 0, 0);
3663
3664 len = UGETW(req->wLength);
3665 value = UGETW(req->wValue);
3666 index = UGETW(req->wIndex);
3667
3668 #define C(x,y) ((x) | ((y) << 8))
3669 switch (C(req->bRequest, req->bmRequestType)) {
3670 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3671 DPRINTF("wValue=0x%04jx", value, 0, 0, 0);
3672 if (len == 0)
3673 break;
3674 switch (value) {
3675 #define sd ((usb_string_descriptor_t *)buf)
3676 case C(2, UDESC_STRING):
3677 /* Product */
3678 totlen = usb_makestrdesc(sd, len, "UHCI root hub");
3679 break;
3680 #undef sd
3681 default:
3682 /* default from usbroothub */
3683 return buflen;
3684 }
3685 break;
3686
3687 /* Hub requests */
3688 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
3689 break;
3690 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
3691 DPRINTF("UR_CLEAR_PORT_FEATURE port=%jd feature=%jd", index,
3692 value, 0, 0);
3693 if (index == 1)
3694 port = UHCI_PORTSC1;
3695 else if (index == 2)
3696 port = UHCI_PORTSC2;
3697 else {
3698 return -1;
3699 }
3700 		switch (value) {
3701 case UHF_PORT_ENABLE:
3702 x = URWMASK(UREAD2(sc, port));
3703 UWRITE2(sc, port, x & ~UHCI_PORTSC_PE);
3704 break;
3705 case UHF_PORT_SUSPEND:
3706 x = URWMASK(UREAD2(sc, port));
3707 if (!(x & UHCI_PORTSC_SUSP)) /* not suspended */
3708 break;
3709 UWRITE2(sc, port, x | UHCI_PORTSC_RD);
3710 /* see USB2 spec ch. 7.1.7.7 */
3711 usb_delay_ms(&sc->sc_bus, 20);
3712 UWRITE2(sc, port, x & ~UHCI_PORTSC_SUSP);
3713 /* 10ms resume delay must be provided by caller */
3714 break;
3715 case UHF_PORT_RESET:
3716 x = URWMASK(UREAD2(sc, port));
3717 UWRITE2(sc, port, x & ~UHCI_PORTSC_PR);
3718 break;
3719 case UHF_C_PORT_CONNECTION:
3720 x = URWMASK(UREAD2(sc, port));
3721 UWRITE2(sc, port, x | UHCI_PORTSC_CSC);
3722 break;
3723 case UHF_C_PORT_ENABLE:
3724 x = URWMASK(UREAD2(sc, port));
3725 UWRITE2(sc, port, x | UHCI_PORTSC_POEDC);
3726 break;
3727 case UHF_C_PORT_OVER_CURRENT:
3728 x = URWMASK(UREAD2(sc, port));
3729 UWRITE2(sc, port, x | UHCI_PORTSC_OCIC);
3730 break;
3731 case UHF_C_PORT_RESET:
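			/*
			 * PORTSC has no reset-change bit; the change is
			 * tracked in software via sc_isreset and reported
			 * in the UR_GET_STATUS case below.
			 */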
3732 sc->sc_isreset = 0;
3733 break;
3734 case UHF_PORT_CONNECTION:
3735 case UHF_PORT_OVER_CURRENT:
3736 case UHF_PORT_POWER:
3737 case UHF_PORT_LOW_SPEED:
3738 case UHF_C_PORT_SUSPEND:
3739 default:
3740 return -1;
3741 }
3742 break;
3743 case C(UR_GET_BUS_STATE, UT_READ_CLASS_OTHER):
3744 if (index == 1)
3745 port = UHCI_PORTSC1;
3746 else if (index == 2)
3747 port = UHCI_PORTSC2;
3748 else {
3749 return -1;
3750 }
3751 if (len > 0) {
3752 *(uint8_t *)buf =
3753 UHCI_PORTSC_GET_LS(UREAD2(sc, port));
3754 totlen = 1;
3755 }
3756 break;
3757 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
3758 if (len == 0)
3759 break;
3760 if ((value & 0xff) != 0) {
3761 return -1;
3762 }
3763 usb_hub_descriptor_t hubd;
3764
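		/*
		 * usbroothub has already placed a default hub descriptor
		 * in buf; copy it out, set the port count for this
		 * controller (UHCI root hubs have two ports), and copy
		 * the result back.
		 */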
3765 totlen = uimin(buflen, sizeof(hubd));
3766 memcpy(&hubd, buf, totlen);
3767 hubd.bNbrPorts = 2;
3768 memcpy(buf, &hubd, totlen);
3769 break;
3770 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
3771 if (len != 4) {
3772 return -1;
3773 }
3774 memset(buf, 0, len);
3775 totlen = len;
3776 break;
3777 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
3778 if (index == 1)
3779 port = UHCI_PORTSC1;
3780 else if (index == 2)
3781 port = UHCI_PORTSC2;
3782 else {
3783 return -1;
3784 }
3785 if (len != 4) {
3786 return -1;
3787 }
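		/* Translate PORTSC bits into USB port status/change bits. */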
3788 x = UREAD2(sc, port);
3789 status = change = 0;
3790 if (x & UHCI_PORTSC_CCS)
3791 status |= UPS_CURRENT_CONNECT_STATUS;
3792 if (x & UHCI_PORTSC_CSC)
3793 change |= UPS_C_CONNECT_STATUS;
3794 if (x & UHCI_PORTSC_PE)
3795 status |= UPS_PORT_ENABLED;
3796 if (x & UHCI_PORTSC_POEDC)
3797 change |= UPS_C_PORT_ENABLED;
3798 if (x & UHCI_PORTSC_OCI)
3799 status |= UPS_OVERCURRENT_INDICATOR;
3800 if (x & UHCI_PORTSC_OCIC)
3801 change |= UPS_C_OVERCURRENT_INDICATOR;
3802 if (x & UHCI_PORTSC_SUSP)
3803 status |= UPS_SUSPEND;
3804 if (x & UHCI_PORTSC_LSDA)
3805 status |= UPS_LOW_SPEED;
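		/* Root-hub ports have no power switching; always powered. */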
3806 status |= UPS_PORT_POWER;
3807 if (sc->sc_isreset)
3808 change |= UPS_C_PORT_RESET;
3809 USETW(ps.wPortStatus, status);
3810 USETW(ps.wPortChange, change);
3811 totlen = uimin(len, sizeof(ps));
3812 memcpy(buf, &ps, totlen);
3813 break;
3814 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
3815 return -1;
3816 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
3817 break;
3818 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
3819 if (index == 1)
3820 port = UHCI_PORTSC1;
3821 else if (index == 2)
3822 port = UHCI_PORTSC2;
3823 else {
3824 return -1;
3825 }
3826 		switch (value) {
3827 case UHF_PORT_ENABLE:
3828 x = URWMASK(UREAD2(sc, port));
3829 UWRITE2(sc, port, x | UHCI_PORTSC_PE);
3830 break;
3831 case UHF_PORT_SUSPEND:
3832 x = URWMASK(UREAD2(sc, port));
3833 UWRITE2(sc, port, x | UHCI_PORTSC_SUSP);
3834 break;
3835 case UHF_PORT_RESET:
3836 err = uhci_portreset(sc, index);
3837 if (err != USBD_NORMAL_COMPLETION)
3838 return -1;
3839 return 0;
3840 case UHF_PORT_POWER:
3841 /* Pretend we turned on power */
3842 return 0;
3843 case UHF_C_PORT_CONNECTION:
3844 case UHF_C_PORT_ENABLE:
3845 case UHF_C_PORT_OVER_CURRENT:
3846 case UHF_PORT_CONNECTION:
3847 case UHF_PORT_OVER_CURRENT:
3848 case UHF_PORT_LOW_SPEED:
3849 case UHF_C_PORT_SUSPEND:
3850 case UHF_C_PORT_RESET:
3851 default:
3852 return -1;
3853 }
3854 break;
3855 default:
3856 /* default from usbroothub */
3857 DPRINTF("returning %jd (usbroothub default)",
3858 buflen, 0, 0, 0);
3859 return buflen;
3860 }
3861
3862 DPRINTF("returning %jd", totlen, 0, 0, 0);
3863
3864 return totlen;
3865 }
3866
3867 /* Abort a root interrupt request. */
3868 void
3869 uhci_root_intr_abort(struct usbd_xfer *xfer)
3870 {
3871 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3872
3873 KASSERT(mutex_owned(&sc->sc_lock));
3874 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
3875
3876 /*
3877 * Try to stop the callout before it starts. If we got in too
3878 * late, too bad; but if the callout had yet to run and time
3879 * out the xfer, cancel it ourselves.
3880 */
3881 callout_stop(&sc->sc_poll_handle);
3882 if (sc->sc_intr_xfer == NULL)
3883 return;
3884
3885 KASSERT(sc->sc_intr_xfer == xfer);
3886 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
3887 xfer->ux_status = USBD_CANCELLED;
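	/*
	 * No hardware completion will run for this xfer, so mark it
	 * done here before handing it to usb_transfer_complete().
	 */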
3888 #ifdef DIAGNOSTIC
3889 UHCI_XFER2UXFER(xfer)->ux_isdone = true;
3890 #endif
3891 usb_transfer_complete(xfer);
3892 }
3893
3894 usbd_status
3895 uhci_root_intr_transfer(struct usbd_xfer *xfer)
3896 {
3897 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3898 usbd_status err;
3899
3900 /* Insert last in queue. */
3901 mutex_enter(&sc->sc_lock);
3902 err = usb_insert_transfer(xfer);
3903 mutex_exit(&sc->sc_lock);
3904 if (err)
3905 return err;
3906
3907 /*
3908 	 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
3909 	 * so start the first transfer on the queue.
3910 */
3911 return uhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
3912 }
3913
3914 /* Start a transfer on the root interrupt pipe */
3915 usbd_status
3916 uhci_root_intr_start(struct usbd_xfer *xfer)
3917 {
3918 struct usbd_pipe *pipe = xfer->ux_pipe;
3919 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3920 unsigned int ival;
3921 const bool polling = sc->sc_bus.ub_usepolling;
3922
3923 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3924 DPRINTF("xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, xfer->ux_length,
3925 xfer->ux_flags, 0);
3926
3927 if (sc->sc_dying)
3928 return USBD_IOERROR;
3929
3930 if (!polling)
3931 mutex_enter(&sc->sc_lock);
3932
3933 KASSERT(sc->sc_intr_xfer == NULL);
3934
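	/*
	 * The UHCI root hub has no status-change interrupt; port changes
	 * are detected by polling from the sc_poll_handle callout at the
	 * interrupt endpoint's bInterval.
	 */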
3935 /* XXX temporary variable needed to avoid gcc3 warning */
3936 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
3937 sc->sc_ival = mstohz(ival);
3938 callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
3939 sc->sc_intr_xfer = xfer;
3940 xfer->ux_status = USBD_IN_PROGRESS;
3941
3942 if (!polling)
3943 mutex_exit(&sc->sc_lock);
3944
3945 return USBD_IN_PROGRESS;
3946 }
3947
3948 /* Close the root interrupt pipe. */
3949 void
3950 uhci_root_intr_close(struct usbd_pipe *pipe)
3951 {
3952 uhci_softc_t *sc __diagused = UHCI_PIPE2SC(pipe);
3953 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3954
3955 KASSERT(mutex_owned(&sc->sc_lock));
3956
3957 /*
3958 * The caller must arrange to have aborted the pipe already, so
3959 * there can be no intr xfer in progress. The callout may
3960 * still be pending from a prior intr xfer -- if it has already
3961 * fired, it will see there is nothing to do, and do nothing.
3962 */
3963 KASSERT(sc->sc_intr_xfer == NULL);
3964 KASSERT(!callout_pending(&sc->sc_poll_handle));
3965 }
3966