1 /* $NetBSD: uhci.c,v 1.279.2.2 2018/09/06 06:56:04 pgoyette Exp $ */
2
3 /*
4 * Copyright (c) 1998, 2004, 2011, 2012 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart (at) augustsson.net) at
9 * Carlstedt Research & Technology, Jared D. McNeill (jmcneill (at) invisible.ca)
10 * and Matthew R. Green (mrg (at) eterna.com.au).
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * USB Universal Host Controller driver.
36 * Handles e.g. PIIX3 and PIIX4.
37 *
38 * UHCI spec: http://www.intel.com/technology/usb/spec.htm
39 * USB spec: http://www.usb.org/developers/docs/
40 * PIIXn spec: ftp://download.intel.com/design/intarch/datashts/29055002.pdf
41 * ftp://download.intel.com/design/intarch/datashts/29056201.pdf
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: uhci.c,v 1.279.2.2 2018/09/06 06:56:04 pgoyette Exp $");
46
47 #ifdef _KERNEL_OPT
48 #include "opt_usb.h"
49 #endif
50
51 #include <sys/param.h>
52
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/device.h>
56 #include <sys/kernel.h>
57 #include <sys/kmem.h>
58 #include <sys/mutex.h>
59 #include <sys/proc.h>
60 #include <sys/queue.h>
61 #include <sys/select.h>
62 #include <sys/sysctl.h>
63 #include <sys/systm.h>
64
65 #include <machine/endian.h>
66
67 #include <dev/usb/usb.h>
68 #include <dev/usb/usbdi.h>
69 #include <dev/usb/usbdivar.h>
70 #include <dev/usb/usb_mem.h>
71
72 #include <dev/usb/uhcireg.h>
73 #include <dev/usb/uhcivar.h>
74 #include <dev/usb/usbroothub.h>
75 #include <dev/usb/usbhist.h>
76
77 /* Use bandwidth reclamation for control transfers. Some devices choke on it. */
78 /*#define UHCI_CTL_LOOP */
79
80 #ifdef UHCI_DEBUG
81 uhci_softc_t *thesc;
82 int uhcinoloop = 0;
83 #endif
84
85 #ifdef USB_DEBUG
86 #ifndef UHCI_DEBUG
87 #define uhcidebug 0
88 #else
89 static int uhcidebug = 0;
90
91 SYSCTL_SETUP(sysctl_hw_uhci_setup, "sysctl hw.uhci setup")
92 {
93 int err;
94 const struct sysctlnode *rnode;
95 const struct sysctlnode *cnode;
96
97 err = sysctl_createv(clog, 0, NULL, &rnode,
98 CTLFLAG_PERMANENT, CTLTYPE_NODE, "uhci",
99 SYSCTL_DESCR("uhci global controls"),
100 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
101
102 if (err)
103 goto fail;
104
105 /* control debugging printfs */
106 err = sysctl_createv(clog, 0, &rnode, &cnode,
107 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
108 "debug", SYSCTL_DESCR("Enable debugging output"),
109 NULL, 0, &uhcidebug, sizeof(uhcidebug), CTL_CREATE, CTL_EOL);
110 if (err)
111 goto fail;
112
113 return;
114 fail:
115 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
116 }
117
118 #endif /* UHCI_DEBUG */
119 #endif /* USB_DEBUG */
120
121 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOGN(uhcidebug,1,FMT,A,B,C,D)
122 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(uhcidebug,N,FMT,A,B,C,D)
123 #define UHCIHIST_FUNC() USBHIST_FUNC()
124 #define UHCIHIST_CALLED(name) USBHIST_CALLED(uhcidebug)
125
126 /*
127 * The UHCI controller is little endian, so on big endian machines
128 * the data stored in memory needs to be swapped.
129 */
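/*
 * Illustrative sketch only (not driver code): the convention used
 * throughout this file is to convert a TD/QH field with htole32()
 * before it is handed to the controller and with le32toh() after it
 * is read back, with usb_syncmem() bracketing the DMA access.  "sqh"
 * and "next" below are hypothetical descriptors standing in for
 * whatever the caller holds.
 */
#if 0
	/* CPU -> controller: store a little endian horizontal link. */
	sqh->qh.qh_hlink = htole32(next->physaddr | UHCI_PTR_QH);
	usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
	    sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);

	/* Controller -> CPU: sync, then convert before testing bits. */
	usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
	    sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTREAD);
	if (le32toh(sqh->qh.qh_hlink) & UHCI_PTR_T)
		printf("end of QH chain\n");
#endif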
130
131 struct uhci_pipe {
132 struct usbd_pipe pipe;
133 int nexttoggle;
134
135 u_char aborting;
136 struct usbd_xfer *abortstart, *abortend;
137
138 /* Info needed for different pipe kinds. */
139 union {
140 /* Control pipe */
141 struct {
142 uhci_soft_qh_t *sqh;
143 usb_dma_t reqdma;
144 uhci_soft_td_t *setup;
145 uhci_soft_td_t *stat;
146 } ctrl;
147 /* Interrupt pipe */
148 struct {
149 int npoll;
150 uhci_soft_qh_t **qhs;
151 } intr;
152 /* Bulk pipe */
153 struct {
154 uhci_soft_qh_t *sqh;
155 } bulk;
156 /* Isochronous pipe */
157 struct isoc {
158 uhci_soft_td_t **stds;
159 int next, inuse;
160 } isoc;
161 };
162 };
163
164 typedef TAILQ_HEAD(ux_completeq, uhci_xfer) ux_completeq_t;
165
166 Static void uhci_globalreset(uhci_softc_t *);
167 Static usbd_status uhci_portreset(uhci_softc_t*, int);
168 Static void uhci_reset(uhci_softc_t *);
169 Static usbd_status uhci_run(uhci_softc_t *, int, int);
170 Static uhci_soft_td_t *uhci_alloc_std(uhci_softc_t *);
171 Static void uhci_free_std(uhci_softc_t *, uhci_soft_td_t *);
172 Static void uhci_free_std_locked(uhci_softc_t *, uhci_soft_td_t *);
173 Static uhci_soft_qh_t *uhci_alloc_sqh(uhci_softc_t *);
174 Static void uhci_free_sqh(uhci_softc_t *, uhci_soft_qh_t *);
175 #if 0
176 Static void uhci_enter_ctl_q(uhci_softc_t *, uhci_soft_qh_t *,
177 uhci_intr_info_t *);
178 Static void uhci_exit_ctl_q(uhci_softc_t *, uhci_soft_qh_t *);
179 #endif
180
181 #if 0
182 Static void uhci_free_std_chain(uhci_softc_t *, uhci_soft_td_t *,
183 uhci_soft_td_t *);
184 #endif
185 Static int uhci_alloc_std_chain(uhci_softc_t *, struct usbd_xfer *,
186 int, int, uhci_soft_td_t **);
187 Static void uhci_free_stds(uhci_softc_t *, struct uhci_xfer *);
188
189 Static void uhci_reset_std_chain(uhci_softc_t *, struct usbd_xfer *,
190 int, int, int *, uhci_soft_td_t **);
191
192 Static void uhci_poll_hub(void *);
193 Static void uhci_check_intr(uhci_softc_t *, struct uhci_xfer *,
194 ux_completeq_t *);
195 Static void uhci_idone(struct uhci_xfer *, ux_completeq_t *);
196
197 Static void uhci_abort_xfer(struct usbd_xfer *, usbd_status);
198
199 Static void uhci_timeout(void *);
200 Static void uhci_timeout_task(void *);
201 Static void uhci_add_ls_ctrl(uhci_softc_t *, uhci_soft_qh_t *);
202 Static void uhci_add_hs_ctrl(uhci_softc_t *, uhci_soft_qh_t *);
203 Static void uhci_add_bulk(uhci_softc_t *, uhci_soft_qh_t *);
204 Static void uhci_remove_ls_ctrl(uhci_softc_t *,uhci_soft_qh_t *);
205 Static void uhci_remove_hs_ctrl(uhci_softc_t *,uhci_soft_qh_t *);
206 Static void uhci_remove_bulk(uhci_softc_t *,uhci_soft_qh_t *);
207 Static void uhci_add_loop(uhci_softc_t *);
208 Static void uhci_rem_loop(uhci_softc_t *);
209
210 Static usbd_status uhci_setup_isoc(struct usbd_pipe *);
211
212 Static struct usbd_xfer *
213 uhci_allocx(struct usbd_bus *, unsigned int);
214 Static void uhci_freex(struct usbd_bus *, struct usbd_xfer *);
215 Static void uhci_get_lock(struct usbd_bus *, kmutex_t **);
216 Static int uhci_roothub_ctrl(struct usbd_bus *,
217 usb_device_request_t *, void *, int);
218
219 Static int uhci_device_ctrl_init(struct usbd_xfer *);
220 Static void uhci_device_ctrl_fini(struct usbd_xfer *);
221 Static usbd_status uhci_device_ctrl_transfer(struct usbd_xfer *);
222 Static usbd_status uhci_device_ctrl_start(struct usbd_xfer *);
223 Static void uhci_device_ctrl_abort(struct usbd_xfer *);
224 Static void uhci_device_ctrl_close(struct usbd_pipe *);
225 Static void uhci_device_ctrl_done(struct usbd_xfer *);
226
227 Static int uhci_device_intr_init(struct usbd_xfer *);
228 Static void uhci_device_intr_fini(struct usbd_xfer *);
229 Static usbd_status uhci_device_intr_transfer(struct usbd_xfer *);
230 Static usbd_status uhci_device_intr_start(struct usbd_xfer *);
231 Static void uhci_device_intr_abort(struct usbd_xfer *);
232 Static void uhci_device_intr_close(struct usbd_pipe *);
233 Static void uhci_device_intr_done(struct usbd_xfer *);
234
235 Static int uhci_device_bulk_init(struct usbd_xfer *);
236 Static void uhci_device_bulk_fini(struct usbd_xfer *);
237 Static usbd_status uhci_device_bulk_transfer(struct usbd_xfer *);
238 Static usbd_status uhci_device_bulk_start(struct usbd_xfer *);
239 Static void uhci_device_bulk_abort(struct usbd_xfer *);
240 Static void uhci_device_bulk_close(struct usbd_pipe *);
241 Static void uhci_device_bulk_done(struct usbd_xfer *);
242
243 Static int uhci_device_isoc_init(struct usbd_xfer *);
244 Static void uhci_device_isoc_fini(struct usbd_xfer *);
245 Static usbd_status uhci_device_isoc_transfer(struct usbd_xfer *);
246 Static void uhci_device_isoc_abort(struct usbd_xfer *);
247 Static void uhci_device_isoc_close(struct usbd_pipe *);
248 Static void uhci_device_isoc_done(struct usbd_xfer *);
249
250 Static usbd_status uhci_root_intr_transfer(struct usbd_xfer *);
251 Static usbd_status uhci_root_intr_start(struct usbd_xfer *);
252 Static void uhci_root_intr_abort(struct usbd_xfer *);
253 Static void uhci_root_intr_close(struct usbd_pipe *);
254 Static void uhci_root_intr_done(struct usbd_xfer *);
255
256 Static usbd_status uhci_open(struct usbd_pipe *);
257 Static void uhci_poll(struct usbd_bus *);
258 Static void uhci_softintr(void *);
259
260 Static void uhci_add_intr(uhci_softc_t *, uhci_soft_qh_t *);
261 Static void uhci_remove_intr(uhci_softc_t *, uhci_soft_qh_t *);
262 Static usbd_status uhci_device_setintr(uhci_softc_t *,
263 struct uhci_pipe *, int);
264
265 Static void uhci_device_clear_toggle(struct usbd_pipe *);
266 Static void uhci_noop(struct usbd_pipe *);
267
268 static inline uhci_soft_qh_t *
269 uhci_find_prev_qh(uhci_soft_qh_t *, uhci_soft_qh_t *);
270
271 #ifdef UHCI_DEBUG
272 Static void uhci_dump_all(uhci_softc_t *);
273 Static void uhci_dumpregs(uhci_softc_t *);
274 Static void uhci_dump_qhs(uhci_soft_qh_t *);
275 Static void uhci_dump_qh(uhci_soft_qh_t *);
276 Static void uhci_dump_tds(uhci_soft_td_t *);
277 Static void uhci_dump_td(uhci_soft_td_t *);
278 Static void uhci_dump_ii(struct uhci_xfer *);
279 void uhci_dump(void);
280 #endif
281
282 #define UBARR(sc) bus_space_barrier((sc)->iot, (sc)->ioh, 0, (sc)->sc_size, \
283 BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE)
284 #define UWRITE1(sc, r, x) \
285 do { UBARR(sc); bus_space_write_1((sc)->iot, (sc)->ioh, (r), (x)); \
286 } while (/*CONSTCOND*/0)
287 #define UWRITE2(sc, r, x) \
288 do { UBARR(sc); bus_space_write_2((sc)->iot, (sc)->ioh, (r), (x)); \
289 } while (/*CONSTCOND*/0)
290 #define UWRITE4(sc, r, x) \
291 do { UBARR(sc); bus_space_write_4((sc)->iot, (sc)->ioh, (r), (x)); \
292 } while (/*CONSTCOND*/0)
293
294 static __inline uint8_t
295 UREAD1(uhci_softc_t *sc, bus_size_t r)
296 {
297
298 UBARR(sc);
299 return bus_space_read_1(sc->iot, sc->ioh, r);
300 }
301
302 static __inline uint16_t
303 UREAD2(uhci_softc_t *sc, bus_size_t r)
304 {
305
306 UBARR(sc);
307 return bus_space_read_2(sc->iot, sc->ioh, r);
308 }
309
310 #ifdef UHCI_DEBUG
311 static __inline uint32_t
312 UREAD4(uhci_softc_t *sc, bus_size_t r)
313 {
314
315 UBARR(sc);
316 return bus_space_read_4(sc->iot, sc->ioh, r);
317 }
318 #endif
319
320 #define UHCICMD(sc, cmd) UWRITE2(sc, UHCI_CMD, cmd)
321 #define UHCISTS(sc) UREAD2(sc, UHCI_STS)
322
323 #define UHCI_RESET_TIMEOUT 100 /* ms, reset timeout */
324
325 #define UHCI_CURFRAME(sc) (UREAD2(sc, UHCI_FRNUM) & UHCI_FRNUM_MASK)
326
327 const struct usbd_bus_methods uhci_bus_methods = {
328 .ubm_open = uhci_open,
329 .ubm_softint = uhci_softintr,
330 .ubm_dopoll = uhci_poll,
331 .ubm_allocx = uhci_allocx,
332 .ubm_freex = uhci_freex,
333 .ubm_getlock = uhci_get_lock,
334 .ubm_rhctrl = uhci_roothub_ctrl,
335 };
336
337 const struct usbd_pipe_methods uhci_root_intr_methods = {
338 .upm_transfer = uhci_root_intr_transfer,
339 .upm_start = uhci_root_intr_start,
340 .upm_abort = uhci_root_intr_abort,
341 .upm_close = uhci_root_intr_close,
342 .upm_cleartoggle = uhci_noop,
343 .upm_done = uhci_root_intr_done,
344 };
345
346 const struct usbd_pipe_methods uhci_device_ctrl_methods = {
347 .upm_init = uhci_device_ctrl_init,
348 .upm_fini = uhci_device_ctrl_fini,
349 .upm_transfer = uhci_device_ctrl_transfer,
350 .upm_start = uhci_device_ctrl_start,
351 .upm_abort = uhci_device_ctrl_abort,
352 .upm_close = uhci_device_ctrl_close,
353 .upm_cleartoggle = uhci_noop,
354 .upm_done = uhci_device_ctrl_done,
355 };
356
357 const struct usbd_pipe_methods uhci_device_intr_methods = {
358 .upm_init = uhci_device_intr_init,
359 .upm_fini = uhci_device_intr_fini,
360 .upm_transfer = uhci_device_intr_transfer,
361 .upm_start = uhci_device_intr_start,
362 .upm_abort = uhci_device_intr_abort,
363 .upm_close = uhci_device_intr_close,
364 .upm_cleartoggle = uhci_device_clear_toggle,
365 .upm_done = uhci_device_intr_done,
366 };
367
368 const struct usbd_pipe_methods uhci_device_bulk_methods = {
369 .upm_init = uhci_device_bulk_init,
370 .upm_fini = uhci_device_bulk_fini,
371 .upm_transfer = uhci_device_bulk_transfer,
372 .upm_start = uhci_device_bulk_start,
373 .upm_abort = uhci_device_bulk_abort,
374 .upm_close = uhci_device_bulk_close,
375 .upm_cleartoggle = uhci_device_clear_toggle,
376 .upm_done = uhci_device_bulk_done,
377 };
378
379 const struct usbd_pipe_methods uhci_device_isoc_methods = {
380 .upm_init = uhci_device_isoc_init,
381 .upm_fini = uhci_device_isoc_fini,
382 .upm_transfer = uhci_device_isoc_transfer,
383 .upm_abort = uhci_device_isoc_abort,
384 .upm_close = uhci_device_isoc_close,
385 .upm_cleartoggle = uhci_noop,
386 .upm_done = uhci_device_isoc_done,
387 };
388
389 static inline void
390 uhci_add_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux)
391 {
392
393 TAILQ_INSERT_TAIL(&sc->sc_intrhead, ux, ux_list);
394 }
395
396 static inline void
397 uhci_del_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux)
398 {
399
400 TAILQ_REMOVE(&sc->sc_intrhead, ux, ux_list);
401 }
402
403 static inline uhci_soft_qh_t *
404 uhci_find_prev_qh(uhci_soft_qh_t *pqh, uhci_soft_qh_t *sqh)
405 {
406 UHCIHIST_FUNC(); UHCIHIST_CALLED();
407 DPRINTFN(15, "pqh=%#jx sqh=%#jx", (uintptr_t)pqh, (uintptr_t)sqh, 0, 0);
408
409 for (; pqh->hlink != sqh; pqh = pqh->hlink) {
410 #if defined(DIAGNOSTIC) || defined(UHCI_DEBUG)
411 usb_syncmem(&pqh->dma,
412 pqh->offs + offsetof(uhci_qh_t, qh_hlink),
413 sizeof(pqh->qh.qh_hlink),
414 BUS_DMASYNC_POSTWRITE);
415 if (le32toh(pqh->qh.qh_hlink) & UHCI_PTR_T) {
416 printf("%s: QH not found\n", __func__);
417 return NULL;
418 }
419 #endif
420 }
421 return pqh;
422 }
423
424 void
425 uhci_globalreset(uhci_softc_t *sc)
426 {
427 UHCICMD(sc, UHCI_CMD_GRESET); /* global reset */
428 usb_delay_ms(&sc->sc_bus, USB_BUS_RESET_DELAY); /* wait a little */
429 UHCICMD(sc, 0); /* do nothing */
430 }
431
432 int
433 uhci_init(uhci_softc_t *sc)
434 {
435 usbd_status err;
436 int i, j;
437 uhci_soft_qh_t *clsqh, *chsqh, *bsqh, *sqh, *lsqh;
438 uhci_soft_td_t *std;
439
440 UHCIHIST_FUNC(); UHCIHIST_CALLED();
441
442 #ifdef UHCI_DEBUG
443 thesc = sc;
444
445 if (uhcidebug >= 2)
446 uhci_dumpregs(sc);
447 #endif
448
449 sc->sc_suspend = PWR_RESUME;
450
451 UWRITE2(sc, UHCI_INTR, 0); /* disable interrupts */
452 uhci_globalreset(sc); /* reset the controller */
453 uhci_reset(sc);
454
455 /* Allocate and initialize real frame array. */
456 err = usb_allocmem(&sc->sc_bus,
457 UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t),
458 UHCI_FRAMELIST_ALIGN, &sc->sc_dma);
459 if (err)
460 return err;
461 sc->sc_pframes = KERNADDR(&sc->sc_dma, 0);
462 UWRITE2(sc, UHCI_FRNUM, 0); /* set frame number to 0 */
463 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0)); /* set frame list*/
464
465 /* Initialise mutex early for uhci_alloc_* */
466 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
467 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
468
469 /*
470 * Allocate a TD, inactive, that hangs from the last QH.
471 * This is to avoid a bug in the PIIX that makes it run berserk
472 * otherwise.
473 */
474 std = uhci_alloc_std(sc);
475 if (std == NULL)
476 return ENOMEM;
477 std->link.std = NULL;
478 std->td.td_link = htole32(UHCI_PTR_T);
479 std->td.td_status = htole32(0); /* inactive */
480 std->td.td_token = htole32(0);
481 std->td.td_buffer = htole32(0);
482 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
483 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
484
485 /* Allocate the dummy QH marking the end and used for looping the QHs.*/
486 lsqh = uhci_alloc_sqh(sc);
487 if (lsqh == NULL)
488 goto fail1;
489 lsqh->hlink = NULL;
490 lsqh->qh.qh_hlink = htole32(UHCI_PTR_T); /* end of QH chain */
491 lsqh->elink = std;
492 lsqh->qh.qh_elink = htole32(std->physaddr | UHCI_PTR_TD);
493 sc->sc_last_qh = lsqh;
494 usb_syncmem(&lsqh->dma, lsqh->offs, sizeof(lsqh->qh),
495 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
496
497 /* Allocate the dummy QH where bulk traffic will be queued. */
498 bsqh = uhci_alloc_sqh(sc);
499 if (bsqh == NULL)
500 goto fail2;
501 bsqh->hlink = lsqh;
502 bsqh->qh.qh_hlink = htole32(lsqh->physaddr | UHCI_PTR_QH);
503 bsqh->elink = NULL;
504 bsqh->qh.qh_elink = htole32(UHCI_PTR_T);
505 sc->sc_bulk_start = sc->sc_bulk_end = bsqh;
506 usb_syncmem(&bsqh->dma, bsqh->offs, sizeof(bsqh->qh),
507 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
508
509 /* Allocate dummy QH where high speed control traffic will be queued. */
510 chsqh = uhci_alloc_sqh(sc);
511 if (chsqh == NULL)
512 goto fail3;
513 chsqh->hlink = bsqh;
514 chsqh->qh.qh_hlink = htole32(bsqh->physaddr | UHCI_PTR_QH);
515 chsqh->elink = NULL;
516 chsqh->qh.qh_elink = htole32(UHCI_PTR_T);
517 sc->sc_hctl_start = sc->sc_hctl_end = chsqh;
518 usb_syncmem(&chsqh->dma, chsqh->offs, sizeof(chsqh->qh),
519 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
520
521 /* Allocate dummy QH where control traffic will be queued. */
522 clsqh = uhci_alloc_sqh(sc);
523 if (clsqh == NULL)
524 goto fail4;
525 clsqh->hlink = chsqh;
526 clsqh->qh.qh_hlink = htole32(chsqh->physaddr | UHCI_PTR_QH);
527 clsqh->elink = NULL;
528 clsqh->qh.qh_elink = htole32(UHCI_PTR_T);
529 sc->sc_lctl_start = sc->sc_lctl_end = clsqh;
530 usb_syncmem(&clsqh->dma, clsqh->offs, sizeof(clsqh->qh),
531 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
532
533 /*
534 * Make all (virtual) frame list pointers point to the interrupt
535 * queue heads, point the interrupt queue heads at the control
536 * queue head, and point the physical frame list to the virtual.
537 */
538 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
539 std = uhci_alloc_std(sc);
540 sqh = uhci_alloc_sqh(sc);
541 if (std == NULL || sqh == NULL)
542 return ENOMEM;
543 std->link.sqh = sqh;
544 std->td.td_link = htole32(sqh->physaddr | UHCI_PTR_QH);
545 std->td.td_status = htole32(UHCI_TD_IOS); /* iso, inactive */
546 std->td.td_token = htole32(0);
547 std->td.td_buffer = htole32(0);
548 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
549 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
550 sqh->hlink = clsqh;
551 sqh->qh.qh_hlink = htole32(clsqh->physaddr | UHCI_PTR_QH);
552 sqh->elink = NULL;
553 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
554 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
555 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
556 sc->sc_vframes[i].htd = std;
557 sc->sc_vframes[i].etd = std;
558 sc->sc_vframes[i].hqh = sqh;
559 sc->sc_vframes[i].eqh = sqh;
560 for (j = i;
561 j < UHCI_FRAMELIST_COUNT;
562 j += UHCI_VFRAMELIST_COUNT)
563 sc->sc_pframes[j] = htole32(std->physaddr);
564 }
565 usb_syncmem(&sc->sc_dma, 0,
566 UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t),
567 BUS_DMASYNC_PREWRITE);
568
569
570 TAILQ_INIT(&sc->sc_intrhead);
571
572 sc->sc_xferpool = pool_cache_init(sizeof(struct uhci_xfer), 0, 0, 0,
573 "uhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
574
575 callout_init(&sc->sc_poll_handle, CALLOUT_MPSAFE);
576
577 /* Set up the bus struct. */
578 sc->sc_bus.ub_methods = &uhci_bus_methods;
579 sc->sc_bus.ub_pipesize = sizeof(struct uhci_pipe);
580 sc->sc_bus.ub_usedma = true;
581
582 UHCICMD(sc, UHCI_CMD_MAXP); /* Assume 64 byte packets at frame end */
583
584 DPRINTF("Enabling...", 0, 0, 0, 0);
585
586 err = uhci_run(sc, 1, 0); /* and here we go... */
587 UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE | UHCI_INTR_RIE |
588 UHCI_INTR_IOCE | UHCI_INTR_SPIE); /* enable interrupts */
589 return err;
590
591 fail4:
592 uhci_free_sqh(sc, chsqh);
593 fail3:
594 uhci_free_sqh(sc, bsqh);
595 fail2:
596 uhci_free_sqh(sc, lsqh);
597 fail1:
598 uhci_free_std(sc, std);
599
600 return ENOMEM;
601 }
602
603 int
604 uhci_activate(device_t self, enum devact act)
605 {
606 struct uhci_softc *sc = device_private(self);
607
608 switch (act) {
609 case DVACT_DEACTIVATE:
610 sc->sc_dying = 1;
611 return 0;
612 default:
613 return EOPNOTSUPP;
614 }
615 }
616
617 void
618 uhci_childdet(device_t self, device_t child)
619 {
620 struct uhci_softc *sc = device_private(self);
621
622 KASSERT(sc->sc_child == child);
623 sc->sc_child = NULL;
624 }
625
626 int
627 uhci_detach(struct uhci_softc *sc, int flags)
628 {
629 int rv = 0;
630
631 if (sc->sc_child != NULL)
632 rv = config_detach(sc->sc_child, flags);
633
634 if (rv != 0)
635 return rv;
636
637 callout_halt(&sc->sc_poll_handle, NULL);
638 callout_destroy(&sc->sc_poll_handle);
639
640 mutex_destroy(&sc->sc_lock);
641 mutex_destroy(&sc->sc_intr_lock);
642
643 pool_cache_destroy(sc->sc_xferpool);
644
645 /* XXX free other data structures XXX */
646
647 return rv;
648 }
649
650 struct usbd_xfer *
651 uhci_allocx(struct usbd_bus *bus, unsigned int nframes)
652 {
653 struct uhci_softc *sc = UHCI_BUS2SC(bus);
654 struct usbd_xfer *xfer;
655
656 xfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
657 if (xfer != NULL) {
658 memset(xfer, 0, sizeof(struct uhci_xfer));
659
660 /* Initialise this always so we can call remove on it. */
661 usb_init_task(&xfer->ux_aborttask, uhci_timeout_task, xfer,
662 USB_TASKQ_MPSAFE);
663 #ifdef DIAGNOSTIC
664 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
665 uxfer->ux_isdone = true;
666 xfer->ux_state = XFER_BUSY;
667 #endif
668 }
669 return xfer;
670 }
671
672 void
673 uhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
674 {
675 struct uhci_softc *sc = UHCI_BUS2SC(bus);
676 struct uhci_xfer *uxfer __diagused = UHCI_XFER2UXFER(xfer);
677
678 KASSERTMSG(xfer->ux_state == XFER_BUSY, "xfer %p state %d\n", xfer,
679 xfer->ux_state);
680 KASSERTMSG(uxfer->ux_isdone, "xfer %p not done\n", xfer);
681 #ifdef DIAGNOSTIC
682 xfer->ux_state = XFER_FREE;
683 #endif
684 pool_cache_put(sc->sc_xferpool, xfer);
685 }
686
687 Static void
688 uhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
689 {
690 struct uhci_softc *sc = UHCI_BUS2SC(bus);
691
692 *lock = &sc->sc_lock;
693 }
694
695
696 /*
697 * Handle suspend/resume.
698 *
699 * We need to switch to polling mode here, because this routine is
700 * called from an interrupt context. This is all right since we
701 * are almost suspended anyway.
702 */
703 bool
704 uhci_resume(device_t dv, const pmf_qual_t *qual)
705 {
706 uhci_softc_t *sc = device_private(dv);
707 int cmd;
708
709 mutex_spin_enter(&sc->sc_intr_lock);
710
711 cmd = UREAD2(sc, UHCI_CMD);
712 sc->sc_bus.ub_usepolling++;
713 UWRITE2(sc, UHCI_INTR, 0);
714 uhci_globalreset(sc);
715 uhci_reset(sc);
716 if (cmd & UHCI_CMD_RS)
717 uhci_run(sc, 0, 1);
718
719 /* restore saved state */
720 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0));
721 UWRITE2(sc, UHCI_FRNUM, sc->sc_saved_frnum);
722 UWRITE1(sc, UHCI_SOF, sc->sc_saved_sof);
723
724 UHCICMD(sc, cmd | UHCI_CMD_FGR); /* force resume */
725 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_DELAY, &sc->sc_intr_lock);
726 UHCICMD(sc, cmd & ~UHCI_CMD_EGSM); /* back to normal */
727 UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE |
728 UHCI_INTR_RIE | UHCI_INTR_IOCE | UHCI_INTR_SPIE);
729 UHCICMD(sc, UHCI_CMD_MAXP);
730 uhci_run(sc, 1, 1); /* and start traffic again */
731 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_RECOVERY, &sc->sc_intr_lock);
732 sc->sc_bus.ub_usepolling--;
733 if (sc->sc_intr_xfer != NULL)
734 callout_reset(&sc->sc_poll_handle, sc->sc_ival, uhci_poll_hub,
735 sc->sc_intr_xfer);
736 #ifdef UHCI_DEBUG
737 if (uhcidebug >= 2)
738 uhci_dumpregs(sc);
739 #endif
740
741 sc->sc_suspend = PWR_RESUME;
742 mutex_spin_exit(&sc->sc_intr_lock);
743
744 return true;
745 }
746
747 bool
748 uhci_suspend(device_t dv, const pmf_qual_t *qual)
749 {
750 uhci_softc_t *sc = device_private(dv);
751 int cmd;
752
753 mutex_spin_enter(&sc->sc_intr_lock);
754
755 cmd = UREAD2(sc, UHCI_CMD);
756
757 #ifdef UHCI_DEBUG
758 if (uhcidebug >= 2)
759 uhci_dumpregs(sc);
760 #endif
761 if (sc->sc_intr_xfer != NULL)
762 callout_stop(&sc->sc_poll_handle);
763 sc->sc_suspend = PWR_SUSPEND;
764 sc->sc_bus.ub_usepolling++;
765
766 uhci_run(sc, 0, 1); /* stop the controller */
767 cmd &= ~UHCI_CMD_RS;
768
769 /* save some state if BIOS doesn't */
770 sc->sc_saved_frnum = UREAD2(sc, UHCI_FRNUM);
771 sc->sc_saved_sof = UREAD1(sc, UHCI_SOF);
772
773 UWRITE2(sc, UHCI_INTR, 0); /* disable intrs */
774
775 UHCICMD(sc, cmd | UHCI_CMD_EGSM); /* enter suspend */
776 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_WAIT, &sc->sc_intr_lock);
777 sc->sc_bus.ub_usepolling--;
778
779 mutex_spin_exit(&sc->sc_intr_lock);
780
781 return true;
782 }
783
784 #ifdef UHCI_DEBUG
785 Static void
786 uhci_dumpregs(uhci_softc_t *sc)
787 {
788 UHCIHIST_FUNC(); UHCIHIST_CALLED();
789 DPRINTF("cmd =%04jx sts =%04jx intr =%04jx frnum =%04jx",
790 UREAD2(sc, UHCI_CMD), UREAD2(sc, UHCI_STS),
791 UREAD2(sc, UHCI_INTR), UREAD2(sc, UHCI_FRNUM));
792 DPRINTF("sof =%04jx portsc1=%04jx portsc2=%04jx flbase=%08jx",
793 UREAD1(sc, UHCI_SOF), UREAD2(sc, UHCI_PORTSC1),
794 UREAD2(sc, UHCI_PORTSC2), UREAD4(sc, UHCI_FLBASEADDR));
795 }
796
797 void
798 uhci_dump_td(uhci_soft_td_t *p)
799 {
800 UHCIHIST_FUNC(); UHCIHIST_CALLED();
801
802 usb_syncmem(&p->dma, p->offs, sizeof(p->td),
803 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
804
805 DPRINTF("TD(%#jx) at 0x%08jx", (uintptr_t)p, p->physaddr, 0, 0);
806 DPRINTF(" link=0x%08jx status=0x%08jx "
807 "token=0x%08x buffer=0x%08x",
808 le32toh(p->td.td_link),
809 le32toh(p->td.td_status),
810 le32toh(p->td.td_token),
811 le32toh(p->td.td_buffer));
812
813 DPRINTF("bitstuff=%jd crcto =%jd nak =%jd babble =%jd",
814 !!(le32toh(p->td.td_status) & UHCI_TD_BITSTUFF),
815 !!(le32toh(p->td.td_status) & UHCI_TD_CRCTO),
816 !!(le32toh(p->td.td_status) & UHCI_TD_NAK),
817 !!(le32toh(p->td.td_status) & UHCI_TD_BABBLE));
818 DPRINTF("dbuffer =%jd stalled =%jd active =%jd ioc =%jd",
819 !!(le32toh(p->td.td_status) & UHCI_TD_DBUFFER),
820 !!(le32toh(p->td.td_status) & UHCI_TD_STALLED),
821 !!(le32toh(p->td.td_status) & UHCI_TD_ACTIVE),
822 !!(le32toh(p->td.td_status) & UHCI_TD_IOC));
823 DPRINTF("ios =%jd ls =%jd spd =%jd",
824 !!(le32toh(p->td.td_status) & UHCI_TD_IOS),
825 !!(le32toh(p->td.td_status) & UHCI_TD_LS),
826 !!(le32toh(p->td.td_status) & UHCI_TD_SPD), 0);
827 DPRINTF("errcnt =%jd actlen =%jd pid=%02jx",
828 UHCI_TD_GET_ERRCNT(le32toh(p->td.td_status)),
829 UHCI_TD_GET_ACTLEN(le32toh(p->td.td_status)),
830 UHCI_TD_GET_PID(le32toh(p->td.td_token)), 0);
831 DPRINTF("addr=%jd endpt=%jd D=%jd maxlen=%jd,",
832 UHCI_TD_GET_DEVADDR(le32toh(p->td.td_token)),
833 UHCI_TD_GET_ENDPT(le32toh(p->td.td_token)),
834 UHCI_TD_GET_DT(le32toh(p->td.td_token)),
835 UHCI_TD_GET_MAXLEN(le32toh(p->td.td_token)));
836 }
837
838 void
839 uhci_dump_qh(uhci_soft_qh_t *sqh)
840 {
841 UHCIHIST_FUNC(); UHCIHIST_CALLED();
842
843 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
844 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
845
846 DPRINTF("QH(%#jx) at 0x%08jx: hlink=%08jx elink=%08jx", (uintptr_t)sqh,
847 (int)sqh->physaddr, le32toh(sqh->qh.qh_hlink),
848 le32toh(sqh->qh.qh_elink));
849
850 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD);
851 }
852
853
854 #if 1
855 void
856 uhci_dump(void)
857 {
858 uhci_dump_all(thesc);
859 }
860 #endif
861
862 void
863 uhci_dump_all(uhci_softc_t *sc)
864 {
865 uhci_dumpregs(sc);
866 /*printf("framelist[i].link = %08x\n", sc->sc_framelist[0].link);*/
867 uhci_dump_qhs(sc->sc_lctl_start);
868 }
869
870
871 void
872 uhci_dump_qhs(uhci_soft_qh_t *sqh)
873 {
874 UHCIHIST_FUNC(); UHCIHIST_CALLED();
875
876 uhci_dump_qh(sqh);
877
878 /*
879 * uhci_dump_qhs displays all the QHs and TDs from the given QH onwards.
880 * Traverses sideways first, then down.
881 *
882 * QH1
883 * QH2
884 * No QH
885 * TD2.1
886 * TD2.2
887 * TD1.1
888 * etc.
889 *
890 * TD2.x being the TDs queued at QH2 and QH2 being referenced from QH1.
891 */
892
893 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
894 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
895 if (sqh->hlink != NULL && !(le32toh(sqh->qh.qh_hlink) & UHCI_PTR_T))
896 uhci_dump_qhs(sqh->hlink);
897 else
898 DPRINTF("No QH", 0, 0, 0, 0);
899 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD);
900
901 if (sqh->elink != NULL && !(le32toh(sqh->qh.qh_elink) & UHCI_PTR_T))
902 uhci_dump_tds(sqh->elink);
903 else
904 DPRINTF("No TD", 0, 0, 0, 0);
905 }
906
907 void
908 uhci_dump_tds(uhci_soft_td_t *std)
909 {
910 uhci_soft_td_t *td;
911 int stop;
912
913 for (td = std; td != NULL; td = td->link.std) {
914 uhci_dump_td(td);
915
916 /*
917 * Check whether the link pointer in this TD marks
918 * the link pointer as end of queue. This avoids
919 * printing the free list in case the queue/TD has
920 * already been moved there (seatbelt).
921 */
922 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link),
923 sizeof(td->td.td_link),
924 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
925 stop = (le32toh(td->td.td_link) & UHCI_PTR_T ||
926 le32toh(td->td.td_link) == 0);
927 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link),
928 sizeof(td->td.td_link), BUS_DMASYNC_PREREAD);
929 if (stop)
930 break;
931 }
932 }
933
934 Static void
935 uhci_dump_ii(struct uhci_xfer *ux)
936 {
937 struct usbd_pipe *pipe;
938 usb_endpoint_descriptor_t *ed;
939 struct usbd_device *dev;
940
941 if (ux == NULL) {
942 printf("ux NULL\n");
943 return;
944 }
945 pipe = ux->ux_xfer.ux_pipe;
946 if (pipe == NULL) {
947 printf("ux %p: done=%d pipe=NULL\n", ux, ux->ux_isdone);
948 return;
949 }
950 if (pipe->up_endpoint == NULL) {
951 printf("ux %p: done=%d pipe=%p pipe->up_endpoint=NULL\n",
952 ux, ux->ux_isdone, pipe);
953 return;
954 }
955 if (pipe->up_dev == NULL) {
956 printf("ux %p: done=%d pipe=%p pipe->up_dev=NULL\n",
957 ux, ux->ux_isdone, pipe);
958 return;
959 }
960 ed = pipe->up_endpoint->ue_edesc;
961 dev = pipe->up_dev;
962 printf("ux %p: done=%d dev=%p vid=0x%04x pid=0x%04x addr=%d pipe=%p ep=0x%02x attr=0x%02x\n",
963 ux, ux->ux_isdone, dev,
964 UGETW(dev->ud_ddesc.idVendor),
965 UGETW(dev->ud_ddesc.idProduct),
966 dev->ud_addr, pipe,
967 ed->bEndpointAddress, ed->bmAttributes);
968 }
969
970 void uhci_dump_iis(struct uhci_softc *sc);
971 void
972 uhci_dump_iis(struct uhci_softc *sc)
973 {
974 struct uhci_xfer *ux;
975
976 printf("interrupt list:\n");
977 TAILQ_FOREACH(ux, &sc->sc_intrhead, ux_list)
978 uhci_dump_ii(ux);
979 }
980
981 void iidump(void);
982 void iidump(void) { uhci_dump_iis(thesc); }
983
984 #endif
985
986 /*
987 * This routine is executed periodically and simulates interrupts
988 * from the root controller interrupt pipe for port status change.
989 */
990 void
991 uhci_poll_hub(void *addr)
992 {
993 struct usbd_xfer *xfer = addr;
994 struct usbd_pipe *pipe = xfer->ux_pipe;
995 uhci_softc_t *sc;
996 u_char *p;
997
998 UHCIHIST_FUNC(); UHCIHIST_CALLED();
999
1000 if (__predict_false(pipe->up_dev == NULL || pipe->up_dev->ud_bus == NULL))
1001 return; /* device has detached */
1002 sc = UHCI_PIPE2SC(pipe);
1003 callout_reset(&sc->sc_poll_handle, sc->sc_ival, uhci_poll_hub, xfer);
1004
1005 p = xfer->ux_buf;
1006 p[0] = 0;
1007 if (UREAD2(sc, UHCI_PORTSC1) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC))
1008 p[0] |= 1<<1;
1009 if (UREAD2(sc, UHCI_PORTSC2) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC))
1010 p[0] |= 1<<2;
1011 if (p[0] == 0)
1012 /* No change, try again in a while */
1013 return;
1014
1015 xfer->ux_actlen = 1;
1016 xfer->ux_status = USBD_NORMAL_COMPLETION;
1017 mutex_enter(&sc->sc_lock);
1018 usb_transfer_complete(xfer);
1019 mutex_exit(&sc->sc_lock);
1020 }
1021
1022 void
1023 uhci_root_intr_done(struct usbd_xfer *xfer)
1024 {
1025 }
1026
1027 /*
1028 * Let the last QH loop back to the high speed control transfer QH.
1029 * This is what Intel calls "bandwidth reclamation" and improves
1030 * USB performance a lot for some devices.
1031 * If we are already looping, just count it.
1032 */
1033 void
1034 uhci_add_loop(uhci_softc_t *sc)
1035 {
1036 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1037
1038 #ifdef UHCI_DEBUG
1039 if (uhcinoloop)
1040 return;
1041 #endif
1042 if (++sc->sc_loops == 1) {
1043 DPRINTFN(5, "add loop", 0, 0, 0, 0);
1044 /* Note, we don't loop back the soft pointer. */
1045 sc->sc_last_qh->qh.qh_hlink =
1046 htole32(sc->sc_hctl_start->physaddr | UHCI_PTR_QH);
1047 usb_syncmem(&sc->sc_last_qh->dma,
1048 sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink),
1049 sizeof(sc->sc_last_qh->qh.qh_hlink),
1050 BUS_DMASYNC_PREWRITE);
1051 }
1052 }
1053
1054 void
1055 uhci_rem_loop(uhci_softc_t *sc)
1056 {
1057 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1058
1059 #ifdef UHCI_DEBUG
1060 if (uhcinoloop)
1061 return;
1062 #endif
1063 if (--sc->sc_loops == 0) {
1064 DPRINTFN(5, "remove loop", 0, 0, 0, 0);
1065 sc->sc_last_qh->qh.qh_hlink = htole32(UHCI_PTR_T);
1066 usb_syncmem(&sc->sc_last_qh->dma,
1067 sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink),
1068 sizeof(sc->sc_last_qh->qh.qh_hlink),
1069 BUS_DMASYNC_PREWRITE);
1070 }
1071 }
1072
1073 /* Add high speed control QH, called with lock held. */
1074 void
1075 uhci_add_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1076 {
1077 uhci_soft_qh_t *eqh;
1078
1079 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1080
1081 KASSERT(mutex_owned(&sc->sc_lock));
1082
1083 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1084 eqh = sc->sc_hctl_end;
1085 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1086 sizeof(eqh->qh.qh_hlink),
1087 BUS_DMASYNC_POSTWRITE);
1088 sqh->hlink = eqh->hlink;
1089 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1090 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1091 BUS_DMASYNC_PREWRITE);
1092 eqh->hlink = sqh;
1093 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1094 sc->sc_hctl_end = sqh;
1095 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1096 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1097 #ifdef UHCI_CTL_LOOP
1098 uhci_add_loop(sc);
1099 #endif
1100 }
1101
1102 /* Remove high speed control QH, called with lock held. */
1103 void
1104 uhci_remove_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1105 {
1106 uhci_soft_qh_t *pqh;
1107 uint32_t elink;
1108
1109 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1110
1111 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1112 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1113 #ifdef UHCI_CTL_LOOP
1114 uhci_rem_loop(sc);
1115 #endif
1116 /*
1117 * The T bit should be set in the elink of the QH so that the HC
1118 * doesn't follow the pointer. This condition may fail if the
1119 * transferred packet was short so that the QH still points
1120 * at the last used TD.
1121 * In this case we set the T bit and wait a little for the HC
1122 * to stop looking at the TD.
1123 * Note that if the TD chain is large enough, the controller
1124 * may still be looking at the chain at the end of this function.
1125 * uhci_free_std_chain() will make sure the controller stops
1126 * looking at it quickly, but until then we should not change
1127 * sqh->hlink.
1128 */
1129 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1130 sizeof(sqh->qh.qh_elink),
1131 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1132 elink = le32toh(sqh->qh.qh_elink);
1133 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1134 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD);
1135 if (!(elink & UHCI_PTR_T)) {
1136 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1137 usb_syncmem(&sqh->dma,
1138 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1139 sizeof(sqh->qh.qh_elink),
1140 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1141 delay(UHCI_QH_REMOVE_DELAY);
1142 }
1143
1144 pqh = uhci_find_prev_qh(sc->sc_hctl_start, sqh);
1145 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1146 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1147 pqh->hlink = sqh->hlink;
1148 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1149 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1150 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1151 delay(UHCI_QH_REMOVE_DELAY);
1152 if (sc->sc_hctl_end == sqh)
1153 sc->sc_hctl_end = pqh;
1154 }
1155
1156 /* Add low speed control QH, called with lock held. */
1157 void
1158 uhci_add_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1159 {
1160 uhci_soft_qh_t *eqh;
1161
1162 KASSERT(mutex_owned(&sc->sc_lock));
1163
1164 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1165 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1166
1167 eqh = sc->sc_lctl_end;
1168 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1169 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1170 sqh->hlink = eqh->hlink;
1171 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1172 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1173 BUS_DMASYNC_PREWRITE);
1174 eqh->hlink = sqh;
1175 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1176 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1177 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1178 sc->sc_lctl_end = sqh;
1179 }
1180
1181 /* Remove low speed control QH, called with lock held. */
1182 void
1183 uhci_remove_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1184 {
1185 uhci_soft_qh_t *pqh;
1186 uint32_t elink;
1187
1188 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1189
1190 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1191 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1192
1193 /* See comment in uhci_remove_hs_ctrl() */
1194 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1195 sizeof(sqh->qh.qh_elink),
1196 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1197 elink = le32toh(sqh->qh.qh_elink);
1198 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1199 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD);
1200 if (!(elink & UHCI_PTR_T)) {
1201 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1202 usb_syncmem(&sqh->dma,
1203 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1204 sizeof(sqh->qh.qh_elink),
1205 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1206 delay(UHCI_QH_REMOVE_DELAY);
1207 }
1208 pqh = uhci_find_prev_qh(sc->sc_lctl_start, sqh);
1209 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1210 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1211 pqh->hlink = sqh->hlink;
1212 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1213 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1214 sizeof(pqh->qh.qh_hlink),
1215 BUS_DMASYNC_PREWRITE);
1216 delay(UHCI_QH_REMOVE_DELAY);
1217 if (sc->sc_lctl_end == sqh)
1218 sc->sc_lctl_end = pqh;
1219 }
1220
1221 /* Add bulk QH, called with lock held. */
1222 void
1223 uhci_add_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1224 {
1225 uhci_soft_qh_t *eqh;
1226
1227 KASSERT(mutex_owned(&sc->sc_lock));
1228
1229 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1230 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1231
1232 eqh = sc->sc_bulk_end;
1233 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1234 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1235 sqh->hlink = eqh->hlink;
1236 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
1237 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1238 BUS_DMASYNC_PREWRITE);
1239 eqh->hlink = sqh;
1240 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
1241 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
1242 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1243 sc->sc_bulk_end = sqh;
1244 uhci_add_loop(sc);
1245 }
1246
1247 /* Remove bulk QH, called with lock held. */
1248 void
1249 uhci_remove_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1250 {
1251 uhci_soft_qh_t *pqh;
1252
1253 KASSERT(mutex_owned(&sc->sc_lock));
1254
1255 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1256 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0);
1257
1258 uhci_rem_loop(sc);
1259 /* See comment in uhci_remove_hs_ctrl() */
1260 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
1261 sizeof(sqh->qh.qh_elink),
1262 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1263 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) {
1264 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
1265 usb_syncmem(&sqh->dma,
1266 sqh->offs + offsetof(uhci_qh_t, qh_elink),
1267 sizeof(sqh->qh.qh_elink),
1268 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1269 delay(UHCI_QH_REMOVE_DELAY);
1270 }
1271 pqh = uhci_find_prev_qh(sc->sc_bulk_start, sqh);
1272 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
1273 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE);
1274 pqh->hlink = sqh->hlink;
1275 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
1276 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
1277 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE);
1278 delay(UHCI_QH_REMOVE_DELAY);
1279 if (sc->sc_bulk_end == sqh)
1280 sc->sc_bulk_end = pqh;
1281 }
1282
1283 Static int uhci_intr1(uhci_softc_t *);
1284
1285 int
1286 uhci_intr(void *arg)
1287 {
1288 uhci_softc_t *sc = arg;
1289 int ret = 0;
1290
1291 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1292
1293 mutex_spin_enter(&sc->sc_intr_lock);
1294
1295 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1296 goto done;
1297
1298 if (sc->sc_bus.ub_usepolling || UREAD2(sc, UHCI_INTR) == 0) {
1299 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1300 goto done;
1301 }
1302
1303 ret = uhci_intr1(sc);
1304
1305 done:
1306 mutex_spin_exit(&sc->sc_intr_lock);
1307 return ret;
1308 }
1309
1310 int
1311 uhci_intr1(uhci_softc_t *sc)
1312 {
1313 int status;
1314 int ack;
1315
1316 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1317
1318 #ifdef UHCI_DEBUG
1319 if (uhcidebug >= 15) {
1320 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0);
1321 uhci_dumpregs(sc);
1322 }
1323 #endif
1324
1325 KASSERT(mutex_owned(&sc->sc_intr_lock));
1326
1327 status = UREAD2(sc, UHCI_STS) & UHCI_STS_ALLINTRS;
1328 /* Check if the interrupt was for us. */
1329 if (status == 0)
1330 return 0;
1331
1332 if (sc->sc_suspend != PWR_RESUME) {
1333 #ifdef DIAGNOSTIC
1334 printf("%s: interrupt while not operating ignored\n",
1335 device_xname(sc->sc_dev));
1336 #endif
1337 UWRITE2(sc, UHCI_STS, status); /* acknowledge the ints */
1338 return 0;
1339 }
1340
1341 ack = 0;
1342 if (status & UHCI_STS_USBINT)
1343 ack |= UHCI_STS_USBINT;
1344 if (status & UHCI_STS_USBEI)
1345 ack |= UHCI_STS_USBEI;
1346 if (status & UHCI_STS_RD) {
1347 ack |= UHCI_STS_RD;
1348 #ifdef UHCI_DEBUG
1349 printf("%s: resume detect\n", device_xname(sc->sc_dev));
1350 #endif
1351 }
1352 if (status & UHCI_STS_HSE) {
1353 ack |= UHCI_STS_HSE;
1354 printf("%s: host system error\n", device_xname(sc->sc_dev));
1355 }
1356 if (status & UHCI_STS_HCPE) {
1357 ack |= UHCI_STS_HCPE;
1358 printf("%s: host controller process error\n",
1359 device_xname(sc->sc_dev));
1360 }
1361
1362 /* HCHalted=1 with Run/Stop=0 is normal; a halt while RS is set is an error */
1363 if ((status & UHCI_STS_HCH) && (UREAD2(sc, UHCI_CMD) & UHCI_CMD_RS)) {
1364 /* no acknowledge needed */
1365 if (!sc->sc_dying) {
1366 printf("%s: host controller halted\n",
1367 device_xname(sc->sc_dev));
1368 #ifdef UHCI_DEBUG
1369 uhci_dump_all(sc);
1370 #endif
1371 }
1372 sc->sc_dying = 1;
1373 }
1374
1375 if (!ack)
1376 return 0; /* nothing to acknowledge */
1377 UWRITE2(sc, UHCI_STS, ack); /* acknowledge the ints */
1378
1379 usb_schedsoftintr(&sc->sc_bus);
1380
1381 DPRINTFN(15, "sc %#jx done", (uintptr_t)sc, 0, 0, 0);
1382
1383 return 1;
1384 }
1385
1386 void
1387 uhci_softintr(void *v)
1388 {
1389 struct usbd_bus *bus = v;
1390 uhci_softc_t *sc = UHCI_BUS2SC(bus);
1391 struct uhci_xfer *ux, *nextux;
1392 ux_completeq_t cq;
1393
1394 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1395 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0);
1396
1397 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1398
1399 TAILQ_INIT(&cq);
1400 /*
1401 * Interrupts on UHCI really suck. When the host controller
1402 * interrupts because a transfer is completed there is no
1403 * way of knowing which transfer it was. You can scan down
1404 * the TDs and QHs of the previous frame to limit the search,
1405 * but that assumes that the interrupt was not delayed by more
1406 * than 1 ms, which may not always be true (e.g. after debug
1407 * output on a slow console).
1408 * We scan all interrupt descriptors to see if any have
1409 * completed.
1410 */
1411 TAILQ_FOREACH_SAFE(ux, &sc->sc_intrhead, ux_list, nextux) {
1412 uhci_check_intr(sc, ux, &cq);
1413 }
1414
1415 /*
1416 * We abuse ux_list for both the interrupt and complete lists, and
1417 * interrupt transfers will get re-added here, so use
1418 * the _SAFE version of TAILQ_FOREACH.
1419 */
1420 TAILQ_FOREACH_SAFE(ux, &cq, ux_list, nextux) {
1421 DPRINTF("ux %#jx", (uintptr_t)ux, 0, 0, 0);
1422 usb_transfer_complete(&ux->ux_xfer);
1423 }
1424
1425 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
1426 }
1427
1428 /* Check for an interrupt. */
1429 void
1430 uhci_check_intr(uhci_softc_t *sc, struct uhci_xfer *ux, ux_completeq_t *cqp)
1431 {
1432 uhci_soft_td_t *std, *fstd = NULL, *lstd = NULL;
1433 uint32_t status;
1434
1435 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1436 DPRINTFN(15, "ux %#jx", (uintptr_t)ux, 0, 0, 0);
1437
1438 KASSERT(ux != NULL);
1439
1440 struct usbd_xfer *xfer = &ux->ux_xfer;
1441 if (xfer->ux_status == USBD_CANCELLED ||
1442 xfer->ux_status == USBD_TIMEOUT) {
1443 DPRINTF("aborted xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
1444 return;
1445 }
1446
1447 switch (ux->ux_type) {
1448 case UX_CTRL:
1449 fstd = ux->ux_setup;
1450 lstd = ux->ux_stat;
1451 break;
1452 case UX_BULK:
1453 case UX_INTR:
1454 case UX_ISOC:
1455 fstd = ux->ux_stdstart;
1456 lstd = ux->ux_stdend;
1457 break;
1458 default:
1459 KASSERT(false);
1460 break;
1461 }
1462 if (fstd == NULL)
1463 return;
1464
1465 KASSERT(lstd != NULL);
1466
1467 usb_syncmem(&lstd->dma,
1468 lstd->offs + offsetof(uhci_td_t, td_status),
1469 sizeof(lstd->td.td_status),
1470 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1471 status = le32toh(lstd->td.td_status);
1472 usb_syncmem(&lstd->dma,
1473 lstd->offs + offsetof(uhci_td_t, td_status),
1474 sizeof(lstd->td.td_status),
1475 BUS_DMASYNC_PREREAD);
1476
1477 /* If the last TD is not marked active we can complete */
1478 if (!(status & UHCI_TD_ACTIVE)) {
1479 done:
1480 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0);
1481 uhci_idone(ux, cqp);
1482 return;
1483 }
1484
1485 /*
1486 * If the last TD is still active we need to check whether there
1487 * is an error somewhere in the middle, or whether there was a
1488 * short packet (SPD and not ACTIVE).
1489 */
1490 DPRINTFN(12, "active ux=%#jx", (uintptr_t)ux, 0, 0, 0);
1491 for (std = fstd; std != lstd; std = std->link.std) {
1492 usb_syncmem(&std->dma,
1493 std->offs + offsetof(uhci_td_t, td_status),
1494 sizeof(std->td.td_status),
1495 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1496 status = le32toh(std->td.td_status);
1497 usb_syncmem(&std->dma,
1498 std->offs + offsetof(uhci_td_t, td_status),
1499 sizeof(std->td.td_status), BUS_DMASYNC_PREREAD);
1500
1501 /* If there's an active TD the xfer isn't done. */
1502 if (status & UHCI_TD_ACTIVE) {
1503 DPRINTFN(12, "ux=%#jx std=%#jx still active",
1504 (uintptr_t)ux, (uintptr_t)std, 0, 0);
1505 return;
1506 }
1507
1508 /* Any kind of error makes the xfer done. */
1509 if (status & UHCI_TD_STALLED)
1510 goto done;
1511
1512 /*
1513 * If the data phase of a control transfer is short, we need
1514 * to complete the status stage
1515 */
1516
1517 if ((status & UHCI_TD_SPD) && ux->ux_type == UX_CTRL) {
1518 struct uhci_pipe *upipe =
1519 UHCI_PIPE2UPIPE(xfer->ux_pipe);
1520 uhci_soft_qh_t *sqh = upipe->ctrl.sqh;
1521 uhci_soft_td_t *stat = upipe->ctrl.stat;
1522
1523 DPRINTFN(12, "ux=%#jx std=%#jx control status "
1524 "phase needs completion", (uintptr_t)ux,
1525 (uintptr_t)ux->ux_stdstart, 0, 0);
1526
1527 sqh->qh.qh_elink =
1528 htole32(stat->physaddr | UHCI_PTR_TD);
1529 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
1530 BUS_DMASYNC_PREWRITE);
1531 break;
1532 }
1533
1534 /* We want short packets, and it is short: it's done */
1535 usb_syncmem(&std->dma,
1536 std->offs + offsetof(uhci_td_t, td_token),
1537 sizeof(std->td.td_token),
1538 BUS_DMASYNC_POSTWRITE);
1539
1540 if ((status & UHCI_TD_SPD) &&
1541 UHCI_TD_GET_ACTLEN(status) <
1542 UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token))) {
1543 goto done;
1544 }
1545 }
1546 }
1547
1548 /* Called with USB lock held. */
1549 void
1550 uhci_idone(struct uhci_xfer *ux, ux_completeq_t *cqp)
1551 {
1552 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1553 struct usbd_xfer *xfer = &ux->ux_xfer;
1554 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
1555 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
1556 uhci_soft_td_t *std;
1557 uint32_t status = 0, nstatus;
1558 bool polling __diagused = sc->sc_bus.ub_usepolling;
1559 int actlen;
1560
1561 KASSERT(polling || mutex_owned(&sc->sc_lock));
1562
1563 DPRINTFN(12, "ux=%#jx", (uintptr_t)ux, 0, 0, 0);
1564
1565 /*
1566 * If software has completed it, either by cancellation
1567 * or timeout, drop it on the floor.
1568 */
1569 if (xfer->ux_status != USBD_IN_PROGRESS) {
1570 KASSERT(xfer->ux_status == USBD_CANCELLED ||
1571 xfer->ux_status == USBD_TIMEOUT);
1572 DPRINTF("aborted xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
1573 return;
1574 }
1575
1576 /*
1577 * Cancel the timeout and the task, which have not yet
1578 * run. If they have already fired, at worst they are
1579 * waiting for the lock. They will see that the xfer
1580 * is no longer in progress and give up.
1581 */
1582 callout_stop(&xfer->ux_callout);
1583 usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask);
1584
1585 #ifdef DIAGNOSTIC
1586 #ifdef UHCI_DEBUG
1587 if (ux->ux_isdone) {
1588 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1589 uhci_dump_ii(ux);
1590 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1591 }
1592 #endif
1593 KASSERT(!ux->ux_isdone);
1594 KASSERTMSG(!ux->ux_isdone, "xfer %p type %d status %d", xfer,
1595 ux->ux_type, xfer->ux_status);
1596 ux->ux_isdone = true;
1597 #endif
1598
1599 if (xfer->ux_nframes != 0) {
1600 /* Isoc transfer, do things differently. */
1601 uhci_soft_td_t **stds = upipe->isoc.stds;
1602 int i, n, nframes, len;
1603
1604 DPRINTFN(5, "ux=%#jx isoc ready", (uintptr_t)ux, 0, 0, 0);
1605
1606 nframes = xfer->ux_nframes;
1607 actlen = 0;
1608 n = ux->ux_curframe;
1609 for (i = 0; i < nframes; i++) {
1610 std = stds[n];
1611 #ifdef UHCI_DEBUG
1612 if (uhcidebug >= 5) {
1613 DPRINTF("isoc TD %jd", i, 0, 0, 0);
1614 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1615 uhci_dump_td(std);
1616 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1617 }
1618 #endif
1619 if (++n >= UHCI_VFRAMELIST_COUNT)
1620 n = 0;
1621 usb_syncmem(&std->dma,
1622 std->offs + offsetof(uhci_td_t, td_status),
1623 sizeof(std->td.td_status),
1624 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1625 status = le32toh(std->td.td_status);
1626 len = UHCI_TD_GET_ACTLEN(status);
1627 xfer->ux_frlengths[i] = len;
1628 actlen += len;
1629 }
1630 upipe->isoc.inuse -= nframes;
1631 xfer->ux_actlen = actlen;
1632 xfer->ux_status = USBD_NORMAL_COMPLETION;
1633 goto end;
1634 }
1635
1636 #ifdef UHCI_DEBUG
1637 DPRINTFN(10, "ux=%#jx, xfer=%#jx, pipe=%#jx ready", (uintptr_t)ux,
1638 (uintptr_t)xfer, (uintptr_t)upipe, 0);
1639 if (uhcidebug >= 10) {
1640 DPRINTF("--- dump start ---", 0, 0, 0, 0);
1641 uhci_dump_tds(ux->ux_stdstart);
1642 DPRINTF("--- dump end ---", 0, 0, 0, 0);
1643 }
1644 #endif
1645
1646 /* The transfer is done, compute actual length and status. */
1647 actlen = 0;
1648 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) {
1649 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
1650 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1651 nstatus = le32toh(std->td.td_status);
1652 if (nstatus & UHCI_TD_ACTIVE)
1653 break;
1654
1655 status = nstatus;
1656 if (UHCI_TD_GET_PID(le32toh(std->td.td_token)) !=
1657 UHCI_TD_PID_SETUP)
1658 actlen += UHCI_TD_GET_ACTLEN(status);
1659 else {
1660 /*
1661 * UHCI will report CRCTO in addition to a STALL or NAK
1662 * for a SETUP transaction. See section 3.2.2, "TD
1663 * CONTROL AND STATUS".
1664 */
1665 if (status & (UHCI_TD_STALLED | UHCI_TD_NAK))
1666 status &= ~UHCI_TD_CRCTO;
1667 }
1668 }
1669 /* If there are left over TDs we need to update the toggle. */
1670 if (std != NULL)
1671 upipe->nexttoggle = UHCI_TD_GET_DT(le32toh(std->td.td_token));
1672
1673 status &= UHCI_TD_ERROR;
1674 DPRINTFN(10, "actlen=%jd, status=0x%jx", actlen, status, 0, 0);
1675 xfer->ux_actlen = actlen;
1676 if (status != 0) {
1677
1678 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1679 "error, addr=%jd, endpt=0x%02jx",
1680 xfer->ux_pipe->up_dev->ud_addr,
1681 xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress,
1682 0, 0);
1683 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1684 "bitstuff=%jd crcto =%jd nak =%jd babble =%jd",
1685 !!(status & UHCI_TD_BITSTUFF),
1686 !!(status & UHCI_TD_CRCTO),
1687 !!(status & UHCI_TD_NAK),
1688 !!(status & UHCI_TD_BABBLE));
1689 DPRINTFN((status == UHCI_TD_STALLED) * 10,
1690 "dbuffer =%jd stalled =%jd active =%jd",
1691 !!(status & UHCI_TD_DBUFFER),
1692 !!(status & UHCI_TD_STALLED),
1693 !!(status & UHCI_TD_ACTIVE),
1694 0);
1695
1696 if (status == UHCI_TD_STALLED)
1697 xfer->ux_status = USBD_STALLED;
1698 else
1699 xfer->ux_status = USBD_IOERROR; /* more info XXX */
1700 } else {
1701 xfer->ux_status = USBD_NORMAL_COMPLETION;
1702 }
1703
1704 end:
1705 uhci_del_intr_list(sc, ux);
1706 if (cqp)
1707 TAILQ_INSERT_TAIL(cqp, ux, ux_list);
1708
1709 KASSERT(polling || mutex_owned(&sc->sc_lock));
1710 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0);
1711 }
1712
1713 /*
1714 * Called when a request does not complete.
1715 */
1716 void
1717 uhci_timeout(void *addr)
1718 {
1719 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1720 struct usbd_xfer *xfer = addr;
1721 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
1722 struct usbd_device *dev = xfer->ux_pipe->up_dev;
1723
1724 DPRINTF("xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
1725
1726 mutex_enter(&sc->sc_lock);
1727 if (!sc->sc_dying && xfer->ux_status == USBD_IN_PROGRESS)
1728 usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC);
1729 mutex_exit(&sc->sc_lock);
1730 }
1731
1732 void
1733 uhci_timeout_task(void *addr)
1734 {
1735 struct usbd_xfer *xfer = addr;
1736 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
1737
1738 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1739
1740 DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
1741
1742 mutex_enter(&sc->sc_lock);
1743 uhci_abort_xfer(xfer, USBD_TIMEOUT);
1744 mutex_exit(&sc->sc_lock);
1745 }
1746
1747 void
1748 uhci_poll(struct usbd_bus *bus)
1749 {
1750 uhci_softc_t *sc = UHCI_BUS2SC(bus);
1751
1752 if (UREAD2(sc, UHCI_STS) & UHCI_STS_USBINT) {
1753 mutex_spin_enter(&sc->sc_intr_lock);
1754 uhci_intr1(sc);
1755 mutex_spin_exit(&sc->sc_intr_lock);
1756 }
1757 }
1758
1759 void
1760 uhci_reset(uhci_softc_t *sc)
1761 {
1762 int n;
1763
1764 UHCICMD(sc, UHCI_CMD_HCRESET);
1765 /* The reset bit goes low when the controller is done. */
1766 for (n = 0; n < UHCI_RESET_TIMEOUT &&
1767 (UREAD2(sc, UHCI_CMD) & UHCI_CMD_HCRESET); n++)
1768 usb_delay_ms(&sc->sc_bus, 1);
1769 if (n >= UHCI_RESET_TIMEOUT)
1770 printf("%s: controller did not reset\n",
1771 device_xname(sc->sc_dev));
1772 }
1773
1774 usbd_status
1775 uhci_run(uhci_softc_t *sc, int run, int locked)
1776 {
1777 int n, running;
1778 uint16_t cmd;
1779
1780 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1781
1782 run = run != 0;
1783 if (!locked)
1784 mutex_spin_enter(&sc->sc_intr_lock);
1785
1786 DPRINTF("setting run=%jd", run, 0, 0, 0);
1787 cmd = UREAD2(sc, UHCI_CMD);
1788 if (run)
1789 cmd |= UHCI_CMD_RS;
1790 else
1791 cmd &= ~UHCI_CMD_RS;
1792 UHCICMD(sc, cmd);
1793 for (n = 0; n < 10; n++) {
1794 running = !(UREAD2(sc, UHCI_STS) & UHCI_STS_HCH);
1795 /* return when we've entered the state we want */
1796 if (run == running) {
1797 if (!locked)
1798 mutex_spin_exit(&sc->sc_intr_lock);
1799 DPRINTF("done cmd=0x%jx sts=0x%jx",
1800 UREAD2(sc, UHCI_CMD), UREAD2(sc, UHCI_STS), 0, 0);
1801 return USBD_NORMAL_COMPLETION;
1802 }
1803 usb_delay_ms_locked(&sc->sc_bus, 1, &sc->sc_intr_lock);
1804 }
1805 if (!locked)
1806 mutex_spin_exit(&sc->sc_intr_lock);
1807 printf("%s: cannot %s\n", device_xname(sc->sc_dev),
1808 run ? "start" : "stop");
1809 return USBD_IOERROR;
1810 }
1811
1812 /*
1813 * Memory management routines.
1814 * uhci_alloc_std allocates TDs
1815 * uhci_alloc_sqh allocates QHs
1816 * These two routines do their own free list management,
1817 * partly for speed and partly because allocating DMAable memory
1818 * has page-size granularity, so much memory would be wasted if
1819 * only one TD/QH (32 bytes) were placed in each allocated chunk.
1820 */
1821
1822 uhci_soft_td_t *
1823 uhci_alloc_std(uhci_softc_t *sc)
1824 {
1825 uhci_soft_td_t *std;
1826 usbd_status err;
1827 int i, offs;
1828 usb_dma_t dma;
1829
1830 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1831
1832 mutex_enter(&sc->sc_lock);
1833 if (sc->sc_freetds == NULL) {
1834 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0);
1835 mutex_exit(&sc->sc_lock);
1836
1837 err = usb_allocmem(&sc->sc_bus, UHCI_STD_SIZE * UHCI_STD_CHUNK,
1838 UHCI_TD_ALIGN, &dma);
1839 if (err)
1840 return NULL;
1841
1842 mutex_enter(&sc->sc_lock);
1843 for (i = 0; i < UHCI_STD_CHUNK; i++) {
1844 offs = i * UHCI_STD_SIZE;
1845 std = KERNADDR(&dma, offs);
1846 std->physaddr = DMAADDR(&dma, offs);
1847 std->dma = dma;
1848 std->offs = offs;
1849 std->link.std = sc->sc_freetds;
1850 sc->sc_freetds = std;
1851 }
1852 }
1853 std = sc->sc_freetds;
1854 sc->sc_freetds = std->link.std;
1855 mutex_exit(&sc->sc_lock);
1856
1857 memset(&std->td, 0, sizeof(uhci_td_t));
1858
1859 return std;
1860 }
1861
1862 #define TD_IS_FREE 0x12345678
1863
1864 void
1865 uhci_free_std_locked(uhci_softc_t *sc, uhci_soft_td_t *std)
1866 {
1867 KASSERT(mutex_owned(&sc->sc_lock));
1868
1869 #ifdef DIAGNOSTIC
1870 if (le32toh(std->td.td_token) == TD_IS_FREE) {
1871 printf("%s: freeing free TD %p\n", __func__, std);
1872 return;
1873 }
1874 std->td.td_token = htole32(TD_IS_FREE);
1875 #endif
1876
1877 std->link.std = sc->sc_freetds;
1878 sc->sc_freetds = std;
1879 }
1880
1881 void
1882 uhci_free_std(uhci_softc_t *sc, uhci_soft_td_t *std)
1883 {
1884 mutex_enter(&sc->sc_lock);
1885 uhci_free_std_locked(sc, std);
1886 mutex_exit(&sc->sc_lock);
1887 }
1888
1889 uhci_soft_qh_t *
1890 uhci_alloc_sqh(uhci_softc_t *sc)
1891 {
1892 uhci_soft_qh_t *sqh;
1893 usbd_status err;
1894 int i, offs;
1895 usb_dma_t dma;
1896
1897 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1898
1899 mutex_enter(&sc->sc_lock);
1900 if (sc->sc_freeqhs == NULL) {
1901 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0);
1902 mutex_exit(&sc->sc_lock);
1903
1904 err = usb_allocmem(&sc->sc_bus, UHCI_SQH_SIZE * UHCI_SQH_CHUNK,
1905 UHCI_QH_ALIGN, &dma);
1906 if (err)
1907 return NULL;
1908
1909 mutex_enter(&sc->sc_lock);
1910 for (i = 0; i < UHCI_SQH_CHUNK; i++) {
1911 offs = i * UHCI_SQH_SIZE;
1912 sqh = KERNADDR(&dma, offs);
1913 sqh->physaddr = DMAADDR(&dma, offs);
1914 sqh->dma = dma;
1915 sqh->offs = offs;
1916 sqh->hlink = sc->sc_freeqhs;
1917 sc->sc_freeqhs = sqh;
1918 }
1919 }
1920 sqh = sc->sc_freeqhs;
1921 sc->sc_freeqhs = sqh->hlink;
1922 mutex_exit(&sc->sc_lock);
1923
1924 memset(&sqh->qh, 0, sizeof(uhci_qh_t));
1925
1926 return sqh;
1927 }
1928
1929 void
1930 uhci_free_sqh(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
1931 {
1932 KASSERT(mutex_owned(&sc->sc_lock));
1933
1934 sqh->hlink = sc->sc_freeqhs;
1935 sc->sc_freeqhs = sqh;
1936 }
1937
1938 #if 0
1939 void
1940 uhci_free_std_chain(uhci_softc_t *sc, uhci_soft_td_t *std,
1941 uhci_soft_td_t *stdend)
1942 {
1943 uhci_soft_td_t *p;
1944 uint32_t td_link;
1945
1946 /*
1947 * To avoid a race with the controller, which may still be looking at
1948 * this chain, first invalidate all the links and then wait for the
1949 * controller to move on to another queue.
1950 */
1951 for (p = std; p != stdend; p = p->link.std) {
1952 usb_syncmem(&p->dma,
1953 p->offs + offsetof(uhci_td_t, td_link),
1954 sizeof(p->td.td_link),
1955 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1956 td_link = le32toh(p->td.td_link);
1957 usb_syncmem(&p->dma,
1958 p->offs + offsetof(uhci_td_t, td_link),
1959 sizeof(p->td.td_link),
1960 BUS_DMASYNC_PREREAD);
1961 if ((td_link & UHCI_PTR_T) == 0) {
1962 p->td.td_link = htole32(UHCI_PTR_T);
1963 usb_syncmem(&p->dma,
1964 p->offs + offsetof(uhci_td_t, td_link),
1965 sizeof(p->td.td_link),
1966 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1967 }
1968 }
1969 delay(UHCI_QH_REMOVE_DELAY);
1970
1971 for (; std != stdend; std = p) {
1972 p = std->link.std;
1973 uhci_free_std(sc, std);
1974 }
1975 }
1976 #endif
1977
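/*
 * Allocate the TDs needed to cover 'len' bytes at the endpoint's maximum
 * packet size, plus one extra TD when a zero-length packet must be
 * appended (USBD_FORCE_SHORT_XFER on a write).  The TDs are recorded in
 * the xfer's ux_stds array and *sp is set to the first of them.
 */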
1978 int
1979 uhci_alloc_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer, int len,
1980 int rd, uhci_soft_td_t **sp)
1981 {
1982 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
1983 uint16_t flags = xfer->ux_flags;
1984 uhci_soft_td_t *p;
1985
1986 UHCIHIST_FUNC(); UHCIHIST_CALLED();
1987
1988 DPRINTFN(8, "xfer=%#jx pipe=%#jx", (uintptr_t)xfer,
1989 (uintptr_t)xfer->ux_pipe, 0, 0);
1990
1991 ASSERT_SLEEPABLE();
1992 KASSERT(sp);
1993
1994 int maxp = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
1995 if (maxp == 0) {
1996 printf("%s: maxp=0\n", __func__);
1997 return EINVAL;
1998 }
1999 size_t ntd = (len + maxp - 1) / maxp;
2000 if (!rd && (flags & USBD_FORCE_SHORT_XFER)) {
2001 ntd++;
2002 }
2003 DPRINTFN(10, "maxp=%jd ntd=%jd", maxp, ntd, 0, 0);
2004
2005 uxfer->ux_stds = NULL;
2006 uxfer->ux_nstd = ntd;
2007 if (ntd == 0) {
2008 *sp = NULL;
2009 DPRINTF("ntd=0", 0, 0, 0, 0);
2010 return 0;
2011 }
2012 uxfer->ux_stds = kmem_alloc(sizeof(uhci_soft_td_t *) * ntd,
2013 KM_SLEEP);
2014
2015 for (int i = 0; i < ntd; i++) {
2016 p = uhci_alloc_std(sc);
2017 if (p == NULL) {
2018 if (i != 0) {
2019 uxfer->ux_nstd = i;
2020 uhci_free_stds(sc, uxfer);
2021 }
2022 kmem_free(uxfer->ux_stds,
2023 sizeof(uhci_soft_td_t *) * ntd);
2024 return ENOMEM;
2025 }
2026 uxfer->ux_stds[i] = p;
2027 }
2028
2029 *sp = uxfer->ux_stds[0];
2030
2031 return 0;
2032 }
2033
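/*
 * Return all of an xfer's TDs to the softc free list.
 */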
2034 Static void
2035 uhci_free_stds(uhci_softc_t *sc, struct uhci_xfer *ux)
2036 {
2037 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2038
2039 DPRINTFN(8, "ux=%#jx", (uintptr_t)ux, 0, 0, 0);
2040
2041 mutex_enter(&sc->sc_lock);
2042 for (size_t i = 0; i < ux->ux_nstd; i++) {
2043 uhci_soft_td_t *std = ux->ux_stds[i];
2044 #ifdef DIAGNOSTIC
2045 if (le32toh(std->td.td_token) == TD_IS_FREE) {
2046 printf("%s: freeing free TD %p\n", __func__, std);
2047 return;
2048 }
2049 std->td.td_token = htole32(TD_IS_FREE);
2050 #endif
2051 ux->ux_stds[i]->link.std = sc->sc_freetds;
2052 sc->sc_freetds = std;
2053 }
2054 mutex_exit(&sc->sc_lock);
2055 }
2056
2057
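/*
 * (Re)initialise an xfer's previously allocated TD chain: fill in the
 * status, token and buffer fields, link the TDs together, and update the
 * data toggle.  A zero-length OUT TD is appended when USBD_FORCE_SHORT_XFER
 * requires it.  *lstd is set to the last TD of the chain.
 */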
2058 Static void
2059 uhci_reset_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer,
2060 int length, int isread, int *toggle, uhci_soft_td_t **lstd)
2061 {
2062 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2063 struct usbd_pipe *pipe = xfer->ux_pipe;
2064 usb_dma_t *dma = &xfer->ux_dmabuf;
2065 uint16_t flags = xfer->ux_flags;
2066 uhci_soft_td_t *std, *prev;
2067 int len = length;
2068 int tog = *toggle;
2069 int maxp;
2070 uint32_t status;
2071 size_t i;
2072
2073 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2074 DPRINTFN(8, "xfer=%#jx len %jd isread %jd toggle %jd", (uintptr_t)xfer,
2075 len, isread, *toggle);
2076
2077 KASSERT(len != 0 || (!isread && (flags & USBD_FORCE_SHORT_XFER)));
2078
2079 maxp = UGETW(pipe->up_endpoint->ue_edesc->wMaxPacketSize);
2080 KASSERT(maxp != 0);
2081
2082 int addr = xfer->ux_pipe->up_dev->ud_addr;
2083 int endpt = xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress;
2084
2085 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(3) | UHCI_TD_ACTIVE);
2086 if (pipe->up_dev->ud_speed == USB_SPEED_LOW)
2087 status |= UHCI_TD_LS;
2088 if (flags & USBD_SHORT_XFER_OK)
2089 status |= UHCI_TD_SPD;
2090 usb_syncmem(dma, 0, len,
2091 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2092 std = prev = NULL;
2093 for (i = 0; len != 0 && i < uxfer->ux_nstd; i++, prev = std) {
2094 int l = len;
2095 std = uxfer->ux_stds[i];
2096 if (l > maxp)
2097 l = maxp;
2098
2099 if (prev) {
2100 prev->link.std = std;
2101 prev->td.td_link = htole32(
2102 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD
2103 );
2104 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td),
2105 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2106 }
2107
2108 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2109 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2110
2111 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD);
2112 std->td.td_status = htole32(status);
2113 std->td.td_token = htole32(
2114 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) |
2115 UHCI_TD_SET_DEVADDR(addr) |
2116 UHCI_TD_SET_PID(isread ? UHCI_TD_PID_IN : UHCI_TD_PID_OUT) |
2117 UHCI_TD_SET_DT(tog) |
2118 UHCI_TD_SET_MAXLEN(l)
2119 );
2120 std->td.td_buffer = htole32(DMAADDR(dma, i * maxp));
2121
2122 std->link.std = NULL;
2123
2124 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2125 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2126 tog ^= 1;
2127
2128 len -= l;
2129 }
2130 KASSERTMSG(len == 0, "xfer %p alen %d len %d mps %d ux_nstd %zu i %zu",
2131 xfer, length, len, maxp, uxfer->ux_nstd, i);
2132
2133 if (!isread &&
2134 (flags & USBD_FORCE_SHORT_XFER) &&
2135 length % maxp == 0) {
2136 /* Force a 0 length transfer at the end. */
2137 KASSERTMSG(i < uxfer->ux_nstd, "i=%zu nstd=%zu", i,
2138 uxfer->ux_nstd);
2139 std = uxfer->ux_stds[i++];
2140
2141 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD);
2142 std->td.td_status = htole32(status);
2143 std->td.td_token = htole32(
2144 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) |
2145 UHCI_TD_SET_DEVADDR(addr) |
2146 UHCI_TD_SET_PID(UHCI_TD_PID_OUT) |
2147 UHCI_TD_SET_DT(tog) |
2148 UHCI_TD_SET_MAXLEN(0)
2149 );
2150 std->td.td_buffer = 0;
2151 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2152 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2153
2154 std->link.std = NULL;
2155 if (prev) {
2156 prev->link.std = std;
2157 prev->td.td_link = htole32(
2158 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD
2159 );
2160 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td),
2161 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2162 }
2163 tog ^= 1;
2164 }
2165 *lstd = std;
2166 *toggle = tog;
2167 }
2168
2169 void
2170 uhci_device_clear_toggle(struct usbd_pipe *pipe)
2171 {
2172 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2173 upipe->nexttoggle = 0;
2174 }
2175
2176 void
2177 uhci_noop(struct usbd_pipe *pipe)
2178 {
2179 }
2180
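/*
 * Per-xfer setup for bulk transfers: allocate the TD chain covering the
 * transfer buffer.
 */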
2181 int
2182 uhci_device_bulk_init(struct usbd_xfer *xfer)
2183 {
2184 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2185 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2186 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
2187 int endpt = ed->bEndpointAddress;
2188 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2189 int len = xfer->ux_bufsize;
2190 int err = 0;
2191
2192
2193 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2194 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, len,
2195 xfer->ux_flags, 0);
2196
2197 if (sc->sc_dying)
2198 return USBD_IOERROR;
2199
2200 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2201
2202 uxfer->ux_type = UX_BULK;
2203 err = uhci_alloc_std_chain(sc, xfer, len, isread, &uxfer->ux_stdstart);
2204 if (err)
2205 return err;
2206
2207 #ifdef UHCI_DEBUG
2208 if (uhcidebug >= 10) {
2209 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2210 uhci_dump_tds(uxfer->ux_stdstart);
2211 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2212 }
2213 #endif
2214
2215 return 0;
2216 }
2217
2218 Static void
2219 uhci_device_bulk_fini(struct usbd_xfer *xfer)
2220 {
2221 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2222 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2223
2224 KASSERT(ux->ux_type == UX_BULK);
2225
2226 if (ux->ux_nstd) {
2227 uhci_free_stds(sc, ux);
2228 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2229 }
2230 }
2231
2232 usbd_status
2233 uhci_device_bulk_transfer(struct usbd_xfer *xfer)
2234 {
2235 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2236 usbd_status err;
2237
2238 /* Insert last in queue. */
2239 mutex_enter(&sc->sc_lock);
2240 err = usb_insert_transfer(xfer);
2241 mutex_exit(&sc->sc_lock);
2242 if (err)
2243 return err;
2244
2245 /*
2246 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
2247 * so start it first.
2248 */
2249 return uhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2250 }
2251
2252 usbd_status
2253 uhci_device_bulk_start(struct usbd_xfer *xfer)
2254 {
2255 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2256 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2257 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2258 uhci_soft_td_t *data, *dataend;
2259 uhci_soft_qh_t *sqh;
2260 int len;
2261 int endpt;
2262 int isread;
2263
2264 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2265 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2266 xfer->ux_length, xfer->ux_flags, 0);
2267
2268 if (sc->sc_dying)
2269 return USBD_IOERROR;
2270
2271 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2272 KASSERT(xfer->ux_length <= xfer->ux_bufsize);
2273
2274 len = xfer->ux_length;
2275 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2276 isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2277 sqh = upipe->bulk.sqh;
2278
2279 /* Take lock here to protect nexttoggle */
2280 mutex_enter(&sc->sc_lock);
2281
2282 uhci_reset_std_chain(sc, xfer, len, isread, &upipe->nexttoggle,
2283 &dataend);
2284
2285 data = ux->ux_stdstart;
2286 ux->ux_stdend = dataend;
2287 dataend->td.td_status |= htole32(UHCI_TD_IOC);
2288 usb_syncmem(&dataend->dma,
2289 dataend->offs + offsetof(uhci_td_t, td_status),
2290 sizeof(dataend->td.td_status),
2291 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2292
2293 #ifdef UHCI_DEBUG
2294 if (uhcidebug >= 10) {
2295 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2296 DPRINTFN(10, "before transfer", 0, 0, 0, 0);
2297 uhci_dump_tds(data);
2298 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2299 }
2300 #endif
2301
2302 KASSERT(ux->ux_isdone);
2303 #ifdef DIAGNOSTIC
2304 ux->ux_isdone = false;
2305 #endif
2306
2307 sqh->elink = data;
2308 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD);
2309 /* uhci_add_bulk() will do usb_syncmem(sqh) */
2310
2311 uhci_add_bulk(sc, sqh);
2312 uhci_add_intr_list(sc, ux);
2313
2314 if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) {
2315 callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout),
2316 uhci_timeout, xfer);
2317 }
2318 xfer->ux_status = USBD_IN_PROGRESS;
2319 mutex_exit(&sc->sc_lock);
2320
2321 return USBD_IN_PROGRESS;
2322 }
2323
2324 /* Abort a device bulk request. */
2325 void
2326 uhci_device_bulk_abort(struct usbd_xfer *xfer)
2327 {
2328 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2329
2330 KASSERT(mutex_owned(&sc->sc_lock));
2331
2332 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2333
2334 uhci_abort_xfer(xfer, USBD_CANCELLED);
2335 }
2336
2337 /*
2338 * Cancel or time out a device request. We have two cases to deal with:
2339 *
2340 * 1) A driver wants to stop scheduled or in-flight transfers
2341 * 2) A transfer has timed out
2342 *
2343 * It is impossible to guarantee that the requested transfer will not
2344 * have (partially) happened, since the hardware runs concurrently.
2345 *
2346 * Transfer state is protected by the bus lock, and we set the transfer
2347 * status as soon as either of the above happens (with the bus lock held).
2348 *
2349 * To give the hardware time to notice, we simply wait.
2350 */
2351 void
2352 uhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
2353 {
2354 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2355 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2356 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2357 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2358 uhci_soft_td_t *std;
2359
2360 KASSERTMSG((status == USBD_CANCELLED || status == USBD_TIMEOUT),
2361 "invalid status for abort: %d", (int)status);
2362
2363 DPRINTFN(1,"xfer=%#jx, status=%jd", (uintptr_t)xfer, status, 0, 0);
2364
2365 KASSERT(mutex_owned(&sc->sc_lock));
2366 ASSERT_SLEEPABLE();
2367
2368 if (status == USBD_CANCELLED) {
2369 /*
2370 * We are synchronously aborting. Try to stop the
2371 * callout and task, but if we can't, wait for them to
2372 * complete.
2373 */
2374 callout_halt(&xfer->ux_callout, &sc->sc_lock);
2375 usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask,
2376 USB_TASKQ_HC, &sc->sc_lock);
2377 } else {
2378 /* Otherwise, we are timing out. */
2379 KASSERT(status == USBD_TIMEOUT);
2380 }
2381
2382 /*
2383 * The xfer cannot have been cancelled already. It is the
2384 * responsibility of the caller of usbd_abort_pipe not to try
2385 * to abort a pipe multiple times, whether concurrently or
2386 * sequentially.
2387 */
2388 KASSERT(xfer->ux_status != USBD_CANCELLED);
2389
2390 /* Only the timeout, which runs only once, can time it out. */
2391 KASSERT(xfer->ux_status != USBD_TIMEOUT);
2392
2393 /* If anyone else beat us, we're done. */
2394 if (xfer->ux_status != USBD_IN_PROGRESS)
2395 return;
2396
2397 /* We beat everyone else. Claim the status. */
2398 xfer->ux_status = status;
2399
2400 /*
2401 * If we're dying, skip the hardware action and just notify the
2402 * software that we're done.
2403 */
2404 if (sc->sc_dying) {
2405 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer,
2406 xfer->ux_status, 0, 0);
2407 goto dying;
2408 }
2409
2410 /*
2411 * HC Step 1: Make interrupt routine and hardware ignore xfer.
2412 */
2413 uhci_del_intr_list(sc, ux);
2414
2415 DPRINTF("stop ux=%#jx", (uintptr_t)ux, 0, 0, 0);
2416 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) {
2417 usb_syncmem(&std->dma,
2418 std->offs + offsetof(uhci_td_t, td_status),
2419 sizeof(std->td.td_status),
2420 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2421 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC));
2422 usb_syncmem(&std->dma,
2423 std->offs + offsetof(uhci_td_t, td_status),
2424 sizeof(std->td.td_status),
2425 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2426 }
2427
2428 /*
2429 * HC Step 2: Wait until we know hardware has finished any possible
2430 * use of the xfer.
2431 */
2432 /* Hardware finishes in 1ms */
2433 usb_delay_ms_locked(upipe->pipe.up_dev->ud_bus, 2, &sc->sc_lock);
2434
2435 /*
2436 * HC Step 3: Notify completion of the xfer.
2437 */
2438 dying:
2439 #ifdef DIAGNOSTIC
2440 ux->ux_isdone = true;
2441 #endif
2442 usb_transfer_complete(xfer);
2443 DPRINTFN(14, "end", 0, 0, 0, 0);
2444
2445 KASSERT(mutex_owned(&sc->sc_lock));
2446 }
2447
2448 /* Close a device bulk pipe. */
2449 void
2450 uhci_device_bulk_close(struct usbd_pipe *pipe)
2451 {
2452 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2453 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2454
2455 KASSERT(mutex_owned(&sc->sc_lock));
2456
2457 uhci_free_sqh(sc, upipe->bulk.sqh);
2458
2459 pipe->up_endpoint->ue_toggle = upipe->nexttoggle;
2460 }
2461
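/*
 * Per-xfer setup for control transfers: allocate the data-stage TD chain
 * (if any) and record the pipe's setup and status TDs.
 */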
2462 int
2463 uhci_device_ctrl_init(struct usbd_xfer *xfer)
2464 {
2465 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2466 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2467 usb_device_request_t *req = &xfer->ux_request;
2468 struct usbd_device *dev = upipe->pipe.up_dev;
2469 uhci_softc_t *sc = dev->ud_bus->ub_hcpriv;
2470 uhci_soft_td_t *data = NULL;
2471 int len;
2472 usbd_status err;
2473 int isread;
2474
2475 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2476 DPRINTFN(3, "xfer=%#jx len=%jd, addr=%jd, endpt=%jd",
2477 (uintptr_t)xfer, xfer->ux_bufsize, dev->ud_addr,
2478 upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress);
2479
2480 isread = req->bmRequestType & UT_READ;
2481 len = xfer->ux_bufsize;
2482
2483 uxfer->ux_type = UX_CTRL;
2484 /* Set up data transaction */
2485 if (len != 0) {
2486 err = uhci_alloc_std_chain(sc, xfer, len, isread, &data);
2487 if (err)
2488 return err;
2489 }
2490 /* Set up interrupt info. */
2491 uxfer->ux_setup = upipe->ctrl.setup;
2492 uxfer->ux_stat = upipe->ctrl.stat;
2493 uxfer->ux_data = data;
2494
2495 return 0;
2496 }
2497
2498 Static void
2499 uhci_device_ctrl_fini(struct usbd_xfer *xfer)
2500 {
2501 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2502 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2503
2504 KASSERT(ux->ux_type == UX_CTRL);
2505
2506 if (ux->ux_nstd) {
2507 uhci_free_stds(sc, ux);
2508 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2509 }
2510 }
2511
2512 usbd_status
2513 uhci_device_ctrl_transfer(struct usbd_xfer *xfer)
2514 {
2515 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2516 usbd_status err;
2517
2518 /* Insert last in queue. */
2519 mutex_enter(&sc->sc_lock);
2520 err = usb_insert_transfer(xfer);
2521 mutex_exit(&sc->sc_lock);
2522 if (err)
2523 return err;
2524
2525 /*
2526 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
2527 * so start it first.
2528 */
2529 return uhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2530 }
2531
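/*
 * Start a control transfer: build the SETUP, (optional) DATA and STATUS
 * stages and link the pipe's queue head into the controller's control
 * schedule.
 */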
2532 usbd_status
2533 uhci_device_ctrl_start(struct usbd_xfer *xfer)
2534 {
2535 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2536 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
2537 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2538 usb_device_request_t *req = &xfer->ux_request;
2539 struct usbd_device *dev = upipe->pipe.up_dev;
2540 int addr = dev->ud_addr;
2541 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2542 uhci_soft_td_t *setup, *stat, *next, *dataend;
2543 uhci_soft_qh_t *sqh;
2544 int len;
2545 int isread;
2546
2547 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2548
2549 if (sc->sc_dying)
2550 return USBD_IOERROR;
2551
2552 KASSERT(xfer->ux_rqflags & URQ_REQUEST);
2553
2554 DPRINTFN(3, "type=0x%02jx, request=0x%02jx, "
2555 "wValue=0x%04jx, wIndex=0x%04jx",
2556 req->bmRequestType, req->bRequest, UGETW(req->wValue),
2557 UGETW(req->wIndex));
2558 DPRINTFN(3, "len=%jd, addr=%jd, endpt=%jd",
2559 UGETW(req->wLength), dev->ud_addr, endpt, 0);
2560
2561 isread = req->bmRequestType & UT_READ;
2562 len = UGETW(req->wLength);
2563
2564 setup = upipe->ctrl.setup;
2565 stat = upipe->ctrl.stat;
2566 sqh = upipe->ctrl.sqh;
2567
2568 memcpy(KERNADDR(&upipe->ctrl.reqdma, 0), req, sizeof(*req));
2569 usb_syncmem(&upipe->ctrl.reqdma, 0, sizeof(*req), BUS_DMASYNC_PREWRITE);
2570
2571 mutex_enter(&sc->sc_lock);
2572
2573 /* Set up data transaction */
2574 if (len != 0) {
2575 upipe->nexttoggle = 1;
2576 next = uxfer->ux_data;
2577 uhci_reset_std_chain(sc, xfer, len, isread,
2578 &upipe->nexttoggle, &dataend);
2579 dataend->link.std = stat;
2580 dataend->td.td_link = htole32(stat->physaddr | UHCI_PTR_TD);
2581 usb_syncmem(&dataend->dma,
2582 dataend->offs + offsetof(uhci_td_t, td_link),
2583 sizeof(dataend->td.td_link),
2584 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2585 } else {
2586 next = stat;
2587 }
2588
2589 const uint32_t status = UHCI_TD_ZERO_ACTLEN(
2590 UHCI_TD_SET_ERRCNT(3) |
2591 UHCI_TD_ACTIVE |
2592 (dev->ud_speed == USB_SPEED_LOW ? UHCI_TD_LS : 0)
2593 );
2594 setup->link.std = next;
2595 setup->td.td_link = htole32(next->physaddr | UHCI_PTR_TD);
2596 setup->td.td_status = htole32(status);
2597 setup->td.td_token = htole32(UHCI_TD_SETUP(sizeof(*req), endpt, addr));
2598 setup->td.td_buffer = htole32(DMAADDR(&upipe->ctrl.reqdma, 0));
2599
2600 usb_syncmem(&setup->dma, setup->offs, sizeof(setup->td),
2601 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2602
2603 stat->link.std = NULL;
2604 stat->td.td_link = htole32(UHCI_PTR_T);
2605 stat->td.td_status = htole32(status | UHCI_TD_IOC);
2606 stat->td.td_token =
2607 htole32(isread ? UHCI_TD_OUT(0, endpt, addr, 1) :
2608 UHCI_TD_IN (0, endpt, addr, 1));
2609 stat->td.td_buffer = htole32(0);
2610 usb_syncmem(&stat->dma, stat->offs, sizeof(stat->td),
2611 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2612
2613 #ifdef UHCI_DEBUG
2614 if (uhcidebug >= 10) {
2615 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2616 DPRINTF("before transfer", 0, 0, 0, 0);
2617 uhci_dump_tds(setup);
2618 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2619 }
2620 #endif
2621
2622 /* Set up interrupt info. */
2623 uxfer->ux_setup = setup;
2624 uxfer->ux_stat = stat;
2625 KASSERT(uxfer->ux_isdone);
2626 #ifdef DIAGNOSTIC
2627 uxfer->ux_isdone = false;
2628 #endif
2629
2630 sqh->elink = setup;
2631 sqh->qh.qh_elink = htole32(setup->physaddr | UHCI_PTR_TD);
2632 /* uhci_add_?s_ctrl() will do usb_syncmem(sqh) */
2633
2634 if (dev->ud_speed == USB_SPEED_LOW)
2635 uhci_add_ls_ctrl(sc, sqh);
2636 else
2637 uhci_add_hs_ctrl(sc, sqh);
2638 uhci_add_intr_list(sc, uxfer);
2639 #ifdef UHCI_DEBUG
2640 if (uhcidebug >= 12) {
2641 uhci_soft_td_t *std;
2642 uhci_soft_qh_t *xqh;
2643 uhci_soft_qh_t *sxqh;
2644 int maxqh = 0;
2645 uhci_physaddr_t link;
2646 DPRINTFN(12, "--- dump start ---", 0, 0, 0, 0);
2647 DPRINTFN(12, "follow from [0]", 0, 0, 0, 0);
2648 for (std = sc->sc_vframes[0].htd, link = 0;
2649 (link & UHCI_PTR_QH) == 0;
2650 std = std->link.std) {
2651 link = le32toh(std->td.td_link);
2652 uhci_dump_td(std);
2653 }
2654 sxqh = (uhci_soft_qh_t *)std;
2655 uhci_dump_qh(sxqh);
2656 for (xqh = sxqh;
2657 xqh != NULL;
2658 xqh = (maxqh++ == 5 || xqh->hlink == sxqh ||
2659 xqh->hlink == xqh ? NULL : xqh->hlink)) {
2660 uhci_dump_qh(xqh);
2661 }
2662 DPRINTFN(12, "Enqueued QH:", 0, 0, 0, 0);
2663 uhci_dump_qh(sqh);
2664 uhci_dump_tds(sqh->elink);
2665 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2666 }
2667 #endif
2668 if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) {
2669 callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout),
2670 uhci_timeout, xfer);
2671 }
2672 xfer->ux_status = USBD_IN_PROGRESS;
2673 mutex_exit(&sc->sc_lock);
2674
2675 return USBD_IN_PROGRESS;
2676 }
2677
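/*
 * Per-xfer setup for interrupt transfers: allocate the TD chain covering
 * the transfer buffer.
 */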
2678 int
2679 uhci_device_intr_init(struct usbd_xfer *xfer)
2680 {
2681 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2682 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2683 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
2684 int endpt = ed->bEndpointAddress;
2685 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2686 int len = xfer->ux_bufsize;
2687 int err;
2688
2689 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2690
2691 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2692 xfer->ux_length, xfer->ux_flags, 0);
2693
2694 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2695 KASSERT(len != 0);
2696
2697 ux->ux_type = UX_INTR;
2698 ux->ux_nstd = 0;
2699 err = uhci_alloc_std_chain(sc, xfer, len, isread, &ux->ux_stdstart);
2700
2701 return err;
2702 }
2703
2704 Static void
2705 uhci_device_intr_fini(struct usbd_xfer *xfer)
2706 {
2707 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2708 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2709
2710 KASSERT(ux->ux_type == UX_INTR);
2711
2712 if (ux->ux_nstd) {
2713 uhci_free_stds(sc, ux);
2714 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd);
2715 }
2716 }
2717
2718 usbd_status
2719 uhci_device_intr_transfer(struct usbd_xfer *xfer)
2720 {
2721 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2722 usbd_status err;
2723
2724 /* Insert last in queue. */
2725 mutex_enter(&sc->sc_lock);
2726 err = usb_insert_transfer(xfer);
2727 mutex_exit(&sc->sc_lock);
2728 if (err)
2729 return err;
2730
2731 /*
2732 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
2733 * so start it first.
2734 */
2735 return uhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
2736 }
2737
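/*
 * Start an interrupt transfer: reset the TD chain and hook it into each
 * of the pipe's interrupt queue heads in the virtual frame list.
 */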
2738 usbd_status
2739 uhci_device_intr_start(struct usbd_xfer *xfer)
2740 {
2741 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2742 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2743 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2744 uhci_soft_td_t *data, *dataend;
2745 uhci_soft_qh_t *sqh;
2746 int isread, endpt;
2747 int i;
2748
2749 if (sc->sc_dying)
2750 return USBD_IOERROR;
2751
2752 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2753
2754 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer,
2755 xfer->ux_length, xfer->ux_flags, 0);
2756
2757 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2758 KASSERT(xfer->ux_length <= xfer->ux_bufsize);
2759
2760 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
2761 isread = UE_GET_DIR(endpt) == UE_DIR_IN;
2762
2763 data = ux->ux_stdstart;
2764
2765 KASSERT(ux->ux_isdone);
2766 #ifdef DIAGNOSTIC
2767 ux->ux_isdone = false;
2768 #endif
2769
2770 /* Take lock to protect nexttoggle */
2771 if (!sc->sc_bus.ub_usepolling)
2772 mutex_enter(&sc->sc_lock);
2773 uhci_reset_std_chain(sc, xfer, xfer->ux_length, isread,
2774 &upipe->nexttoggle, &dataend);
2775
2776 dataend->td.td_status |= htole32(UHCI_TD_IOC);
2777 usb_syncmem(&dataend->dma,
2778 dataend->offs + offsetof(uhci_td_t, td_status),
2779 sizeof(dataend->td.td_status),
2780 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2781 ux->ux_stdend = dataend;
2782
2783 #ifdef UHCI_DEBUG
2784 if (uhcidebug >= 10) {
2785 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2786 uhci_dump_tds(data);
2787 uhci_dump_qh(upipe->intr.qhs[0]);
2788 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2789 }
2790 #endif
2791
2792 DPRINTFN(10, "qhs[0]=%#jx", (uintptr_t)upipe->intr.qhs[0], 0, 0, 0);
2793 for (i = 0; i < upipe->intr.npoll; i++) {
2794 sqh = upipe->intr.qhs[i];
2795 sqh->elink = data;
2796 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD);
2797 usb_syncmem(&sqh->dma,
2798 sqh->offs + offsetof(uhci_qh_t, qh_elink),
2799 sizeof(sqh->qh.qh_elink),
2800 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2801 }
2802 uhci_add_intr_list(sc, ux);
2803 xfer->ux_status = USBD_IN_PROGRESS;
2804 if (!sc->sc_bus.ub_usepolling)
2805 mutex_exit(&sc->sc_lock);
2806
2807 #ifdef UHCI_DEBUG
2808 if (uhcidebug >= 10) {
2809 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2810 uhci_dump_tds(data);
2811 uhci_dump_qh(upipe->intr.qhs[0]);
2812 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2813 }
2814 #endif
2815
2816 return USBD_IN_PROGRESS;
2817 }
2818
2819 /* Abort a device control request. */
2820 void
2821 uhci_device_ctrl_abort(struct usbd_xfer *xfer)
2822 {
2823 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2824
2825 KASSERT(mutex_owned(&sc->sc_lock));
2826
2827 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2828 uhci_abort_xfer(xfer, USBD_CANCELLED);
2829 }
2830
2831 /* Close a device control pipe. */
2832 void
2833 uhci_device_ctrl_close(struct usbd_pipe *pipe)
2834 {
2835 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2836 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2837
2838 uhci_free_sqh(sc, upipe->ctrl.sqh);
2839 uhci_free_std_locked(sc, upipe->ctrl.setup);
2840 uhci_free_std_locked(sc, upipe->ctrl.stat);
2841
2842 }
2843
2844 /* Abort a device interrupt request. */
2845 void
2846 uhci_device_intr_abort(struct usbd_xfer *xfer)
2847 {
2848 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
2849
2850 KASSERT(mutex_owned(&sc->sc_lock));
2851 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
2852
2853 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2854 DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2855
2856 uhci_abort_xfer(xfer, USBD_CANCELLED);
2857 }
2858
2859 /* Close a device interrupt pipe. */
2860 void
2861 uhci_device_intr_close(struct usbd_pipe *pipe)
2862 {
2863 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
2864 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
2865 int i, npoll;
2866
2867 KASSERT(mutex_owned(&sc->sc_lock));
2868
2869 /* Unlink descriptors from controller data structures. */
2870 npoll = upipe->intr.npoll;
2871 for (i = 0; i < npoll; i++)
2872 uhci_remove_intr(sc, upipe->intr.qhs[i]);
2873
2874 /*
2875 * We now have to wait for any activity on the physical
2876 * descriptors to stop.
2877 */
2878 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock);
2879
2880 for (i = 0; i < npoll; i++)
2881 uhci_free_sqh(sc, upipe->intr.qhs[i]);
2882 kmem_free(upipe->intr.qhs, npoll * sizeof(uhci_soft_qh_t *));
2883 }
2884
2885 int
2886 uhci_device_isoc_init(struct usbd_xfer *xfer)
2887 {
2888 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2889
2890 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST));
2891 KASSERT(xfer->ux_nframes != 0);
2892 KASSERT(ux->ux_isdone);
2893
2894 ux->ux_type = UX_ISOC;
2895 return 0;
2896 }
2897
2898 Static void
2899 uhci_device_isoc_fini(struct usbd_xfer *xfer)
2900 {
2901 struct uhci_xfer *ux __diagused = UHCI_XFER2UXFER(xfer);
2902
2903 KASSERT(ux->ux_type == UX_ISOC);
2904 }
2905
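/*
 * Queue an isochronous transfer: map each frame of the request onto the
 * pipe's pre-allocated per-frame TDs, starting at the pipe's next free
 * slot (a few frames ahead of the current frame number the first time).
 */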
2906 usbd_status
2907 uhci_device_isoc_transfer(struct usbd_xfer *xfer)
2908 {
2909 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
2910 usbd_status err __diagused;
2911
2912 UHCIHIST_FUNC(); UHCIHIST_CALLED();
2913 DPRINTFN(5, "xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
2914
2915 /* Put it on our queue, */
2916 mutex_enter(&sc->sc_lock);
2917 err = usb_insert_transfer(xfer);
2918 mutex_exit(&sc->sc_lock);
2919
2920 KASSERT(err == USBD_NORMAL_COMPLETION);
2921
2922 /* insert into schedule, */
2923
2924 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
2925 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
2926 struct isoc *isoc = &upipe->isoc;
2927 uhci_soft_td_t *std = NULL;
2928 uint32_t buf, len, status, offs;
2929 int i, next, nframes;
2930 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN;
2931
2932 DPRINTFN(5, "used=%jd next=%jd xfer=%#jx nframes=%jd",
2933 isoc->inuse, isoc->next, (uintptr_t)xfer, xfer->ux_nframes);
2934
2935 if (sc->sc_dying)
2936 return USBD_IOERROR;
2937
2938 if (xfer->ux_status == USBD_IN_PROGRESS) {
2939 /* This request has already been entered into the frame list */
2940 printf("%s: xfer=%p in frame list\n", __func__, xfer);
2941 /* XXX */
2942 }
2943
2944 #ifdef DIAGNOSTIC
2945 if (isoc->inuse >= UHCI_VFRAMELIST_COUNT)
2946 printf("%s: overflow!\n", __func__);
2947 #endif
2948
2949 KASSERT(xfer->ux_nframes != 0);
2950
2951 mutex_enter(&sc->sc_lock);
2952 next = isoc->next;
2953 if (next == -1) {
2954 /* Not in use yet, schedule it a few frames ahead. */
2955 next = (UREAD2(sc, UHCI_FRNUM) + 3) % UHCI_VFRAMELIST_COUNT;
2956 DPRINTFN(2, "start next=%jd", next, 0, 0, 0);
2957 }
2958
2959 xfer->ux_status = USBD_IN_PROGRESS;
2960 ux->ux_curframe = next;
2961
2962 buf = DMAADDR(&xfer->ux_dmabuf, 0);
2963 offs = 0;
2964 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(0) |
2965 UHCI_TD_ACTIVE |
2966 UHCI_TD_IOS);
2967 nframes = xfer->ux_nframes;
2968 for (i = 0; i < nframes; i++) {
2969 std = isoc->stds[next];
2970 if (++next >= UHCI_VFRAMELIST_COUNT)
2971 next = 0;
2972 len = xfer->ux_frlengths[i];
2973 std->td.td_buffer = htole32(buf);
2974 usb_syncmem(&xfer->ux_dmabuf, offs, len,
2975 rd ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2976 if (i == nframes - 1)
2977 status |= UHCI_TD_IOC;
2978 std->td.td_status = htole32(status);
2979 std->td.td_token &= htole32(~UHCI_TD_MAXLEN_MASK);
2980 std->td.td_token |= htole32(UHCI_TD_SET_MAXLEN(len));
2981 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
2982 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2983 #ifdef UHCI_DEBUG
2984 if (uhcidebug >= 5) {
2985 DPRINTF("--- dump start ---", 0, 0, 0, 0);
2986 DPRINTF("TD %jd", i, 0, 0, 0);
2987 uhci_dump_td(std);
2988 DPRINTF("--- dump end ---", 0, 0, 0, 0);
2989 }
2990 #endif
2991 buf += len;
2992 offs += len;
2993 }
2994 isoc->next = next;
2995 isoc->inuse += xfer->ux_nframes;
2996
2997 /* Set up interrupt info. */
2998 ux->ux_stdstart = std;
2999 ux->ux_stdend = std;
3000
3001 KASSERT(ux->ux_isdone);
3002 #ifdef DIAGNOSTIC
3003 ux->ux_isdone = false;
3004 #endif
3005 uhci_add_intr_list(sc, ux);
3006
3007 mutex_exit(&sc->sc_lock);
3008
3009 return USBD_IN_PROGRESS;
3010 }
3011
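/*
 * Abort an isochronous transfer: deactivate its TDs, wait for the
 * hardware to finish with them, and run the completion callback.
 */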
3012 void
3013 uhci_device_isoc_abort(struct usbd_xfer *xfer)
3014 {
3015 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3016 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3017 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
3018 uhci_soft_td_t **stds = upipe->isoc.stds;
3019 uhci_soft_td_t *std;
3020 int i, n, nframes, maxlen, len;
3021
3022 KASSERT(mutex_owned(&sc->sc_lock));
3023
3024 /* Transfer is already done. */
3025 if (xfer->ux_status != USBD_NOT_STARTED &&
3026 xfer->ux_status != USBD_IN_PROGRESS) {
3027 return;
3028 }
3029
3030 /* Give xfer the requested abort code. */
3031 xfer->ux_status = USBD_CANCELLED;
3032
3033 /* make hardware ignore it, */
3034 nframes = xfer->ux_nframes;
3035 n = ux->ux_curframe;
3036 maxlen = 0;
3037 for (i = 0; i < nframes; i++) {
3038 std = stds[n];
3039 usb_syncmem(&std->dma,
3040 std->offs + offsetof(uhci_td_t, td_status),
3041 sizeof(std->td.td_status),
3042 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3043 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC));
3044 usb_syncmem(&std->dma,
3045 std->offs + offsetof(uhci_td_t, td_status),
3046 sizeof(std->td.td_status),
3047 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3048 usb_syncmem(&std->dma,
3049 std->offs + offsetof(uhci_td_t, td_token),
3050 sizeof(std->td.td_token),
3051 BUS_DMASYNC_POSTWRITE);
3052 len = UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token));
3053 if (len > maxlen)
3054 maxlen = len;
3055 if (++n >= UHCI_VFRAMELIST_COUNT)
3056 n = 0;
3057 }
3058
3059 /* and wait until we are sure the hardware has finished. */
3060 delay(maxlen);
3061
3062 #ifdef DIAGNOSTIC
3063 ux->ux_isdone = true;
3064 #endif
3065 /* Remove from interrupt list. */
3066 uhci_del_intr_list(sc, ux);
3067
3068 /* Run callback. */
3069 usb_transfer_complete(xfer);
3070
3071 KASSERT(mutex_owned(&sc->sc_lock));
3072 }
3073
3074 void
3075 uhci_device_isoc_close(struct usbd_pipe *pipe)
3076 {
3077 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3078 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3079 uhci_soft_td_t *std, *vstd;
3080 struct isoc *isoc;
3081 int i;
3082
3083 KASSERT(mutex_owned(&sc->sc_lock));
3084
3085 /*
3086 * Make sure all TDs are marked as inactive.
3087 * Wait for completion.
3088 * Unschedule.
3089 * Deallocate.
3090 */
3091 isoc = &upipe->isoc;
3092
3093 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3094 std = isoc->stds[i];
3095 usb_syncmem(&std->dma,
3096 std->offs + offsetof(uhci_td_t, td_status),
3097 sizeof(std->td.td_status),
3098 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3099 std->td.td_status &= htole32(~UHCI_TD_ACTIVE);
3100 usb_syncmem(&std->dma,
3101 std->offs + offsetof(uhci_td_t, td_status),
3102 sizeof(std->td.td_status),
3103 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3104 }
3105 /* wait for completion */
3106 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock);
3107
3108 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3109 std = isoc->stds[i];
3110 for (vstd = sc->sc_vframes[i].htd;
3111 vstd != NULL && vstd->link.std != std;
3112 vstd = vstd->link.std)
3113 ;
3114 if (vstd == NULL) {
3115 /*panic*/
3116 printf("%s: %p not found\n", __func__, std);
3117 mutex_exit(&sc->sc_lock);
3118 return;
3119 }
3120 vstd->link = std->link;
3121 usb_syncmem(&std->dma,
3122 std->offs + offsetof(uhci_td_t, td_link),
3123 sizeof(std->td.td_link),
3124 BUS_DMASYNC_POSTWRITE);
3125 vstd->td.td_link = std->td.td_link;
3126 usb_syncmem(&vstd->dma,
3127 vstd->offs + offsetof(uhci_td_t, td_link),
3128 sizeof(vstd->td.td_link),
3129 BUS_DMASYNC_PREWRITE);
3130 uhci_free_std_locked(sc, std);
3131 }
3132
3133 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *));
3134 }
3135
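/*
 * Set up an isochronous pipe: allocate one inactive TD per virtual frame
 * and splice each one into the corresponding frame list entry.
 */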
3136 usbd_status
3137 uhci_setup_isoc(struct usbd_pipe *pipe)
3138 {
3139 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3140 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3141 int addr = upipe->pipe.up_dev->ud_addr;
3142 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
3143 int rd = UE_GET_DIR(endpt) == UE_DIR_IN;
3144 uhci_soft_td_t *std, *vstd;
3145 uint32_t token;
3146 struct isoc *isoc;
3147 int i;
3148
3149 isoc = &upipe->isoc;
3150
3151 isoc->stds = kmem_alloc(
3152 UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *), KM_SLEEP);
3153 if (isoc->stds == NULL)
3154 return USBD_NOMEM;
3155
3156 token = rd ? UHCI_TD_IN (0, endpt, addr, 0) :
3157 UHCI_TD_OUT(0, endpt, addr, 0);
3158
3159 /* Allocate the TDs and mark them as inactive. */
3160 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3161 std = uhci_alloc_std(sc);
3162 if (std == 0)
3163 goto bad;
3164 std->td.td_status = htole32(UHCI_TD_IOS); /* iso, inactive */
3165 std->td.td_token = htole32(token);
3166 usb_syncmem(&std->dma, std->offs, sizeof(std->td),
3167 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3168 isoc->stds[i] = std;
3169 }
3170
3171 mutex_enter(&sc->sc_lock);
3172
3173 /* Insert TDs into schedule. */
3174 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
3175 std = isoc->stds[i];
3176 vstd = sc->sc_vframes[i].htd;
3177 usb_syncmem(&vstd->dma,
3178 vstd->offs + offsetof(uhci_td_t, td_link),
3179 sizeof(vstd->td.td_link),
3180 BUS_DMASYNC_POSTWRITE);
3181 std->link = vstd->link;
3182 std->td.td_link = vstd->td.td_link;
3183 usb_syncmem(&std->dma,
3184 std->offs + offsetof(uhci_td_t, td_link),
3185 sizeof(std->td.td_link),
3186 BUS_DMASYNC_PREWRITE);
3187 vstd->link.std = std;
3188 vstd->td.td_link = htole32(std->physaddr | UHCI_PTR_TD);
3189 usb_syncmem(&vstd->dma,
3190 vstd->offs + offsetof(uhci_td_t, td_link),
3191 sizeof(vstd->td.td_link),
3192 BUS_DMASYNC_PREWRITE);
3193 }
3194 mutex_exit(&sc->sc_lock);
3195
3196 isoc->next = -1;
3197 isoc->inuse = 0;
3198
3199 return USBD_NORMAL_COMPLETION;
3200
3201 bad:
3202 while (--i >= 0)
3203 uhci_free_std(sc, isoc->stds[i]);
3204 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *));
3205 return USBD_NOMEM;
3206 }
3207
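/*
 * Isochronous transfer completion: clear the IOC bit on the final TD and
 * sync the data buffer for each frame.
 */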
3208 void
3209 uhci_device_isoc_done(struct usbd_xfer *xfer)
3210 {
3211 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3212 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
3213 int i, offs;
3214 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN;
3215
3216 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3217 DPRINTFN(4, "length=%jd, ux_state=0x%08jx",
3218 xfer->ux_actlen, xfer->ux_state, 0, 0);
3219
3220 #ifdef DIAGNOSTIC
3221 if (ux->ux_stdend == NULL) {
3222 printf("%s: xfer=%p stdend==NULL\n", __func__, xfer);
3223 #ifdef UHCI_DEBUG
3224 DPRINTF("--- dump start ---", 0, 0, 0, 0);
3225 uhci_dump_ii(ux);
3226 DPRINTF("--- dump end ---", 0, 0, 0, 0);
3227 #endif
3228 return;
3229 }
3230 #endif
3231
3232 /* Turn off the interrupt since it is active even if the TD is not. */
3233 usb_syncmem(&ux->ux_stdend->dma,
3234 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status),
3235 sizeof(ux->ux_stdend->td.td_status),
3236 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3237 ux->ux_stdend->td.td_status &= htole32(~UHCI_TD_IOC);
3238 usb_syncmem(&ux->ux_stdend->dma,
3239 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status),
3240 sizeof(ux->ux_stdend->td.td_status),
3241 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3242
3243 offs = 0;
3244 for (i = 0; i < xfer->ux_nframes; i++) {
3245 usb_syncmem(&xfer->ux_dmabuf, offs, xfer->ux_frlengths[i],
3246 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3247 offs += xfer->ux_frlengths[i];
3248 }
3249 }
3250
3251 void
3252 uhci_device_intr_done(struct usbd_xfer *xfer)
3253 {
3254 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer);
3255 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3256 uhci_soft_qh_t *sqh;
3257 int i, npoll;
3258
3259 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3260 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0);
3261
3262 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3263
3264 npoll = upipe->intr.npoll;
3265 for (i = 0; i < npoll; i++) {
3266 sqh = upipe->intr.qhs[i];
3267 sqh->elink = NULL;
3268 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3269 usb_syncmem(&sqh->dma,
3270 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3271 sizeof(sqh->qh.qh_elink),
3272 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3273 }
3274 const int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress;
3275 const bool isread = UE_GET_DIR(endpt) == UE_DIR_IN;
3276 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
3277 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3278 }
3279
3280 /* Deallocate request data structures */
3281 void
3282 uhci_device_ctrl_done(struct usbd_xfer *xfer)
3283 {
3284 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3285 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3286 int len = UGETW(xfer->ux_request.wLength);
3287 int isread = (xfer->ux_request.bmRequestType & UT_READ);
3288
3289 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
3290
3291 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3292
3293 KASSERT(xfer->ux_rqflags & URQ_REQUEST);
3294
3295 /* XXXNH move to uhci_idone??? */
3296 if (upipe->pipe.up_dev->ud_speed == USB_SPEED_LOW)
3297 uhci_remove_ls_ctrl(sc, upipe->ctrl.sqh);
3298 else
3299 uhci_remove_hs_ctrl(sc, upipe->ctrl.sqh);
3300
3301 if (len) {
3302 usb_syncmem(&xfer->ux_dmabuf, 0, len,
3303 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3304 }
3305 usb_syncmem(&upipe->ctrl.reqdma, 0,
3306 sizeof(usb_device_request_t), BUS_DMASYNC_POSTWRITE);
3307
3308 DPRINTF("length=%jd", xfer->ux_actlen, 0, 0, 0);
3309 }
3310
3311 /* Deallocate request data structures */
3312 void
3313 uhci_device_bulk_done(struct usbd_xfer *xfer)
3314 {
3315 uhci_softc_t *sc = UHCI_XFER2SC(xfer);
3316 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe);
3317 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
3318 int endpt = ed->bEndpointAddress;
3319 int isread = UE_GET_DIR(endpt) == UE_DIR_IN;
3320
3321 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3322 DPRINTFN(5, "xfer=%#jx sc=%#jx upipe=%#jx", (uintptr_t)xfer,
3323 (uintptr_t)sc, (uintptr_t)upipe, 0);
3324
3325 KASSERT(mutex_owned(&sc->sc_lock));
3326
3327 uhci_remove_bulk(sc, upipe->bulk.sqh);
3328
3329 if (xfer->ux_length) {
3330 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
3331 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3332 }
3333
3334 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0);
3335 }
3336
3337 /* Add an interrupt QH; called with the bus lock held. */
3338 void
3339 uhci_add_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
3340 {
3341 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos];
3342 uhci_soft_qh_t *eqh;
3343
3344 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3345 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0);
3346
3347 eqh = vf->eqh;
3348 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
3349 sizeof(eqh->qh.qh_hlink),
3350 BUS_DMASYNC_POSTWRITE);
3351 sqh->hlink = eqh->hlink;
3352 sqh->qh.qh_hlink = eqh->qh.qh_hlink;
3353 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
3354 sizeof(sqh->qh.qh_hlink),
3355 BUS_DMASYNC_PREWRITE);
3356 eqh->hlink = sqh;
3357 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);
3358 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink),
3359 sizeof(eqh->qh.qh_hlink),
3360 BUS_DMASYNC_PREWRITE);
3361 vf->eqh = sqh;
3362 vf->bandwidth++;
3363 }
3364
3365 /* Remove interrupt QH. */
3366 void
3367 uhci_remove_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh)
3368 {
3369 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos];
3370 uhci_soft_qh_t *pqh;
3371
3372 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3373 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0);
3374
3375 /* See comment in uhci_remove_ctrl() */
3376
3377 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink),
3378 sizeof(sqh->qh.qh_elink),
3379 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3380 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) {
3381 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3382 usb_syncmem(&sqh->dma,
3383 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3384 sizeof(sqh->qh.qh_elink),
3385 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3386 delay(UHCI_QH_REMOVE_DELAY);
3387 }
3388
3389 pqh = uhci_find_prev_qh(vf->hqh, sqh);
3390 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink),
3391 sizeof(sqh->qh.qh_hlink),
3392 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3393 pqh->hlink = sqh->hlink;
3394 pqh->qh.qh_hlink = sqh->qh.qh_hlink;
3395 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink),
3396 sizeof(pqh->qh.qh_hlink),
3397 BUS_DMASYNC_PREWRITE);
3398 delay(UHCI_QH_REMOVE_DELAY);
3399 if (vf->eqh == sqh)
3400 vf->eqh = pqh;
3401 vf->bandwidth--;
3402 }
3403
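/*
 * Set up the interrupt schedule for a pipe: find the schedule offset with
 * the most bandwidth left for the requested polling interval, allocate one
 * queue head per polled frame, and link them into the frame list.
 */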
3404 usbd_status
3405 uhci_device_setintr(uhci_softc_t *sc, struct uhci_pipe *upipe, int ival)
3406 {
3407 uhci_soft_qh_t *sqh;
3408 int i, npoll;
3409 u_int bestbw, bw, bestoffs, offs;
3410
3411 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3412 DPRINTFN(2, "pipe=%#jx", (uintptr_t)upipe, 0, 0, 0);
3413 if (ival == 0) {
3414 printf("%s: 0 interval\n", __func__);
3415 return USBD_INVAL;
3416 }
3417
3418 if (ival > UHCI_VFRAMELIST_COUNT)
3419 ival = UHCI_VFRAMELIST_COUNT;
3420 npoll = (UHCI_VFRAMELIST_COUNT + ival - 1) / ival;
3421 DPRINTF("ival=%jd npoll=%jd", ival, npoll, 0, 0);
3422
3423 upipe->intr.npoll = npoll;
3424 upipe->intr.qhs =
3425 kmem_alloc(npoll * sizeof(uhci_soft_qh_t *), KM_SLEEP);
3426
3427 /*
3428 * Figure out which offset in the schedule has the most
3429 * bandwidth left over.
3430 */
3431 #define MOD(i) ((i) & (UHCI_VFRAMELIST_COUNT-1))
3432 for (bestoffs = offs = 0, bestbw = ~0; offs < ival; offs++) {
3433 for (bw = i = 0; i < npoll; i++)
3434 bw += sc->sc_vframes[MOD(i * ival + offs)].bandwidth;
3435 if (bw < bestbw) {
3436 bestbw = bw;
3437 bestoffs = offs;
3438 }
3439 }
3440 DPRINTF("bw=%jd offs=%jd", bestbw, bestoffs, 0, 0);
3441 for (i = 0; i < npoll; i++) {
3442 upipe->intr.qhs[i] = sqh = uhci_alloc_sqh(sc);
3443 sqh->elink = NULL;
3444 sqh->qh.qh_elink = htole32(UHCI_PTR_T);
3445 usb_syncmem(&sqh->dma,
3446 sqh->offs + offsetof(uhci_qh_t, qh_elink),
3447 sizeof(sqh->qh.qh_elink),
3448 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3449 sqh->pos = MOD(i * ival + bestoffs);
3450 }
3451 #undef MOD
3452
3453 mutex_enter(&sc->sc_lock);
3454 /* Enter QHs into the controller data structures. */
3455 for (i = 0; i < npoll; i++)
3456 uhci_add_intr(sc, upipe->intr.qhs[i]);
3457 mutex_exit(&sc->sc_lock);
3458
3459 DPRINTFN(5, "returns %#jx", (uintptr_t)upipe, 0, 0, 0);
3460
3461 return USBD_NORMAL_COMPLETION;
3462 }
3463
3464 /* Open a new pipe. */
3465 usbd_status
3466 uhci_open(struct usbd_pipe *pipe)
3467 {
3468 uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
3469 struct usbd_bus *bus = pipe->up_dev->ud_bus;
3470 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe);
3471 usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc;
3472 usbd_status err = USBD_NOMEM;
3473 int ival;
3474
3475 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3476 DPRINTF("pipe=%#jx, addr=%jd, endpt=%jd (%jd)",
3477 (uintptr_t)pipe, pipe->up_dev->ud_addr, ed->bEndpointAddress,
3478 bus->ub_rhaddr);
3479
3480 if (sc->sc_dying)
3481 return USBD_IOERROR;
3482
3483 upipe->aborting = 0;
3484 /* toggle state needed for bulk endpoints */
3485 upipe->nexttoggle = pipe->up_endpoint->ue_toggle;
3486
3487 if (pipe->up_dev->ud_addr == bus->ub_rhaddr) {
3488 switch (ed->bEndpointAddress) {
3489 case USB_CONTROL_ENDPOINT:
3490 pipe->up_methods = &roothub_ctrl_methods;
3491 break;
3492 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
3493 pipe->up_methods = &uhci_root_intr_methods;
3494 break;
3495 default:
3496 return USBD_INVAL;
3497 }
3498 } else {
3499 switch (ed->bmAttributes & UE_XFERTYPE) {
3500 case UE_CONTROL:
3501 pipe->up_methods = &uhci_device_ctrl_methods;
3502 upipe->ctrl.sqh = uhci_alloc_sqh(sc);
3503 if (upipe->ctrl.sqh == NULL)
3504 goto bad;
3505 upipe->ctrl.setup = uhci_alloc_std(sc);
3506 if (upipe->ctrl.setup == NULL) {
3507 uhci_free_sqh(sc, upipe->ctrl.sqh);
3508 goto bad;
3509 }
3510 upipe->ctrl.stat = uhci_alloc_std(sc);
3511 if (upipe->ctrl.stat == NULL) {
3512 uhci_free_sqh(sc, upipe->ctrl.sqh);
3513 uhci_free_std(sc, upipe->ctrl.setup);
3514 goto bad;
3515 }
3516 err = usb_allocmem(&sc->sc_bus,
3517 sizeof(usb_device_request_t),
3518 0, &upipe->ctrl.reqdma);
3519 if (err) {
3520 uhci_free_sqh(sc, upipe->ctrl.sqh);
3521 uhci_free_std(sc, upipe->ctrl.setup);
3522 uhci_free_std(sc, upipe->ctrl.stat);
3523 goto bad;
3524 }
3525 break;
3526 case UE_INTERRUPT:
3527 pipe->up_methods = &uhci_device_intr_methods;
3528 ival = pipe->up_interval;
3529 if (ival == USBD_DEFAULT_INTERVAL)
3530 ival = ed->bInterval;
3531 return uhci_device_setintr(sc, upipe, ival);
3532 case UE_ISOCHRONOUS:
3533 pipe->up_serialise = false;
3534 pipe->up_methods = &uhci_device_isoc_methods;
3535 return uhci_setup_isoc(pipe);
3536 case UE_BULK:
3537 pipe->up_methods = &uhci_device_bulk_methods;
3538 upipe->bulk.sqh = uhci_alloc_sqh(sc);
3539 if (upipe->bulk.sqh == NULL)
3540 goto bad;
3541 break;
3542 }
3543 }
3544 return USBD_NORMAL_COMPLETION;
3545
3546 bad:
3547 return USBD_NOMEM;
3548 }
3549
3550 /*
3551 * Data structures and routines to emulate the root hub.
3552 */
3553 /*
3554 * The USB hub protocol requires that SET_FEATURE(PORT_RESET) also
3555 * enables the port, and states that SET_FEATURE(PORT_ENABLE)
3556 * should not be used by the USB subsystem. As we cannot issue a
3557 * SET_FEATURE(PORT_ENABLE) externally, we must ensure that the port
3558 * will be enabled as part of the reset.
3559 *
3560 * On the VT83C572, the port cannot be successfully enabled until the
3561 * outstanding "port enable change" and "connection status change"
3562 * events have been reset.
3563 */
3564 Static usbd_status
3565 uhci_portreset(uhci_softc_t *sc, int index)
3566 {
3567 int lim, port, x;
3568 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3569
3570 if (index == 1)
3571 port = UHCI_PORTSC1;
3572 else if (index == 2)
3573 port = UHCI_PORTSC2;
3574 else
3575 return USBD_IOERROR;
3576
3577 x = URWMASK(UREAD2(sc, port));
3578 UWRITE2(sc, port, x | UHCI_PORTSC_PR);
3579
3580 usb_delay_ms(&sc->sc_bus, USB_PORT_ROOT_RESET_DELAY);
3581
3582 DPRINTF("uhci port %jd reset, status0 = 0x%04jx", index,
3583 UREAD2(sc, port), 0, 0);
3584
3585 x = URWMASK(UREAD2(sc, port));
3586 UWRITE2(sc, port, x & ~(UHCI_PORTSC_PR | UHCI_PORTSC_SUSP));
3587
3588 delay(100);
3589
3590 DPRINTF("uhci port %jd reset, status1 = 0x%04jx", index,
3591 UREAD2(sc, port), 0, 0);
3592
3593 x = URWMASK(UREAD2(sc, port));
3594 UWRITE2(sc, port, x | UHCI_PORTSC_PE);
3595
3596 for (lim = 10; --lim > 0;) {
3597 usb_delay_ms(&sc->sc_bus, USB_PORT_RESET_DELAY);
3598
3599 x = UREAD2(sc, port);
3600 DPRINTF("uhci port %jd iteration %ju, status = 0x%04jx", index,
3601 lim, x, 0);
3602
3603 if (!(x & UHCI_PORTSC_CCS)) {
3604 /*
3605 * No device is connected (or was disconnected
3606 * during reset). Consider the port reset.
3607 * The delay must be long enough to ensure on
3608 * the initial iteration that the device
3609 * connection will have been registered. 50ms
3610 * appears to be sufficient, but 20ms is not.
3611 */
3612 DPRINTFN(3, "uhci port %jd loop %ju, device detached",
3613 index, lim, 0, 0);
3614 break;
3615 }
3616
3617 if (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC)) {
3618 /*
3619 * Port enabled changed and/or connection
3620 * status changed were set. Reset either or
3621 * both raised flags (by writing a 1 to that
3622 * bit), and wait again for state to settle.
3623 */
3624 UWRITE2(sc, port, URWMASK(x) |
3625 (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC)));
3626 continue;
3627 }
3628
3629 if (x & UHCI_PORTSC_PE)
3630 /* Port is enabled */
3631 break;
3632
3633 UWRITE2(sc, port, URWMASK(x) | UHCI_PORTSC_PE);
3634 }
3635
3636 DPRINTFN(3, "uhci port %jd reset, status2 = 0x%04jx", index,
3637 UREAD2(sc, port), 0, 0);
3638
3639 if (lim <= 0) {
3640 DPRINTF("uhci port %jd reset timed out", index,
3641 0, 0, 0);
3642 return USBD_TIMEOUT;
3643 }
3644
3645 sc->sc_isreset = 1;
3646 return USBD_NORMAL_COMPLETION;
3647 }
3648
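/*
 * Handle root hub control requests; anything not handled here is left to
 * the generic usbroothub code (signalled by returning buflen).
 */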
3649 Static int
3650 uhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
3651 void *buf, int buflen)
3652 {
3653 uhci_softc_t *sc = UHCI_BUS2SC(bus);
3654 int port, x;
3655 int status, change, totlen = 0;
3656 uint16_t len, value, index;
3657 usb_port_status_t ps;
3658 usbd_status err;
3659
3660 UHCIHIST_FUNC(); UHCIHIST_CALLED();
3661
3662 if (sc->sc_dying)
3663 return -1;
3664
3665 DPRINTF("type=0x%02jx request=%02jx", req->bmRequestType,
3666 req->bRequest, 0, 0);
3667
3668 len = UGETW(req->wLength);
3669 value = UGETW(req->wValue);
3670 index = UGETW(req->wIndex);
3671
3672 #define C(x,y) ((x) | ((y) << 8))
3673 switch (C(req->bRequest, req->bmRequestType)) {
3674 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3675 DPRINTF("wValue=0x%04jx", value, 0, 0, 0);
3676 if (len == 0)
3677 break;
3678 switch (value) {
3679 #define sd ((usb_string_descriptor_t *)buf)
3680 case C(2, UDESC_STRING):
3681 /* Product */
3682 totlen = usb_makestrdesc(sd, len, "UHCI root hub");
3683 break;
3684 #undef sd
3685 default:
3686 /* default from usbroothub */
3687 return buflen;
3688 }
3689 break;
3690
3691 /* Hub requests */
3692 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
3693 break;
3694 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
3695 DPRINTF("UR_CLEAR_PORT_FEATURE port=%jd feature=%jd", index,
3696 value, 0, 0);
3697 if (index == 1)
3698 port = UHCI_PORTSC1;
3699 else if (index == 2)
3700 port = UHCI_PORTSC2;
3701 else {
3702 return -1;
3703 }
3704 switch(value) {
3705 case UHF_PORT_ENABLE:
3706 x = URWMASK(UREAD2(sc, port));
3707 UWRITE2(sc, port, x & ~UHCI_PORTSC_PE);
3708 break;
3709 case UHF_PORT_SUSPEND:
3710 x = URWMASK(UREAD2(sc, port));
3711 if (!(x & UHCI_PORTSC_SUSP)) /* not suspended */
3712 break;
3713 UWRITE2(sc, port, x | UHCI_PORTSC_RD);
3714 /* see USB2 spec ch. 7.1.7.7 */
3715 usb_delay_ms(&sc->sc_bus, 20);
3716 UWRITE2(sc, port, x & ~UHCI_PORTSC_SUSP);
3717 /* 10ms resume delay must be provided by caller */
3718 break;
3719 case UHF_PORT_RESET:
3720 x = URWMASK(UREAD2(sc, port));
3721 UWRITE2(sc, port, x & ~UHCI_PORTSC_PR);
3722 break;
		case UHF_C_PORT_CONNECTION:
			x = URWMASK(UREAD2(sc, port));
			UWRITE2(sc, port, x | UHCI_PORTSC_CSC);
			break;
		case UHF_C_PORT_ENABLE:
			x = URWMASK(UREAD2(sc, port));
			UWRITE2(sc, port, x | UHCI_PORTSC_POEDC);
			break;
		case UHF_C_PORT_OVER_CURRENT:
			x = URWMASK(UREAD2(sc, port));
			UWRITE2(sc, port, x | UHCI_PORTSC_OCIC);
			break;
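		/*
		 * UHCI has no hardware reset-change bit, so reset
		 * completion is tracked in software: uhci_portreset()
		 * sets sc_isreset and GetPortStatus reports it as
		 * UPS_C_PORT_RESET until it is cleared here.
		 */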
		case UHF_C_PORT_RESET:
			sc->sc_isreset = 0;
			break;
		case UHF_PORT_CONNECTION:
		case UHF_PORT_OVER_CURRENT:
		case UHF_PORT_POWER:
		case UHF_PORT_LOW_SPEED:
		case UHF_C_PORT_SUSPEND:
		default:
			return -1;
		}
		break;
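	/*
	 * GetBusState: report the raw D+/D- line state of the selected
	 * port, as extracted from PORTSC by UHCI_PORTSC_GET_LS().
	 */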
	case C(UR_GET_BUS_STATE, UT_READ_CLASS_OTHER):
		if (index == 1)
			port = UHCI_PORTSC1;
		else if (index == 2)
			port = UHCI_PORTSC2;
		else {
			return -1;
		}
		if (len > 0) {
			*(uint8_t *)buf =
			    UHCI_PORTSC_GET_LS(UREAD2(sc, port));
			totlen = 1;
		}
		break;
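	/*
	 * GetHubDescriptor: buf already holds the default hub
	 * descriptor prepared by the generic usbroothub code; copy it
	 * out, patch in this controller's port count and copy it back.
	 */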
	case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
		if (len == 0)
			break;
		if ((value & 0xff) != 0) {
			return -1;
		}
		usb_hub_descriptor_t hubd;

		totlen = uimin(buflen, sizeof(hubd));
		memcpy(&hubd, buf, totlen);
		hubd.bNbrPorts = 2;
		memcpy(buf, &hubd, totlen);
		break;
	case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
		if (len != 4) {
			return -1;
		}
		memset(buf, 0, len);
		totlen = len;
		break;
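	/*
	 * GetPortStatus: translate the UHCI PORTSC bits into standard
	 * hub port status/change bits.  UHCI ports have no power
	 * switching, so UPS_PORT_POWER is always reported, and the
	 * reset-change bit comes from the software sc_isreset flag.
	 */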
	case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
		if (index == 1)
			port = UHCI_PORTSC1;
		else if (index == 2)
			port = UHCI_PORTSC2;
		else {
			return -1;
		}
		if (len != 4) {
			return -1;
		}
		x = UREAD2(sc, port);
		status = change = 0;
		if (x & UHCI_PORTSC_CCS)
			status |= UPS_CURRENT_CONNECT_STATUS;
		if (x & UHCI_PORTSC_CSC)
			change |= UPS_C_CONNECT_STATUS;
		if (x & UHCI_PORTSC_PE)
			status |= UPS_PORT_ENABLED;
		if (x & UHCI_PORTSC_POEDC)
			change |= UPS_C_PORT_ENABLED;
		if (x & UHCI_PORTSC_OCI)
			status |= UPS_OVERCURRENT_INDICATOR;
		if (x & UHCI_PORTSC_OCIC)
			change |= UPS_C_OVERCURRENT_INDICATOR;
		if (x & UHCI_PORTSC_SUSP)
			status |= UPS_SUSPEND;
		if (x & UHCI_PORTSC_LSDA)
			status |= UPS_LOW_SPEED;
		status |= UPS_PORT_POWER;
		if (sc->sc_isreset)
			change |= UPS_C_PORT_RESET;
		USETW(ps.wPortStatus, status);
		USETW(ps.wPortChange, change);
		totlen = uimin(len, sizeof(ps));
		memcpy(buf, &ps, totlen);
		break;
	case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
		return -1;
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
		break;
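	/*
	 * SetPortFeature: enable, suspend and reset are implemented
	 * directly; PORT_POWER is accepted but is a no-op, since UHCI
	 * root ports are permanently powered.
	 */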
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
		if (index == 1)
			port = UHCI_PORTSC1;
		else if (index == 2)
			port = UHCI_PORTSC2;
		else {
			return -1;
		}
		switch(value) {
		case UHF_PORT_ENABLE:
			x = URWMASK(UREAD2(sc, port));
			UWRITE2(sc, port, x | UHCI_PORTSC_PE);
			break;
		case UHF_PORT_SUSPEND:
			x = URWMASK(UREAD2(sc, port));
			UWRITE2(sc, port, x | UHCI_PORTSC_SUSP);
			break;
		case UHF_PORT_RESET:
			err = uhci_portreset(sc, index);
			if (err != USBD_NORMAL_COMPLETION)
				return -1;
			return 0;
		case UHF_PORT_POWER:
			/* Pretend we turned on power */
			return 0;
		case UHF_C_PORT_CONNECTION:
		case UHF_C_PORT_ENABLE:
		case UHF_C_PORT_OVER_CURRENT:
		case UHF_PORT_CONNECTION:
		case UHF_PORT_OVER_CURRENT:
		case UHF_PORT_LOW_SPEED:
		case UHF_C_PORT_SUSPEND:
		case UHF_C_PORT_RESET:
		default:
			return -1;
		}
		break;
	default:
		/* default from usbroothub */
		DPRINTF("returning %jd (usbroothub default)",
		    buflen, 0, 0, 0);
		return buflen;
	}

	DPRINTF("returning %jd", totlen, 0, 0, 0);

	return totlen;
}

/* Abort a root interrupt request. */
void
uhci_root_intr_abort(struct usbd_xfer *xfer)
{
	uhci_softc_t *sc = UHCI_XFER2SC(xfer);

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);

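	/*
	 * The root-hub interrupt transfer is not backed by real TDs;
	 * it is serviced by the sc_poll_handle callout.  Stopping the
	 * callout and detaching the xfer is therefore all an abort
	 * needs before completing the transfer as cancelled.
	 */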
	callout_stop(&sc->sc_poll_handle);
	sc->sc_intr_xfer = NULL;

	xfer->ux_status = USBD_CANCELLED;
#ifdef DIAGNOSTIC
	UHCI_XFER2UXFER(xfer)->ux_isdone = true;
#endif
	usb_transfer_complete(xfer);
}

usbd_status
uhci_root_intr_transfer(struct usbd_xfer *xfer)
{
	uhci_softc_t *sc = UHCI_XFER2SC(xfer);
	usbd_status err;

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/*
	 * The pipe isn't running (otherwise err would be
	 * USBD_IN_PROGRESS), so start the first transfer on the queue.
	 */
	return uhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

/* Start a transfer on the root interrupt pipe */
usbd_status
uhci_root_intr_start(struct usbd_xfer *xfer)
{
	struct usbd_pipe *pipe = xfer->ux_pipe;
	uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
	unsigned int ival;

	UHCIHIST_FUNC(); UHCIHIST_CALLED();
	DPRINTF("xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, xfer->ux_length,
	    xfer->ux_flags, 0);

	if (sc->sc_dying)
		return USBD_IOERROR;

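	/*
	 * There is no real interrupt endpoint behind the root hub, so
	 * instead of queueing TDs we arm the polling callout: every
	 * polling interval uhci_poll_hub() checks the ports for status
	 * changes and completes this xfer when one is seen.
	 */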
	/* XXX temporary variable needed to avoid gcc3 warning */
	ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
	sc->sc_ival = mstohz(ival);
	callout_reset(&sc->sc_poll_handle, sc->sc_ival, uhci_poll_hub, xfer);
	sc->sc_intr_xfer = xfer;
	return USBD_IN_PROGRESS;
}

/* Close the root interrupt pipe. */
void
uhci_root_intr_close(struct usbd_pipe *pipe)
{
	uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
	UHCIHIST_FUNC(); UHCIHIST_CALLED();

	KASSERT(mutex_owned(&sc->sc_lock));

	callout_stop(&sc->sc_poll_handle);
	sc->sc_intr_xfer = NULL;
}
