     1 /*	$NetBSD: xhci.c,v 1.187 2025/01/30 00:42:47 jmcneill Exp $	*/
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * USB rev 2.0 and rev 3.1 specification
31 * http://www.usb.org/developers/docs/
32 * xHCI rev 1.1 specification
33 * http://www.intel.com/technology/usb/spec.htm
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.187 2025/01/30 00:42:47 jmcneill Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_usb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/kmem.h>
47 #include <sys/device.h>
48 #include <sys/select.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/sysctl.h>
56
57 #include <machine/endian.h>
58
59 #include <dev/usb/usb.h>
60 #include <dev/usb/usbdi.h>
61 #include <dev/usb/usbdivar.h>
62 #include <dev/usb/usbdi_util.h>
63 #include <dev/usb/usbhist.h>
64 #include <dev/usb/usb_mem.h>
65 #include <dev/usb/usb_quirks.h>
66
67 #include <dev/usb/xhcireg.h>
68 #include <dev/usb/xhcivar.h>
69 #include <dev/usb/usbroothub.h>
70
71
72 #ifdef USB_DEBUG
73 #ifndef XHCI_DEBUG
74 #define xhcidebug 0
75 #else /* !XHCI_DEBUG */
76 #define HEXDUMP(a, b, c) \
77 do { \
78 if (xhcidebug > 0) \
79 hexdump(printf, a, b, c); \
80 } while (/*CONSTCOND*/0)
81 static int xhcidebug = 0;
82
83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
84 {
85 int err;
86 const struct sysctlnode *rnode;
87 const struct sysctlnode *cnode;
88
89 err = sysctl_createv(clog, 0, NULL, &rnode,
90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
91 SYSCTL_DESCR("xhci global controls"),
92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
93
94 if (err)
95 goto fail;
96
97 /* control debugging printfs */
98 err = sysctl_createv(clog, 0, &rnode, &cnode,
99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
100 "debug", SYSCTL_DESCR("Enable debugging output"),
101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
102 if (err)
103 goto fail;
104
105 return;
106 fail:
107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
108 }
109
110 #endif /* !XHCI_DEBUG */
111 #endif /* USB_DEBUG */
112
113 #ifndef HEXDUMP
114 #define HEXDUMP(a, b, c)
115 #endif
116
117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
119 #define XHCIHIST_FUNC() USBHIST_FUNC()
120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug)
121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)
123
124 #define XHCI_DCI_SLOT 0
125 #define XHCI_DCI_EP_CONTROL 1
126
127 #define XHCI_ICI_INPUT_CONTROL 0
128
129 struct xhci_pipe {
130 struct usbd_pipe xp_pipe;
131 struct usb_task xp_async_task;
132 int16_t xp_isoc_next; /* next frame */
133 uint8_t xp_maxb; /* max burst */
134 uint8_t xp_mult;
135 };
136
137 #define XHCI_COMMAND_RING_TRBS 256
138 #define XHCI_EVENT_RING_TRBS 256
139 #define XHCI_EVENT_RING_SEGMENTS 1
140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT
141
142 static usbd_status xhci_open(struct usbd_pipe *);
143 static void xhci_close_pipe(struct usbd_pipe *);
144 static int xhci_intr1(struct xhci_softc * const);
145 static void xhci_softintr(void *);
146 static void xhci_poll(struct usbd_bus *);
147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
149 static void xhci_abortx(struct usbd_xfer *);
150 static bool xhci_dying(struct usbd_bus *);
151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
153 struct usbd_port *);
154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
155 void *, int);
156
157 static void xhci_pipe_restart(struct usbd_pipe *);
158 static void xhci_pipe_restart_async_task(void *);
159 static void xhci_pipe_restart_async(struct usbd_pipe *);
160
161 static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
162 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
163 static void xhci_reset_endpoint(struct usbd_pipe *);
164 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *,
165 struct xhci_slot *, u_int, uint32_t);
166 static usbd_status xhci_stop_endpoint(struct usbd_pipe *);
167
168 static void xhci_host_dequeue(struct xhci_ring * const);
169 static void xhci_set_dequeue(struct usbd_pipe *);
170
171 static usbd_status xhci_do_command(struct xhci_softc * const,
172 struct xhci_soft_trb * const, int);
173 static usbd_status xhci_do_command_locked(struct xhci_softc * const,
174 struct xhci_soft_trb * const, int);
175 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
176 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *);
177 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
178 static usbd_status xhci_enable_slot(struct xhci_softc * const,
179 uint8_t * const);
180 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
181 static usbd_status xhci_address_device(struct xhci_softc * const,
182 uint64_t, uint8_t, bool);
183 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
184 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
185 struct xhci_slot * const, u_int);
186 static usbd_status xhci_ring_init(struct xhci_softc * const,
187 struct xhci_ring **, size_t, size_t);
188 static void xhci_ring_free(struct xhci_softc * const,
189 struct xhci_ring ** const);
190
191 static void xhci_setup_ctx(struct usbd_pipe *);
192 static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
193 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
194 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
195 static uint32_t xhci_bival2ival(uint32_t, uint32_t, uint32_t);
196
197 static void xhci_noop(struct usbd_pipe *);
198
199 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
200 static usbd_status xhci_root_intr_start(struct usbd_xfer *);
201 static void xhci_root_intr_abort(struct usbd_xfer *);
202 static void xhci_root_intr_close(struct usbd_pipe *);
203 static void xhci_root_intr_done(struct usbd_xfer *);
204
205 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
206 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
207 static void xhci_device_ctrl_abort(struct usbd_xfer *);
208 static void xhci_device_ctrl_close(struct usbd_pipe *);
209 static void xhci_device_ctrl_done(struct usbd_xfer *);
210
211 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
212 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *);
213 static void xhci_device_isoc_abort(struct usbd_xfer *);
214 static void xhci_device_isoc_close(struct usbd_pipe *);
215 static void xhci_device_isoc_done(struct usbd_xfer *);
216
217 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
218 static usbd_status xhci_device_intr_start(struct usbd_xfer *);
219 static void xhci_device_intr_abort(struct usbd_xfer *);
220 static void xhci_device_intr_close(struct usbd_pipe *);
221 static void xhci_device_intr_done(struct usbd_xfer *);
222
223 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
224 static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
225 static void xhci_device_bulk_abort(struct usbd_xfer *);
226 static void xhci_device_bulk_close(struct usbd_pipe *);
227 static void xhci_device_bulk_done(struct usbd_xfer *);
228
229 static const struct usbd_bus_methods xhci_bus_methods = {
230 .ubm_open = xhci_open,
231 .ubm_softint = xhci_softintr,
232 .ubm_dopoll = xhci_poll,
233 .ubm_allocx = xhci_allocx,
234 .ubm_freex = xhci_freex,
235 .ubm_abortx = xhci_abortx,
236 .ubm_dying = xhci_dying,
237 .ubm_getlock = xhci_get_lock,
238 .ubm_newdev = xhci_new_device,
239 .ubm_rhctrl = xhci_roothub_ctrl,
240 };
241
242 static const struct usbd_pipe_methods xhci_root_intr_methods = {
243 .upm_transfer = xhci_root_intr_transfer,
244 .upm_start = xhci_root_intr_start,
245 .upm_abort = xhci_root_intr_abort,
246 .upm_close = xhci_root_intr_close,
247 .upm_cleartoggle = xhci_noop,
248 .upm_done = xhci_root_intr_done,
249 };
250
251
252 static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
253 .upm_transfer = xhci_device_ctrl_transfer,
254 .upm_start = xhci_device_ctrl_start,
255 .upm_abort = xhci_device_ctrl_abort,
256 .upm_close = xhci_device_ctrl_close,
257 .upm_cleartoggle = xhci_noop,
258 .upm_done = xhci_device_ctrl_done,
259 };
260
261 static const struct usbd_pipe_methods xhci_device_isoc_methods = {
262 .upm_transfer = xhci_device_isoc_transfer,
263 .upm_abort = xhci_device_isoc_abort,
264 .upm_close = xhci_device_isoc_close,
265 .upm_cleartoggle = xhci_noop,
266 .upm_done = xhci_device_isoc_done,
267 };
268
269 static const struct usbd_pipe_methods xhci_device_bulk_methods = {
270 .upm_transfer = xhci_device_bulk_transfer,
271 .upm_start = xhci_device_bulk_start,
272 .upm_abort = xhci_device_bulk_abort,
273 .upm_close = xhci_device_bulk_close,
274 .upm_cleartoggle = xhci_noop,
275 .upm_done = xhci_device_bulk_done,
276 };
277
278 static const struct usbd_pipe_methods xhci_device_intr_methods = {
279 .upm_transfer = xhci_device_intr_transfer,
280 .upm_start = xhci_device_intr_start,
281 .upm_abort = xhci_device_intr_abort,
282 .upm_close = xhci_device_intr_close,
283 .upm_cleartoggle = xhci_noop,
284 .upm_done = xhci_device_intr_done,
285 };
286
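/*
 * Register accessors.
 *
 * With the XHCI_32BIT_ACCESS quirk the controller is assumed to tolerate
 * only aligned 32-bit MMIO, so the 1- and 2-byte accesses below are
 * synthesized from a read (or read-modify-write) of the containing dword.
 */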
287 static inline uint32_t
288 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
289 {
290 if (ISSET(sc->sc_quirks, XHCI_32BIT_ACCESS)) {
291 uint32_t val;
292 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset & ~3);
293 return (val >> ((offset & 3) * NBBY)) & 0xff;
294 } else {
295 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
296 }
297 }
298
299 static inline uint32_t
300 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset)
301 {
302 if (ISSET(sc->sc_quirks, XHCI_32BIT_ACCESS)) {
303 uint32_t val;
304 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset & ~3);
305 return (val >> ((offset & 3) * NBBY)) & 0xffff;
306 } else {
307 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
308 }
309 }
310
311 static inline uint32_t
312 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
313 {
314 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
315 }
316
317 static inline void
318 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
319 uint32_t value)
320 {
321 if (ISSET(sc->sc_quirks, XHCI_32BIT_ACCESS)) {
322 const uint32_t mask = 0xffU << ((offset & 3) * NBBY);
323 uint32_t val;
324 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset & ~3);
325 val &= ~mask;
326 val |= __SHIFTIN(value, mask);
327 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset & ~3, val);
328 } else {
329 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
330 }
331 }
332
333 #if 0 /* unused */
334 static inline void
335 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
336 uint32_t value)
337 {
338 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
339 }
340 #endif /* unused */
341
342 static inline uint32_t
343 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
344 {
345 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
346 }
347
348 static inline uint32_t
349 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
350 {
351 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
352 }
353
354 static inline void
355 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
356 uint32_t value)
357 {
358 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
359 }
360
361 static inline uint64_t
362 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
363 {
364 uint64_t value;
365
366 #ifdef XHCI_USE_BUS_SPACE_8
367 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
368 #else
369 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
370 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
371 offset + 4) << 32;
372 #endif
373
374 return value;
375 }
376
377 static inline void
378 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
379 uint64_t value)
380 {
381 #ifdef XHCI_USE_BUS_SPACE_8
382 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
383 #else
384 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
385 (value >> 0) & 0xffffffff);
386 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
387 (value >> 32) & 0xffffffff);
388 #endif
389 }
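/*
 * Without XHCI_USE_BUS_SPACE_8, the 64-bit operational and runtime
 * registers above and below are accessed as two 32-bit operations,
 * low dword (offset + 0) first, then high dword (offset + 4).
 */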
390
391 static inline uint32_t
392 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
393 {
394 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
395 }
396
397 static inline void
398 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
399 uint32_t value)
400 {
401 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
402 }
403
404 static inline uint64_t
405 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
406 {
407 uint64_t value;
408
409 #ifdef XHCI_USE_BUS_SPACE_8
410 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
411 #else
412 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
413 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
414 offset + 4) << 32;
415 #endif
416
417 return value;
418 }
419
420 static inline void
421 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
422 uint64_t value)
423 {
424 #ifdef XHCI_USE_BUS_SPACE_8
425 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
426 #else
427 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
428 (value >> 0) & 0xffffffff);
429 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
430 (value >> 32) & 0xffffffff);
431 #endif
432 }
433
434 #if 0 /* unused */
435 static inline uint32_t
436 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
437 {
438 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
439 }
440 #endif /* unused */
441
442 static inline void
443 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
444 uint32_t value)
445 {
446 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
447 }
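/*
 * Doorbell usage (xHCI 5.6): doorbell 0 is the command ring doorbell,
 * and doorbell N (N = slot id) is rung with the target DCI as its value,
 * e.g. xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci) as done when
 * restarting endpoints in xhci_resume().
 */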
448
449 /* --- */
450
451 static inline uint8_t
452 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
453 {
454 u_int eptype = 0;
455
456 switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
457 case UE_CONTROL:
458 eptype = 0x0;
459 break;
460 case UE_ISOCHRONOUS:
461 eptype = 0x1;
462 break;
463 case UE_BULK:
464 eptype = 0x2;
465 break;
466 case UE_INTERRUPT:
467 eptype = 0x3;
468 break;
469 }
470
471 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
472 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
473 return eptype | 0x4;
474 else
475 return eptype;
476 }
477
478 static u_int
479 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
480 {
481 /* xHCI 1.0 section 4.5.1 */
482 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
483 u_int in = 0;
484
485 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
486 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
487 in = 1;
488
489 return epaddr * 2 + in;
490 }
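/*
 * Examples of the DCI mapping above: the default control endpoint
 * (address 0) gets DCI 1, endpoint 1 OUT gets DCI 2, endpoint 1 IN
 * gets DCI 3, ..., endpoint 15 IN gets DCI 31.
 */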
491
492 static inline u_int
493 xhci_dci_to_ici(const u_int i)
494 {
495 return i + 1;
496 }
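/*
 * The input context prepends an Input Control Context at index 0
 * (XHCI_ICI_INPUT_CONTROL), so a device context index maps to the input
 * context index one past it: the slot context lives at ICI 1, the
 * default control endpoint (DCI 1) at ICI 2, and so on.
 */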
497
498 static inline void *
499 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
500 const u_int dci)
501 {
502 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
503 }
504
505 #if 0 /* unused */
506 static inline bus_addr_t
507 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
508 const u_int dci)
509 {
510 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
511 }
512 #endif /* unused */
513
514 static inline void *
515 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
516 const u_int ici)
517 {
518 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
519 }
520
521 static inline bus_addr_t
522 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
523 const u_int ici)
524 {
525 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
526 }
527
528 static inline struct xhci_trb *
529 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
530 {
531 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
532 }
533
534 static inline bus_addr_t
535 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
536 {
537 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
538 }
539
540 static inline void
541 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx,
542 uint64_t parameter, uint32_t status, uint32_t control)
543 {
544 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb);
545 xx->xx_trb[idx].trb_0 = parameter;
546 xx->xx_trb[idx].trb_2 = status;
547 xx->xx_trb[idx].trb_3 = control;
548 }
549
550 static inline void
551 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
552 uint32_t control)
553 {
554 trb->trb_0 = htole64(parameter);
555 trb->trb_2 = htole32(status);
556 trb->trb_3 = htole32(control);
557 }
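/*
 * Note the distinction: xhci_xfer_put_trb() records a host-endian shadow
 * copy in the xfer, while xhci_trb_put() stores little-endian values
 * destined for a ring that the controller reads.
 */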
558
559 static int
560 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
561 {
562 /* base address of TRBs */
563 bus_addr_t trbp = xhci_ring_trbp(xr, 0);
564
565 /* trb_0 range sanity check */
566 if (trb_0 == 0 || trb_0 < trbp ||
567 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
568 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
569 return 1;
570 }
571 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
572 return 0;
573 }
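/*
 * For example, if the ring starts at bus address B, a completion whose
 * trb_0 is B + 5 * sizeof(struct xhci_trb) yields *idx = 5; a pointer
 * that is zero, misaligned, or outside the ring is rejected by
 * returning 1.
 */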
574
575 static unsigned int
576 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
577 u_int dci)
578 {
579 uint32_t *cp;
580
581 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
582 cp = xhci_slot_get_dcv(sc, xs, dci);
583 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
584 }
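/*
 * The value read here is the EP State field of the output endpoint
 * context (xHCI 6.2.3): Disabled, Running, Halted, Stopped, or Error.
 */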
585
586 static inline unsigned int
587 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
588 {
589 const unsigned int port = ctlrport - 1;
590 const uint8_t bit = __BIT(port % NBBY);
591
592 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
593 }
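/*
 * sc_ctlrportbus is a bitmap with one bit per controller port: 0 means
 * the port belongs to the USB 3 root hub (sc_bus), 1 means the USB 2
 * root hub (sc_bus2).  The bits are populated in xhci_id_protocols().
 */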
594
595 /*
596 * Return the roothub port for a controller port. Both are 1..n.
597 */
598 static inline unsigned int
599 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
600 {
601
602 return sc->sc_ctlrportmap[ctrlport - 1];
603 }
604
605 /*
606 * Return the controller port for a bus roothub port. Both are 1..n.
607 */
608 static inline unsigned int
609 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
610 unsigned int rhport)
611 {
612
613 return sc->sc_rhportmap[bn][rhport - 1];
614 }
615
616 /* --- */
617
618 void
619 xhci_childdet(device_t self, device_t child)
620 {
621 struct xhci_softc * const sc = device_private(self);
622
623 mutex_enter(&sc->sc_intr_lock);
624 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
625 if (child == sc->sc_child2)
626 sc->sc_child2 = NULL;
627 else if (child == sc->sc_child)
628 sc->sc_child = NULL;
629 mutex_exit(&sc->sc_intr_lock);
630 }
631
632 int
633 xhci_detach(struct xhci_softc *sc, int flags)
634 {
635 int rv = 0;
636
637 if (sc->sc_child2 != NULL) {
638 rv = config_detach(sc->sc_child2, flags);
639 if (rv != 0)
640 return rv;
641 KASSERT(sc->sc_child2 == NULL);
642 }
643
644 if (sc->sc_child != NULL) {
645 rv = config_detach(sc->sc_child, flags);
646 if (rv != 0)
647 return rv;
648 KASSERT(sc->sc_child == NULL);
649 }
650
651 /* XXX unconfigure/free slots */
652
653 /* verify: */
654 xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
655 xhci_op_write_4(sc, XHCI_USBCMD, 0);
656 /* do we need to wait for stop? */
657
658 xhci_op_write_8(sc, XHCI_CRCR, 0);
659 xhci_ring_free(sc, &sc->sc_cr);
660 cv_destroy(&sc->sc_command_cv);
661 cv_destroy(&sc->sc_cmdbusy_cv);
662
663 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
664 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
665 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY);
666 xhci_ring_free(sc, &sc->sc_er);
667
668 usb_freemem(&sc->sc_eventst_dma);
669
670 xhci_op_write_8(sc, XHCI_DCBAAP, 0);
671 usb_freemem(&sc->sc_dcbaa_dma);
672
673 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);
674
675 kmem_free(sc->sc_ctlrportbus,
676 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
677 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));
678
679 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
680 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
681 }
682
683 mutex_destroy(&sc->sc_rhlock);
684 mutex_destroy(&sc->sc_lock);
685 mutex_destroy(&sc->sc_intr_lock);
686
687 pool_cache_destroy(sc->sc_xferpool);
688
689 return rv;
690 }
691
692 int
693 xhci_activate(device_t self, enum devact act)
694 {
695 struct xhci_softc * const sc = device_private(self);
696
697 switch (act) {
698 case DVACT_DEACTIVATE:
699 sc->sc_dying = true;
700 return 0;
701 default:
702 return EOPNOTSUPP;
703 }
704 }
705
706 bool
707 xhci_suspend(device_t self, const pmf_qual_t *qual)
708 {
709 struct xhci_softc * const sc = device_private(self);
710 size_t i, j, bn, dci;
711 int port;
712 uint32_t v;
713 usbd_status err;
714 bool ok = false;
715
716 XHCIHIST_FUNC(); XHCIHIST_CALLED();
717
718 /*
719 * Block issuance of new commands, and wait for all pending
720 * commands to complete.
721 */
722 mutex_enter(&sc->sc_lock);
723 KASSERT(sc->sc_suspender == NULL);
724 sc->sc_suspender = curlwp;
725 while (sc->sc_command_addr != 0)
726 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
727 mutex_exit(&sc->sc_lock);
728
729 /*
730 * Block roothub xfers which might touch portsc registers until
731 * we're done suspending.
732 */
733 mutex_enter(&sc->sc_rhlock);
734
735 /*
736 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
737 * xHCI Power Management, p. 342
738 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342
739 */
740
741 /*
742 * `1. Stop all USB activity by issuing Stop Endpoint Commands
743 * for Busy endpoints in the Running state. If the Force
744 * Save Context Capability (FSC = ``0'') is not supported,
745 * then Stop Endpoint Commands shall be issued for all idle
746 * endpoints in the Running state as well. The Stop
747 * Endpoint Command causes the xHC to update the respective
748 * Endpoint or Stream Contexts in system memory, e.g. the
749 * TR Dequeue Pointer, DCS, etc. fields. Refer to
750 * Implementation Note "0".'
751 */
752 for (i = 0; i < sc->sc_maxslots; i++) {
753 struct xhci_slot *xs = &sc->sc_slots[i];
754
755 /* Skip if the slot is not in use. */
756 if (xs->xs_idx == 0)
757 continue;
758
759 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
760 /* Skip if the endpoint is not Running. */
761 /* XXX What about Busy? */
762 if (xhci_get_epstate(sc, xs, dci) !=
763 XHCI_EPSTATE_RUNNING)
764 continue;
765
766 /* Stop endpoint. */
767 mutex_enter(&sc->sc_lock);
768 err = xhci_stop_endpoint_cmd(sc, xs, dci,
769 XHCI_TRB_3_SUSP_EP_BIT);
770 mutex_exit(&sc->sc_lock);
771 if (err) {
772 device_printf(self, "failed to stop endpoint"
773 " slot %zu dci %zu err %d\n",
774 i, dci, err);
775 goto out;
776 }
777 }
778 }
779
780 /*
781 * Next, suspend all the ports:
782 *
783 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15:
784 * Suspend-Resume, pp. 276-283
785 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276
786 */
787 for (bn = 0; bn < 2; bn++) {
788 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
789 /* 4.15.1: Port Suspend. */
790 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
791
792 /*
793 * `System software places individual ports
794 * into suspend mode by writing a ``3'' into
795 * the appropriate PORTSC register Port Link
796 * State (PLS) field (refer to Section 5.4.8).
797 * Software should only set the PLS field to
798 * ``3'' when the port is in the Enabled
799 * state.'
800 *
801 * `Software should not attempt to suspend a
802 * port unless the port reports that it is in
803 * the enabled (PED = ``1''; PLS < ``3'')
804 * state (refer to Section 5.4.8 for more
805 * information about PED and PLS).'
806 */
807 v = xhci_op_read_4(sc, port);
808 if (((v & XHCI_PS_PED) == 0) ||
809 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3)
810 continue;
811 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
812 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3);
813 xhci_op_write_4(sc, port, v);
814
815 /*
816 * `When the PLS field is written with U3
817 * (``3''), the status of the PLS bit will not
818 * change to the target U state U3 until the
819 * suspend signaling has completed to the
820 * attached device (which may be as long as
821 * 10ms.).'
822 *
823 * `Software is required to wait for U3
824 * transitions to complete before it puts the
825 * xHC into a low power state, and before
826 * resuming the port.'
827 *
828 * XXX Take advantage of the technique to
829 * reduce polling on host controllers that
830 * support the U3C capability.
831 */
832 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) {
833 v = xhci_op_read_4(sc, port);
834 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3)
835 break;
836 usb_delay_ms(&sc->sc_bus, 1);
837 }
838 if (j == XHCI_WAIT_PLS_U3) {
839 device_printf(self,
840 "suspend timeout on bus %zu port %zu\n",
841 bn, i);
842 goto out;
843 }
844 }
845 }
846
847 /*
848 * `2. Ensure that the Command Ring is in the Stopped state
849 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is
850 * empty), and all Command Completion Events associated
851 * with them have been received.'
852 *
853 * XXX
854 */
855
856 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */
857 xhci_op_write_4(sc, XHCI_USBCMD,
858 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS);
859
860 /*
861 * `4. Read the Operational Runtime, and VTIO registers in the
862 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ,
863 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their
864 * state.'
865 *
866 * (We don't use VTIO here (XXX for now?).)
867 */
868 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
869 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL);
870 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP);
871 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG);
872 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0));
873 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0));
874 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0));
875 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0));
876 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0));
877
878 /*
879 * `5. Set the Controller Save State (CSS) flag in the USBCMD
880 * register (5.4.1)...'
881 */
882 xhci_op_write_4(sc, XHCI_USBCMD,
883 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS);
884
885 /*
886 * `...and wait for the Save State Status (SSS) flag in the
887 * USBSTS register (5.4.2) to transition to ``0''.'
888 */
889 for (i = 0; i < XHCI_WAIT_SSS; i++) {
890 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0)
891 break;
892 usb_delay_ms(&sc->sc_bus, 1);
893 }
894 if (i >= XHCI_WAIT_SSS) {
895 device_printf(self, "suspend timeout, USBSTS.SSS\n");
896 /*
897 * Just optimistically go on and check SRE anyway --
898 * what's the worst that could happen?
899 */
900 }
901
902 /*
903 * `Note: After a Save or Restore operation completes, the
904 * Save/Restore Error (SRE) flag in the USBSTS register should
905 * be checked to ensure that the operation completed
906 * successfully.'
907 */
908 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
909 device_printf(self, "suspend error, USBSTS.SRE\n");
910 goto out;
911 }
912
913 /* Success! */
914 ok = true;
915
916 out: mutex_exit(&sc->sc_rhlock);
917 if (!ok) {
918 /*
919 * If suspend failed, stop holding up command issuance
920 * and make it fail instead.
921 */
922 mutex_enter(&sc->sc_lock);
923 KASSERT(sc->sc_suspender == curlwp);
924 sc->sc_suspender = NULL;
925 sc->sc_suspendresume_failed = true;
926 cv_broadcast(&sc->sc_cmdbusy_cv);
927 mutex_exit(&sc->sc_lock);
928 }
929 return ok;
930 }
931
932 bool
933 xhci_resume(device_t self, const pmf_qual_t *qual)
934 {
935 struct xhci_softc * const sc = device_private(self);
936 size_t i, j, bn, dci;
937 int port;
938 uint32_t v;
939 bool ok = false;
940
941 XHCIHIST_FUNC(); XHCIHIST_CALLED();
942
943 /*
944 * If resume had previously failed, just try again. Can't make
945 * things worse, probably.
946 */
947 mutex_enter(&sc->sc_lock);
948 if (sc->sc_suspendresume_failed) {
949 KASSERT(sc->sc_suspender == NULL);
950 sc->sc_suspender = curlwp;
951 sc->sc_suspendresume_failed = false;
952 }
953 KASSERT(sc->sc_suspender);
954 mutex_exit(&sc->sc_lock);
955
956 /*
957 * Block roothub xfers which might touch portsc registers until
958 * we're done resuming.
959 */
960 mutex_enter(&sc->sc_rhlock);
961
962 /*
963 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
964 * xHCI Power Management, p. 343
965 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343
966 */
967
968 /*
969 * `4. Restore the Operational Runtime, and VTIO registers with
970 * their previously saved state in the following order:
971 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN,
972 * IMOD, and VTIO.'
973 *
974 * (We don't use VTIO here (for now?).)
975 */
976 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd);
977 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl);
978 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap);
979 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config);
980 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0);
981 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0);
982 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0);
983 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0);
984 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0);
985
986 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */
987
988 /*
989 * `5. Set the Controller Restore State (CRS) flag in the
990 * USBCMD register (5.4.1) to ``1''...'
991 */
992 xhci_op_write_4(sc, XHCI_USBCMD,
993 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS);
994
995 /*
996 * `...and wait for the Restore State Status (RSS) in the
997 * USBSTS register (5.4.2) to transition to ``0''.'
998 */
999 for (i = 0; i < XHCI_WAIT_RSS; i++) {
1000 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0)
1001 break;
1002 usb_delay_ms(&sc->sc_bus, 1);
1003 }
1004 if (i >= XHCI_WAIT_RSS) {
1005 device_printf(self, "resume timeout, USBSTS.RSS\n");
1006 goto out;
1007 }
1008
1009 /*
1010 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits
1011 * are consistent with the RCS values to be written to the
1012 * CRCR.'
1013 *
1014 * XXX Hope just zeroing it is good enough!
1015 */
1016 xhci_host_dequeue(sc->sc_cr);
1017
1018 /*
1019 * `7. Write the CRCR with the address and RCS value of the
1020 * reinitialized Command Ring. Note that this write will
1021 * cause the Command Ring to restart at the address
1022 * specified by the CRCR.'
1023 */
1024 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1025 sc->sc_cr->xr_cs);
1026
1027 /*
1028 * `8. Enable the controller by setting Run/Stop (R/S) =
1029 * ``1''.'
1030 */
1031 xhci_op_write_4(sc, XHCI_USBCMD,
1032 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS);
1033
1034 /*
1035 * `9. Software shall walk the USB topology and initialize each
1036 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and
1037 * external hub ports attached to USB devices.'
1038 *
1039 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2
1040 * `Port Resume', 4.15.2.2 `Host Initiated'.
1041 *
1042 * XXX We should maybe batch up initiating the state
1043 * transitions, and then wait for them to complete all at once.
1044 */
1045 for (bn = 0; bn < 2; bn++) {
1046 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
1047 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
1048
1049 /* `When a port is in the U3 state: ...' */
1050 v = xhci_op_read_4(sc, port);
1051 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3)
1052 continue;
1053
1054 /*
1055 * `For a USB2 protocol port, software shall
1056 * write a ``15'' (Resume) to the PLS field to
1057 * initiate resume signaling. The port shall
1058 * transition to the Resume substate and the
1059 * xHC shall transmit the resume signaling
1060 * within 1ms (T_URSM). Software shall ensure
1061 * that resume is signaled for at least 20ms
1062 * (T_DRSMDN). Software shall start timing
1063 * T_DRSMDN from the write of ``15'' (Resume)
1064 * to PLS.'
1065 */
1066 if (bn == 1) {
1067 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0);
1068 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1069 v |= XHCI_PS_LWS;
1070 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME);
1071 xhci_op_write_4(sc, port, v);
1072 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
1073 } else {
1074 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0);
1075 }
1076
1077 /*
1078 * `For a USB3 protocol port [and a USB2
1079 * protocol port after transitioning to
1080 * Resume], software shall write a ``0'' (U0)
1081 * to the PLS field...'
1082 */
1083 v = xhci_op_read_4(sc, port);
1084 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1085 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0);
1086 xhci_op_write_4(sc, port, v);
1087
1088 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) {
1089 v = xhci_op_read_4(sc, port);
1090 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0)
1091 break;
1092 usb_delay_ms(&sc->sc_bus, 1);
1093 }
1094 if (j == XHCI_WAIT_PLS_U0) {
1095 device_printf(self,
1096 "resume timeout on bus %zu port %zu\n",
1097 bn, i);
1098 goto out;
1099 }
1100 }
1101 }
1102
1103 /*
1104 * `10. Restart each of the previously Running endpoints by
1105 * ringing their doorbells.'
1106 */
1107 for (i = 0; i < sc->sc_maxslots; i++) {
1108 struct xhci_slot *xs = &sc->sc_slots[i];
1109
1110 /* Skip if the slot is not in use. */
1111 if (xs->xs_idx == 0)
1112 continue;
1113
1114 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
1115 /* Skip if the endpoint is not Running. */
1116 if (xhci_get_epstate(sc, xs, dci) !=
1117 XHCI_EPSTATE_RUNNING)
1118 continue;
1119
1120 /* Ring the doorbell. */
1121 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
1122 }
1123 }
1124
1125 /*
1126 * `Note: After a Save or Restore operation completes, the
1127 * Save/Restore Error (SRE) flag in the USBSTS register should
1128 * be checked to ensure that the operation completed
1129 * successfully.'
1130 */
1131 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
1132 device_printf(self, "resume error, USBSTS.SRE\n");
1133 goto out;
1134 }
1135
1136 /* Success! */
1137 ok = true;
1138
1139 out: /*
1140 * Resume command issuance. If the hardware failed to resume,
1141 * well, tough -- deadlocking because everything is held up on
1142 * the suspension, with no opportunity to detach, isn't better
1143 * than timing out waiting for dead hardware.
1144 */
1145 mutex_enter(&sc->sc_lock);
1146 KASSERT(sc->sc_suspender);
1147 sc->sc_suspender = NULL;
1148 sc->sc_suspendresume_failed = !ok;
1149 cv_broadcast(&sc->sc_cmdbusy_cv);
1150 mutex_exit(&sc->sc_lock);
1151
1152 mutex_exit(&sc->sc_rhlock);
1153 return ok;
1154 }
1155
1156 bool
1157 xhci_shutdown(device_t self, int flags)
1158 {
1159 return false;
1160 }
1161
1162 static int
1163 xhci_hc_reset(struct xhci_softc * const sc)
1164 {
1165 uint32_t usbcmd, usbsts;
1166 int i;
1167
1168 /* Check controller not ready */
1169 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1170 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1171 if ((usbsts & XHCI_STS_CNR) == 0)
1172 break;
1173 usb_delay_ms(&sc->sc_bus, 1);
1174 }
1175 if (i >= XHCI_WAIT_CNR) {
1176 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
1177 return EIO;
1178 }
1179
1180 /* Halt controller */
1181 usbcmd = 0;
1182 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1183 usb_delay_ms(&sc->sc_bus, 1);
1184
1185 /* Reset controller */
1186 usbcmd = XHCI_CMD_HCRST;
1187 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1188 for (i = 0; i < XHCI_WAIT_HCRST; i++) {
1189 /*
 1190 			 * Wait 1ms first.  Some existing Intel xHCI controllers
 1191 			 * require a 1ms delay here to prevent a system hang (errata).
1192 */
1193 usb_delay_ms(&sc->sc_bus, 1);
1194 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
1195 if ((usbcmd & XHCI_CMD_HCRST) == 0)
1196 break;
1197 }
1198 if (i >= XHCI_WAIT_HCRST) {
1199 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
1200 return EIO;
1201 }
1202
1203 /* Check controller not ready */
1204 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1205 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1206 if ((usbsts & XHCI_STS_CNR) == 0)
1207 break;
1208 usb_delay_ms(&sc->sc_bus, 1);
1209 }
1210 if (i >= XHCI_WAIT_CNR) {
1211 aprint_error_dev(sc->sc_dev,
1212 "controller not ready timeout after reset\n");
1213 return EIO;
1214 }
1215
1216 return 0;
1217 }
1218
1219 /* 7.2 xHCI Support Protocol Capability */
1220 static void
1221 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
1222 {
1223 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1224
1225 /* XXX Cache this lot */
1226
1227 const uint32_t w0 = xhci_read_4(sc, ecp);
1228 const uint32_t w4 = xhci_read_4(sc, ecp + 4);
1229 const uint32_t w8 = xhci_read_4(sc, ecp + 8);
1230 const uint32_t wc = xhci_read_4(sc, ecp + 0xc);
1231
1232 aprint_debug_dev(sc->sc_dev,
1233 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc);
1234
1235 if (w4 != XHCI_XECP_USBID)
1236 return;
1237
1238 const int major = XHCI_XECP_SP_W0_MAJOR(w0);
1239 const int minor = XHCI_XECP_SP_W0_MINOR(w0);
1240 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
1241 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);
1242
1243 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
1244 switch (mm) {
1245 case 0x0200:
1246 case 0x0300:
1247 case 0x0301:
1248 case 0x0310:
1249 case 0x0320:
1250 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
1251 major == 3 ? "ss" : "hs", cpo, cpo + cpc - 1);
1252 if (major == 3)
1253 sc->sc_usb3nports += cpo + cpc - 1;
1254 else
1255 sc->sc_usb2nports += cpo + cpc - 1;
1256 break;
1257 default:
1258 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
1259 major, minor);
1260 return;
1261 }
1262
1263 const size_t bus = (major == 3) ? 0 : 1;
1264
1265 /* Index arrays with 0..n-1 where ports are numbered 1..n */
1266 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
1267 if (sc->sc_ctlrportmap[cp] != 0) {
1268 aprint_error_dev(sc->sc_dev, "controller port %zu "
 1269 			    "already assigned\n", cp);
1270 continue;
1271 }
1272
1273 sc->sc_ctlrportbus[cp / NBBY] |=
1274 bus == 0 ? 0 : __BIT(cp % NBBY);
1275
1276 const size_t rhp = sc->sc_rhportcount[bus]++;
1277
1278 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
1279 "bus %zu rhp %zu is %d", bus, rhp,
1280 sc->sc_rhportmap[bus][rhp]);
1281
1282 sc->sc_rhportmap[bus][rhp] = cp + 1;
1283 sc->sc_ctlrportmap[cp] = rhp + 1;
1284 }
1285 }
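/*
 * For example, a controller that advertises SS on controller ports 1-2
 * and HS on ports 3-6 ends up with sc_rhportmap[0] = { 1, 2 },
 * sc_rhportmap[1] = { 3, 4, 5, 6 } and sc_ctlrportmap = { 1, 2, 1, 2,
 * 3, 4 }, with the sc_ctlrportbus bits set for ports 3-6 only.
 */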
1286
1287 /* Process extended capabilities */
1288 static void
1289 xhci_ecp(struct xhci_softc *sc)
1290 {
1291 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1292
1293 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4;
1294 while (ecp != 0) {
1295 uint32_t ecr = xhci_read_4(sc, ecp);
1296 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
1297 switch (XHCI_XECP_ID(ecr)) {
1298 case XHCI_ID_PROTOCOLS: {
1299 xhci_id_protocols(sc, ecp);
1300 break;
1301 }
1302 case XHCI_ID_USB_LEGACY: {
1303 uint8_t bios_sem;
1304
1305 /* Take host controller ownership from BIOS */
1306 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
1307 if (bios_sem) {
1308 /* sets xHCI to be owned by OS */
1309 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
1310 aprint_debug_dev(sc->sc_dev,
1311 "waiting for BIOS to give up control\n");
1312 for (int i = 0; i < 5000; i++) {
1313 bios_sem = xhci_read_1(sc, ecp +
1314 XHCI_XECP_BIOS_SEM);
1315 if (bios_sem == 0)
1316 break;
1317 DELAY(1000);
1318 }
1319 if (bios_sem) {
1320 aprint_error_dev(sc->sc_dev,
1321 "timed out waiting for BIOS\n");
1322 }
1323 }
1324 break;
1325 }
1326 default:
1327 break;
1328 }
1329 ecr = xhci_read_4(sc, ecp);
1330 if (XHCI_XECP_NEXT(ecr) == 0) {
1331 ecp = 0;
1332 } else {
1333 ecp += XHCI_XECP_NEXT(ecr) * 4;
1334 }
1335 }
1336 }
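/*
 * The extended capability list is walked in units of 32-bit dwords:
 * XHCI_HCC_XECP() gives the offset of the first capability in dwords
 * (hence the "* 4"), and each capability's Next pointer is a dword
 * offset relative to that capability, with 0 terminating the list.
 */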
1337
1338 #define XHCI_HCCPREV1_BITS \
1339 "\177\020" /* New bitmask */ \
1340 "f\020\020XECP\0" \
1341 "f\014\4MAXPSA\0" \
1342 "b\013CFC\0" \
1343 "b\012SEC\0" \
1344 "b\011SBD\0" \
1345 "b\010FSE\0" \
1346 "b\7NSS\0" \
1347 "b\6LTC\0" \
1348 "b\5LHRC\0" \
1349 "b\4PIND\0" \
1350 "b\3PPC\0" \
1351 "b\2CZC\0" \
1352 "b\1BNC\0" \
1353 "b\0AC64\0" \
1354 "\0"
1355 #define XHCI_HCCV1_x_BITS \
1356 "\177\020" /* New bitmask */ \
1357 "f\020\020XECP\0" \
1358 "f\014\4MAXPSA\0" \
1359 "b\013CFC\0" \
1360 "b\012SEC\0" \
1361 "b\011SPC\0" \
1362 "b\010PAE\0" \
1363 "b\7NSS\0" \
1364 "b\6LTC\0" \
1365 "b\5LHRC\0" \
1366 "b\4PIND\0" \
1367 "b\3PPC\0" \
1368 "b\2CSZ\0" \
1369 "b\1BNC\0" \
1370 "b\0AC64\0" \
1371 "\0"
1372
1373 #define XHCI_HCC2_BITS \
1374 "\177\020" /* New bitmask */ \
1375 "b\7ETC_TSC\0" \
1376 "b\6ETC\0" \
1377 "b\5CIC\0" \
1378 "b\4LEC\0" \
1379 "b\3CTC\0" \
1380 "b\2FSC\0" \
1381 "b\1CMC\0" \
1382 "b\0U3C\0" \
1383 "\0"
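/*
 * The strings above are snprintb(3) new-style bitmask descriptions:
 * "\177\020" selects the new format with hexadecimal output, "b\N<name>"
 * names a single bit N, and "f\N\W<name>" names a W-bit wide field
 * starting at bit N.  They are only used for the aprint_debug hcc/hcc2
 * output in xhci_init().
 */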
1384
1385 void
1386 xhci_start(struct xhci_softc *sc)
1387 {
1388 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
1389 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
1390 /* Intel xhci needs interrupt rate moderated. */
1391 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
1392 else
1393 xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
1394 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
1395 xhci_rt_read_4(sc, XHCI_IMOD(0)));
1396
1397 /* Go! */
1398 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
1399 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n",
1400 xhci_op_read_4(sc, XHCI_USBCMD));
1401 }
1402
1403 int
1404 xhci_init(struct xhci_softc *sc)
1405 {
1406 bus_size_t bsz;
1407 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff;
1408 uint32_t pagesize, config;
1409 int i = 0;
1410 uint16_t hciversion;
1411 uint8_t caplength;
1412
1413 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1414
1415 /* Set up the bus struct for the usb 3 and usb 2 buses */
1416 sc->sc_bus.ub_methods = &xhci_bus_methods;
1417 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
1418 sc->sc_bus.ub_usedma = true;
1419 sc->sc_bus.ub_hcpriv = sc;
1420
1421 sc->sc_bus2.ub_methods = &xhci_bus_methods;
1422 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
1423 sc->sc_bus2.ub_revision = USBREV_2_0;
1424 sc->sc_bus2.ub_usedma = true;
1425 sc->sc_bus2.ub_hcpriv = sc;
1426 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;
1427
1428 caplength = xhci_read_1(sc, XHCI_CAPLENGTH);
1429 hciversion = xhci_read_2(sc, XHCI_HCIVERSION);
1430
1431 if (hciversion < XHCI_HCIVERSION_0_96 ||
1432 hciversion >= 0x0200) {
1433 aprint_normal_dev(sc->sc_dev,
1434 "xHCI version %x.%x not known to be supported\n",
1435 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1436 } else {
1437 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
1438 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1439 }
1440
1441 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
1442 &sc->sc_cbh) != 0) {
1443 aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
1444 return ENOMEM;
1445 }
1446
1447 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
1448 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
1449 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
1450 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
1451 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
1452 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
1453 aprint_debug_dev(sc->sc_dev,
1454 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);
1455
1456 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
1457 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32;
1458
1459 char sbuf[128];
1460 if (hciversion < XHCI_HCIVERSION_1_0)
1461 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc);
1462 else
1463 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc);
1464 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
1465 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n",
1466 XHCI_HCC_XECP(sc->sc_hcc) * 4);
1467 if (hciversion >= XHCI_HCIVERSION_1_1) {
1468 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
1469 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2);
1470 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
1471 }
1472
1473 /* default all ports to bus 0, i.e. usb 3 */
1474 sc->sc_ctlrportbus = kmem_zalloc(
1475 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
1476 sc->sc_ctlrportmap =
1477 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1478
1479 /* controller port to bus roothub port map */
1480 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
1481 sc->sc_rhportmap[j] =
1482 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1483 }
1484
1485 /*
1486 * Process all Extended Capabilities
1487 */
1488 xhci_ecp(sc);
1489
1490 bsz = XHCI_PORTSC(sc->sc_maxports);
1491 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
1492 &sc->sc_obh) != 0) {
1493 aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
1494 return ENOMEM;
1495 }
1496
1497 dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
1498 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
1499 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
1500 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
1501 return ENOMEM;
1502 }
1503
1504 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
1505 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
1506 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
1507 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
1508 return ENOMEM;
1509 }
1510
1511 int rv;
1512 rv = xhci_hc_reset(sc);
1513 if (rv != 0) {
1514 return rv;
1515 }
1516
1517 if (sc->sc_vendor_init)
1518 sc->sc_vendor_init(sc);
1519
1520 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
1521 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
1522 pagesize = ffs(pagesize);
1523 if (pagesize == 0) {
1524 aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
1525 return EIO;
1526 }
1527 sc->sc_pgsz = 1 << (12 + (pagesize - 1));
1528 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
1529 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
1530 (uint32_t)sc->sc_maxslots);
1531 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);
1532
1533 int err;
1534 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
1535 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
1536 if (sc->sc_maxspbuf != 0) {
1537 err = usb_allocmem(sc->sc_bus.ub_dmatag,
1538 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
1539 USBMALLOC_ZERO, &sc->sc_spbufarray_dma);
1540 if (err) {
1541 aprint_error_dev(sc->sc_dev,
1542 "spbufarray init fail, err %d\n", err);
1543 return ENOMEM;
1544 }
1545
1546 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
1547 sc->sc_maxspbuf, KM_SLEEP);
1548 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
1549 for (i = 0; i < sc->sc_maxspbuf; i++) {
1550 usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
1551 /* allocate contexts */
1552 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz,
1553 sc->sc_pgsz, USBMALLOC_ZERO, dma);
1554 if (err) {
1555 aprint_error_dev(sc->sc_dev,
1556 "spbufarray_dma init fail, err %d\n", err);
1557 rv = ENOMEM;
1558 goto bad1;
1559 }
1560 spbufarray[i] = htole64(DMAADDR(dma, 0));
1561 usb_syncmem(dma, 0, sc->sc_pgsz,
1562 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1563 }
1564
1565 usb_syncmem(&sc->sc_spbufarray_dma, 0,
1566 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
1567 }
1568
1569 config = xhci_op_read_4(sc, XHCI_CONFIG);
1570 config &= ~0xFF;
1571 config |= sc->sc_maxslots & 0xFF;
1572 xhci_op_write_4(sc, XHCI_CONFIG, config);
1573
1574 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
1575 XHCI_COMMAND_RING_SEGMENTS_ALIGN);
1576 if (err) {
1577 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n",
1578 err);
1579 rv = ENOMEM;
1580 goto bad1;
1581 }
1582
1583 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
1584 XHCI_EVENT_RING_SEGMENTS_ALIGN);
1585 if (err) {
1586 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
1587 err);
1588 rv = ENOMEM;
1589 goto bad2;
1590 }
1591
1592 usb_dma_t *dma;
1593 size_t size;
1594 size_t align;
1595
1596 dma = &sc->sc_eventst_dma;
1597 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
1598 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
1599 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
1600 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
1601 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1602 USBMALLOC_ZERO, dma);
1603 if (err) {
1604 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
1605 err);
1606 rv = ENOMEM;
1607 goto bad3;
1608 }
1609
1610 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
1611 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
1612 KERNADDR(&sc->sc_eventst_dma, 0),
1613 sc->sc_eventst_dma.udma_block->size);
1614
1615 dma = &sc->sc_dcbaa_dma;
1616 size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
1617 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
1618 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
1619 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1620 USBMALLOC_ZERO, dma);
1621 if (err) {
1622 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
1623 rv = ENOMEM;
1624 goto bad4;
1625 }
1626 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
1627 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
1628 KERNADDR(&sc->sc_dcbaa_dma, 0),
1629 sc->sc_dcbaa_dma.udma_block->size);
1630
1631 if (sc->sc_maxspbuf != 0) {
1632 /*
 1633 		 * DCBAA entry 0 holds the scratchpad buffer array pointer.
1634 */
1635 *(uint64_t *)KERNADDR(dma, 0) =
1636 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
1637 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1638 }
1639
1640 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
1641 KM_SLEEP);
1642 if (sc->sc_slots == NULL) {
1643 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
1644 rv = ENOMEM;
1645 goto bad;
1646 }
1647
1648 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
1649 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
1650 if (sc->sc_xferpool == NULL) {
1651 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
1652 err);
1653 rv = ENOMEM;
1654 goto bad;
1655 }
1656
1657 cv_init(&sc->sc_command_cv, "xhcicmd");
1658 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
1659 mutex_init(&sc->sc_rhlock, MUTEX_DEFAULT, IPL_NONE);
1660 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
1661 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
1662
1663 struct xhci_erste *erst;
1664 erst = KERNADDR(&sc->sc_eventst_dma, 0);
1665 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0));
1666 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb);
1667 erst[0].erste_3 = htole32(0);
1668 usb_syncmem(&sc->sc_eventst_dma, 0,
1669 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);
1670
1671 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
1672 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
1673 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) |
1674 XHCI_ERDP_BUSY);
1675
1676 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
1677 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1678 sc->sc_cr->xr_cs);
1679
1680 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
1681 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);
1682
1683 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
1684 xhci_start(sc);
1685
1686 return 0;
1687
1688 bad:
1689 if (sc->sc_xferpool) {
1690 pool_cache_destroy(sc->sc_xferpool);
1691 sc->sc_xferpool = NULL;
1692 }
1693
1694 if (sc->sc_slots) {
1695 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
1696 sc->sc_maxslots);
1697 sc->sc_slots = NULL;
1698 }
1699
1700 usb_freemem(&sc->sc_dcbaa_dma);
1701 bad4:
1702 usb_freemem(&sc->sc_eventst_dma);
1703 bad3:
1704 xhci_ring_free(sc, &sc->sc_er);
1705 bad2:
1706 xhci_ring_free(sc, &sc->sc_cr);
1707 i = sc->sc_maxspbuf;
1708 bad1:
1709 for (int j = 0; j < i; j++)
1710 usb_freemem(&sc->sc_spbuf_dma[j]);
1711 usb_freemem(&sc->sc_spbufarray_dma);
1712
1713 return rv;
1714 }
1715
1716 static inline bool
1717 xhci_polling_p(struct xhci_softc * const sc)
1718 {
1719 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
1720 }
1721
1722 int
1723 xhci_intr(void *v)
1724 {
1725 struct xhci_softc * const sc = v;
1726 int ret = 0;
1727
1728 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1729
1730 if (sc == NULL)
1731 return 0;
1732
1733 mutex_spin_enter(&sc->sc_intr_lock);
1734
1735 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1736 goto done;
1737
1738 /* If we get an interrupt while polling, then just ignore it. */
1739 if (xhci_polling_p(sc)) {
1740 #ifdef DIAGNOSTIC
1741 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1742 #endif
1743 goto done;
1744 }
1745
1746 ret = xhci_intr1(sc);
1747 if (ret) {
1748 KASSERT(sc->sc_child || sc->sc_child2);
1749
1750 /*
 1751 		 * One of the child busses could already be detached.  It doesn't
 1752 		 * matter on which of the two the softintr is scheduled.
1753 */
1754 if (sc->sc_child)
1755 usb_schedsoftintr(&sc->sc_bus);
1756 else
1757 usb_schedsoftintr(&sc->sc_bus2);
1758 }
1759 done:
1760 mutex_spin_exit(&sc->sc_intr_lock);
1761 return ret;
1762 }
1763
1764 int
1765 xhci_intr1(struct xhci_softc * const sc)
1766 {
1767 uint32_t usbsts;
1768 uint32_t iman;
1769
1770 XHCIHIST_FUNC();
1771
1772 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1773 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
1774 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
1775 XHCI_STS_HCE)) == 0) {
1776 DPRINTFN(16, "ignored intr not for %jd",
1777 device_unit(sc->sc_dev), 0, 0, 0);
1778 return 0;
1779 }
1780
1781 /*
 1782 	 * Clear EINT and other transient flags, so as not to misinterpret the
 1783 	 * next shared interrupt.  Also, to avoid a race, EINT must be cleared
1784 * before XHCI_IMAN_INTR_PEND is cleared.
1785 */
1786 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & ~XHCI_STS_RSVDP0);
1787
1788 #ifdef XHCI_DEBUG
1789 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1790 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1791 #endif
1792
1793 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1794 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1795 iman |= XHCI_IMAN_INTR_PEND;
1796 xhci_rt_write_4(sc, XHCI_IMAN(0), iman);
1797
1798 #ifdef XHCI_DEBUG
1799 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1800 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1801 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1802 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1803 #endif
1804
1805 return 1;
1806 }
1807
1808 /*
1809 * 3 port speed types used in USB stack
1810 *
1811 * usbdi speed
1812 * definition: USB_SPEED_* in usb.h
1813 * They are used in struct usbd_device in USB stack.
1814 * ioctl interface uses these values too.
1815 * port_status speed
1816 * definition: UPS_*_SPEED in usb.h
1817 * They are used in usb_port_status_t and valid only for USB 2.0.
1818 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus
1819 * of usb_port_status_ext_t indicates port speed.
1820 * Note that some 3.0 values overlap with 2.0 values.
1821 * (e.g. 0x200 means UPS_POER_POWER_SS in SS and
1822 * means UPS_LOW_SPEED in HS.)
1823 * port status returned from hub also uses these values.
1824 * On NetBSD UPS_OTHER_SPEED indicates port speed is super speed
1825 * or more.
1826 * xspeed:
1827 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
1828 * They are used in only slot context and PORTSC reg of xhci.
1829 * The difference between usbdi speed and xspeed is
1830 * that FS and LS values are swapped.
1831 */
1832
1833 /* convert usbdi speed to xspeed */
1834 static int
1835 xhci_speed2xspeed(int speed)
1836 {
1837 switch (speed) {
1838 case USB_SPEED_LOW: return 2;
1839 case USB_SPEED_FULL: return 1;
1840 default: return speed;
1841 }
1842 }
1843
1844 #if 0
1845 /* convert xspeed to usbdi speed */
1846 static int
1847 xhci_xspeed2speed(int xspeed)
1848 {
1849 switch (xspeed) {
1850 case 1: return USB_SPEED_FULL;
1851 case 2: return USB_SPEED_LOW;
1852 default: return xspeed;
1853 }
1854 }
1855 #endif
1856
1857 /* convert xspeed to port status speed */
1858 static int
1859 xhci_xspeed2psspeed(int xspeed)
1860 {
1861 switch (xspeed) {
1862 case 0: return 0;
1863 case 1: return UPS_FULL_SPEED;
1864 case 2: return UPS_LOW_SPEED;
1865 case 3: return UPS_HIGH_SPEED;
1866 default: return UPS_OTHER_SPEED;
1867 }
1868 }
1869
1870 /*
1871 * Construct input contexts and issue TRB to open pipe.
1872 */
1873 static usbd_status
1874 xhci_configure_endpoint(struct usbd_pipe *pipe)
1875 {
1876 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1877 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1878 #ifdef USB_DEBUG
1879 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1880 #endif
1881 struct xhci_soft_trb trb;
1882 usbd_status err;
1883
1884 XHCIHIST_FUNC();
1885 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
1886 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
1887 pipe->up_endpoint->ue_edesc->bmAttributes);
1888
1889 /* XXX ensure input context is available? */
1890
1891 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz);
1892
1893 /* set up context */
1894 xhci_setup_ctx(pipe);
1895
1896 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0),
1897 sc->sc_ctxsz * 1);
1898 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs,
1899 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1);
1900
1901 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1902 trb.trb_2 = 0;
1903 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1904 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1905
1906 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
1907
1908 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1909 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci),
1910 sc->sc_ctxsz * 1);
1911
1912 return err;
1913 }
1914
1915 #if 0
1916 static usbd_status
1917 xhci_unconfigure_endpoint(struct usbd_pipe *pipe)
1918 {
1919 #ifdef USB_DEBUG
1920 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1921 #endif
1922
1923 XHCIHIST_FUNC();
1924 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
1925
1926 return USBD_NORMAL_COMPLETION;
1927 }
1928 #endif
1929
1930 /* 4.6.8, 6.4.3.7 */
1931 static void
1932 xhci_reset_endpoint(struct usbd_pipe *pipe)
1933 {
1934 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1935 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1936 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1937 struct xhci_soft_trb trb;
1938
1939 XHCIHIST_FUNC();
1940 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1941
1942 KASSERT(mutex_owned(&sc->sc_lock));
1943
1944 trb.trb_0 = 0;
1945 trb.trb_2 = 0;
1946 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1947 XHCI_TRB_3_EP_SET(dci) |
1948 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP);
1949
1950 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
1951 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
1952 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
1953 }
1954 }
1955
1956 /*
1957 * 4.6.9, 6.4.3.8
1958 * Stop execution of TDs on xfer ring.
1959 * Should be called with sc_lock held.
1960 */
1961 static usbd_status
1962 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci,
1963 uint32_t trb3flags)
1964 {
1965 struct xhci_soft_trb trb;
1966 usbd_status err;
1967
1968 XHCIHIST_FUNC();
1969 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1970
1971 KASSERT(mutex_owned(&sc->sc_lock));
1972
1973 trb.trb_0 = 0;
1974 trb.trb_2 = 0;
1975 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1976 XHCI_TRB_3_EP_SET(dci) |
1977 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) |
1978 trb3flags;
1979
1980 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1981
1982 return err;
1983 }
1984
1985 static usbd_status
1986 xhci_stop_endpoint(struct usbd_pipe *pipe)
1987 {
1988 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1989 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1990 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1991
1992 XHCIHIST_FUNC();
1993 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1994
1995 KASSERT(mutex_owned(&sc->sc_lock));
1996
1997 return xhci_stop_endpoint_cmd(sc, xs, dci, 0);
1998 }
1999
2000 /*
2001 * Set TR Dequeue Pointer.
2002 * xHCI 1.1 4.6.10 6.4.3.9
2003  * Purge all of the TRBs on the ring and reinitialize the ring.
2004  * Set the TR Dequeue Pointer to 0 and the Cycle State to 1.
2005  * The EPSTATE of the endpoint must be ERROR or STOPPED, otherwise a
2006  * CONTEXT_STATE error will be generated.
2007 */
2008 static void
2009 xhci_set_dequeue(struct usbd_pipe *pipe)
2010 {
2011 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2012 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2013 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2014 struct xhci_ring * const xr = xs->xs_xr[dci];
2015 struct xhci_soft_trb trb;
2016
2017 XHCIHIST_FUNC();
2018 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
2019
2020 KASSERT(mutex_owned(&sc->sc_lock));
2021 KASSERT(xr != NULL);
2022
2023 xhci_host_dequeue(xr);
2024
2025 /* set DCS */
2026 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
2027 trb.trb_2 = 0;
2028 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2029 XHCI_TRB_3_EP_SET(dci) |
2030 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
2031
2032 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
2033 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
2034 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
2035 }
2036 }
2037
2038 /*
2039  * Open a new pipe: called from usbd_setup_pipe_flags.
2040  * Fills in the pipe methods.
2041  * If the pipe is not for ep0, calls xhci_configure_endpoint.
2042 */
2043 static usbd_status
2044 xhci_open(struct usbd_pipe *pipe)
2045 {
2046 struct usbd_device * const dev = pipe->up_dev;
2047 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
2048 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
2049 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2050 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2051 const u_int dci = xhci_ep_get_dci(ed);
2052 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
2053 usbd_status err;
2054
2055 XHCIHIST_FUNC();
2056 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr,
2057 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed);
2058 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx",
2059 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress,
2060 ed->bmAttributes);
2061 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize),
2062 ed->bInterval, 0, 0);
2063
2064 if (sc->sc_dying)
2065 return USBD_IOERROR;
2066
2067 /* Root Hub */
2068 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) {
2069 switch (ed->bEndpointAddress) {
2070 case USB_CONTROL_ENDPOINT:
2071 pipe->up_methods = &roothub_ctrl_methods;
2072 break;
2073 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
2074 pipe->up_methods = &xhci_root_intr_methods;
2075 break;
2076 default:
2077 pipe->up_methods = NULL;
2078 DPRINTFN(0, "bad bEndpointAddress 0x%02jx",
2079 ed->bEndpointAddress, 0, 0, 0);
2080 return USBD_INVAL;
2081 }
2082 return USBD_NORMAL_COMPLETION;
2083 }
2084
2085 usb_init_task(&xpipe->xp_async_task, xhci_pipe_restart_async_task,
2086 pipe, USB_TASKQ_MPSAFE);
2087
2088 switch (xfertype) {
2089 case UE_CONTROL:
2090 pipe->up_methods = &xhci_device_ctrl_methods;
2091 break;
2092 case UE_ISOCHRONOUS:
2093 pipe->up_methods = &xhci_device_isoc_methods;
2094 pipe->up_serialise = false;
2095 xpipe->xp_isoc_next = -1;
2096 break;
2097 case UE_BULK:
2098 pipe->up_methods = &xhci_device_bulk_methods;
2099 break;
2100 case UE_INTERRUPT:
2101 pipe->up_methods = &xhci_device_intr_methods;
2102 break;
2103 default:
2104 		return USBD_IOERROR;
2106 }
2107
2108 KASSERT(xs != NULL);
2109 KASSERT(xs->xs_xr[dci] == NULL);
2110
2111 /* allocate transfer ring */
2112 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS,
2113 XHCI_TRB_ALIGN);
2114 if (err) {
2115 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0);
2116 return err;
2117 }
2118
2119 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT)
2120 return xhci_configure_endpoint(pipe);
2121
2122 return USBD_NORMAL_COMPLETION;
2123 }
2124
2125 /*
2126  * Close a pipe, called from usbd_kill_pipe via the close methods.
2127  * If the endpoint being closed is ep0, the whole slot is disabled.
2128  * Should be called with sc_lock held.
2129 */
2130 static void
2131 xhci_close_pipe(struct usbd_pipe *pipe)
2132 {
2133 struct xhci_pipe * const xp =
2134 container_of(pipe, struct xhci_pipe, xp_pipe);
2135 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2136 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2137 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2138 const u_int dci = xhci_ep_get_dci(ed);
2139 struct xhci_soft_trb trb;
2140 uint32_t *cp;
2141
2142 XHCIHIST_FUNC();
2143
2144 usb_rem_task_wait(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC,
2145 &sc->sc_lock);
2146
2147 if (sc->sc_dying)
2148 return;
2149
2150 /* xs is uninitialized before xhci_init_slot */
2151 if (xs == NULL || xs->xs_idx == 0)
2152 return;
2153
2154 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2155 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2156
2157 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
2158 KASSERT(mutex_owned(&sc->sc_lock));
2159
2160 if (pipe->up_dev->ud_depth == 0)
2161 return;
2162
2163 if (dci == XHCI_DCI_EP_CONTROL) {
2164 DPRINTFN(4, "closing ep0", 0, 0, 0, 0);
2165 /* This frees all rings */
2166 xhci_disable_slot(sc, xs->xs_idx);
2167 return;
2168 }
2169
2170 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED)
2171 (void)xhci_stop_endpoint(pipe);
2172
2173 /*
2174 	 * Set the appropriate bit for the endpoint to be dropped.
2175 	 * Don't set the DC bit to 1, otherwise all endpoints
2176 	 * would be deconfigured.
2177 */
2178 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
2179 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));
2180 cp[1] = htole32(0);
2181
2182 /* XXX should be most significant one, not dci? */
2183 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
2184 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci));
2185
2186 /* configure ep context performs an implicit dequeue */
2187 xhci_host_dequeue(xs->xs_xr[dci]);
2188
2189 /* sync input contexts before they are read from memory */
2190 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
2191
2192 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
2193 trb.trb_2 = 0;
2194 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2195 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
2196
2197 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
2198 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2199
2200 xhci_ring_free(sc, &xs->xs_xr[dci]);
2201 xs->xs_xr[dci] = NULL;
2202 }
2203
2204 /*
2205 * Abort transfer. Must be called with sc_lock held. Releases and
2206 * reacquires sc_lock to sleep until hardware acknowledges abort.
2207 */
2208 static void
2209 xhci_abortx(struct usbd_xfer *xfer)
2210 {
2211 XHCIHIST_FUNC();
2212 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2213
2214 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
2215 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
2216
2217 KASSERT(mutex_owned(&sc->sc_lock));
2218 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2219 xfer->ux_status == USBD_TIMEOUT),
2220 "bad abort status: %d", xfer->ux_status);
2221
2222 xhci_pipe_restart(xfer->ux_pipe);
2223
2224 DPRINTFN(14, "end", 0, 0, 0, 0);
2225 }
2226
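/*
 * Reset the software view of a ring: clear the TRBs and cookies and
 * reset the enqueue index and cycle state.  Used when (re)initializing
 * a ring and when the controller's dequeue pointer is rewound.
 */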
2227 static void
2228 xhci_host_dequeue(struct xhci_ring * const xr)
2229 {
2230 /* When dequeueing the controller, update our struct copy too */
2231 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE);
2232 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE,
2233 BUS_DMASYNC_PREWRITE);
2234 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies));
2235
2236 xr->xr_ep = 0;
2237 xr->xr_cs = 1;
2238 }
2239
2240 /*
2241 * Recover STALLed endpoint, or stop endpoint to abort a pipe.
2242 * xHCI 1.1 sect 4.10.2.1
2243 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove
2244 * all transfers on transfer ring.
2245 */
2246 static void
2247 xhci_pipe_restart(struct usbd_pipe *pipe)
2248 {
2249 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2250 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2251 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2252
2253 XHCIHIST_FUNC();
2254 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2255 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2256
2257 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2258
2259 /*
2260 * - If the endpoint is halted, indicating a stall, reset it.
2261 * - If the endpoint is stopped, we're already good.
2262 * - Otherwise, someone wanted to abort the pipe, so stop the
2263 * endpoint.
2264 *
2265 * In any case, clear the ring.
2266 */
2267 switch (xhci_get_epstate(sc, xs, dci)) {
2268 case XHCI_EPSTATE_HALTED:
2269 xhci_reset_endpoint(pipe);
2270 break;
2271 case XHCI_EPSTATE_STOPPED:
2272 break;
2273 default:
2274 xhci_stop_endpoint(pipe);
2275 break;
2276 }
2277
2278 switch (xhci_get_epstate(sc, xs, dci)) {
2279 case XHCI_EPSTATE_STOPPED:
2280 break;
2281 case XHCI_EPSTATE_ERROR:
2282 device_printf(sc->sc_dev, "endpoint 0x%x error\n",
2283 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2284 break;
2285 default:
2286 device_printf(sc->sc_dev, "endpoint 0x%x failed to stop\n",
2287 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2288 }
2289
2290 xhci_set_dequeue(pipe);
2291
2292 DPRINTFN(4, "ends", 0, 0, 0, 0);
2293 }
2294
2295 static void
2296 xhci_pipe_restart_async_task(void *cookie)
2297 {
2298 struct usbd_pipe * const pipe = cookie;
2299 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2300 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2301 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2302 struct xhci_ring * const tr = xs->xs_xr[dci];
2303 struct usbd_xfer *xfer;
2304
2305 XHCIHIST_FUNC();
2306 XHCIHIST_CALLARGS("sc=%#jx pipe=%#jx",
2307 (uintptr_t)sc, (uintptr_t)pipe, 0, 0);
2308
2309 mutex_enter(&sc->sc_lock);
2310
2311 xhci_pipe_restart(pipe);
2312
2313 /*
2314 * We halted our own queue because it stalled. Mark it no
2315 * longer halted and start issuing queued transfers again.
2316 */
2317 tr->is_halted = false;
2318 xfer = SIMPLEQ_FIRST(&pipe->up_queue);
2319 if (xfer) {
2320 /*
2321 * If the first xfer of the queue is not in progress,
2322 * though, there may be a concurrent software abort
2323 * that has already cancelled it and is now in the
2324 * middle of a concurrent xhci_pipe_restart waiting to
2325 * reacquire the pipe (bus) lock. So only restart the
2326 * xfer if it's still USBD_IN_PROGRESS.
2327 *
2328 * Either way, xfers on the queue can't be in
2329 * USBD_NOT_STARTED.
2330 */
2331 KASSERT(xfer->ux_status != USBD_NOT_STARTED);
2332 if (xfer->ux_status == USBD_IN_PROGRESS) {
2333 (*pipe->up_methods->upm_start)(xfer);
2334 } else {
2335 DPRINTF("pipe restart race xfer=%#jx status=%jd",
2336 (uintptr_t)xfer, xfer->ux_status, 0, 0);
2337 }
2338 }
2339
2340 mutex_exit(&sc->sc_lock);
2341 }
2342
2343 static void
2344 xhci_pipe_restart_async(struct usbd_pipe *pipe)
2345 {
2346 struct xhci_pipe * const xp =
2347 container_of(pipe, struct xhci_pipe, xp_pipe);
2348 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2349 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2350 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2351 struct xhci_ring * const tr = xs->xs_xr[dci];
2352
2353 XHCIHIST_FUNC();
2354 XHCIHIST_CALLARGS("pipe %#jx", (uintptr_t)pipe, 0, 0, 0);
2355
2356 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2357
2358 tr->is_halted = true;
2359 usb_add_task(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC);
2360
2361 DPRINTFN(4, "ends", 0, 0, 0, 0);
2362 }
2363
2364 /* Process root hub port status/change events and notify uhub_intr. */
2365 static void
2366 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport)
2367 {
2368 XHCIHIST_FUNC();
2369 XHCIHIST_CALLARGS("xhci%jd: port %ju status change",
2370 device_unit(sc->sc_dev), ctlrport, 0, 0);
2371
2372 if (ctlrport > sc->sc_maxports)
2373 return;
2374
2375 const size_t bn = xhci_ctlrport2bus(sc, ctlrport);
2376 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport);
2377 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn];
2378
2379 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change",
2380 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer);
2381
2382 if (xfer == NULL)
2383 return;
2384 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
2385
2386 uint8_t *p = xfer->ux_buf;
2387 if (!xhci_polling_p(sc) || !sc->sc_intrxfer_deferred[bn])
2388 memset(p, 0, xfer->ux_length);
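	/* Port change bitmap: bit N corresponds to root hub port N (ports are numbered from 1). */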
2389 p[rhp / NBBY] |= 1 << (rhp % NBBY);
2390 xfer->ux_actlen = xfer->ux_length;
2391 xfer->ux_status = USBD_NORMAL_COMPLETION;
2392 if (xhci_polling_p(sc))
2393 sc->sc_intrxfer_deferred[bn] = true;
2394 else
2395 usb_transfer_complete(xfer);
2396 }
2397
2398 /* Process Transfer Events */
2399 static void
2400 xhci_event_transfer(struct xhci_softc * const sc,
2401 const struct xhci_trb * const trb)
2402 {
2403 uint64_t trb_0;
2404 uint32_t trb_2, trb_3;
2405 uint8_t trbcode;
2406 u_int slot, dci;
2407 struct xhci_slot *xs;
2408 struct xhci_ring *xr;
2409 struct xhci_xfer *xx;
2410 struct usbd_xfer *xfer;
2411 usbd_status err;
2412
2413 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2414
2415 trb_0 = le64toh(trb->trb_0);
2416 trb_2 = le32toh(trb->trb_2);
2417 trb_3 = le32toh(trb->trb_3);
2418 trbcode = XHCI_TRB_2_ERROR_GET(trb_2);
2419 slot = XHCI_TRB_3_SLOT_GET(trb_3);
2420 dci = XHCI_TRB_3_EP_GET(trb_3);
2421 xs = &sc->sc_slots[slot];
2422 xr = xs->xs_xr[dci];
2423
2424 /* sanity check */
2425 KASSERT(xr != NULL);
2426 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots,
2427 "invalid xs_idx %u slot %u", xs->xs_idx, slot);
2428
2429 int idx = 0;
2430 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2431 if (xhci_trb_get_idx(xr, trb_0, &idx)) {
2432 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0);
2433 return;
2434 }
2435 xx = xr->xr_cookies[idx];
2436
2437 /* clear cookie of consumed TRB */
2438 xr->xr_cookies[idx] = NULL;
2439
2440 /*
2441 		 * xx is NULL if the pipe is open but no xfer has been started.
2442 		 * This happens when stopping an idle pipe.
2443 */
2444 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) {
2445 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju",
2446 idx, (uintptr_t)xx, trbcode, dci);
2447 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0,
2448 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)),
2449 0, 0);
2450 return;
2451 }
2452 } else {
2453 		/* When ED != 0, trb_0 is the virtual address of the struct xhci_xfer. */
2454 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
2455 }
2456 /* XXX this may not happen */
2457 if (xx == NULL) {
2458 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
2459 return;
2460 }
2461 xfer = &xx->xx_xfer;
2462 /* XXX this may happen when detaching */
2463 if (xfer == NULL) {
2464 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2465 (uintptr_t)xx, trb_0, 0, 0);
2466 return;
2467 }
2468 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2469 /* XXX I dunno why this happens */
2470 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2471
2472 if (!xfer->ux_pipe->up_repeat &&
2473 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2474 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2475 0, 0, 0);
2476 return;
2477 }
2478
2479 const uint8_t xfertype =
2480 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2481
2482 /* 4.11.5.2 Event Data TRB */
2483 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2484 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2485 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2486 if ((trb_0 & 0x3) == 0x3) {
2487 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2488 }
2489 }
2490
2491 switch (trbcode) {
2492 case XHCI_TRB_ERROR_SHORT_PKT:
2493 case XHCI_TRB_ERROR_SUCCESS:
2494 /*
2495 * A ctrl transfer can generate two events if it has a Data
2496 * stage. A short data stage can be OK and should not
2497 * complete the transfer as the status stage needs to be
2498 * performed.
2499 *
2500 		 * Note: the Data and Status stage events point at the same xfer.
2501 		 * ux_actlen and ux_dmabuf will be passed to
2502 		 * usb_transfer_complete after the Status stage event.
2503 		 *
2504 		 * Which stage generated the event can be distinguished:
2505 		 * + by checking the low bits of trb_0 if ED==1
2506 		 *   (see xhci_device_ctrl_start).
2507 		 * + by checking the type of the original TRB if ED==0.
2508 		 *
2509 		 * In addition, intr, bulk, and isoc transfers currently
2510 		 * consist of a single TD, so the "skip" is not needed.
2511 		 * Ctrl xfers use EVENT_DATA, and the others do not.
2512 		 * Thus the driver can switch the flow by checking the ED bit.
2513 */
2514 if (xfertype == UE_ISOCHRONOUS) {
2515 xfer->ux_frlengths[xx->xx_isoc_done] -=
2516 XHCI_TRB_2_REM_GET(trb_2);
2517 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2518 } else if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2519 if (xfer->ux_actlen == 0)
2520 xfer->ux_actlen = xfer->ux_length -
2521 XHCI_TRB_2_REM_GET(trb_2);
2522 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2523 == XHCI_TRB_TYPE_DATA_STAGE) {
2524 return;
2525 }
2526 } else if ((trb_0 & 0x3) == 0x3) {
2527 return;
2528 }
2529 err = USBD_NORMAL_COMPLETION;
2530 break;
2531 case XHCI_TRB_ERROR_STOPPED:
2532 case XHCI_TRB_ERROR_LENGTH:
2533 case XHCI_TRB_ERROR_STOPPED_SHORT:
2534 err = USBD_IOERROR;
2535 break;
2536 case XHCI_TRB_ERROR_STALL:
2537 case XHCI_TRB_ERROR_BABBLE:
2538 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2539 xhci_pipe_restart_async(xfer->ux_pipe);
2540 err = USBD_STALLED;
2541 break;
2542 default:
2543 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2544 err = USBD_IOERROR;
2545 break;
2546 }
2547
2548 if (xfertype == UE_ISOCHRONOUS) {
2549 switch (trbcode) {
2550 case XHCI_TRB_ERROR_SHORT_PKT:
2551 case XHCI_TRB_ERROR_SUCCESS:
2552 break;
2553 case XHCI_TRB_ERROR_MISSED_SERVICE:
2554 case XHCI_TRB_ERROR_RING_UNDERRUN:
2555 case XHCI_TRB_ERROR_RING_OVERRUN:
2556 default:
2557 xfer->ux_frlengths[xx->xx_isoc_done] = 0;
2558 break;
2559 }
2560 if (++xx->xx_isoc_done < xfer->ux_nframes)
2561 return;
2562 }
2563
2564 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
2565 (trb_0 & 0x3) == 0x0) {
2566 /*
2567 * Try to claim this xfer for completion. If it has
2568 * already completed or aborted, drop it on the floor.
2569 */
2570 if (!usbd_xfer_trycomplete(xfer))
2571 return;
2572
2573 /* Set the status. */
2574 xfer->ux_status = err;
2575
2576 usb_transfer_complete(xfer);
2577 }
2578 }
2579
2580 /* Process Command Completion events */
2581 static void
2582 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb)
2583 {
2584 uint64_t trb_0;
2585 uint32_t trb_2, trb_3;
2586
2587 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2588
2589 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2590
2591 trb_0 = le64toh(trb->trb_0);
2592 trb_2 = le32toh(trb->trb_2);
2593 trb_3 = le32toh(trb->trb_3);
2594
2595 if (trb_0 == sc->sc_command_addr) {
2596 sc->sc_resultpending = false;
2597
2598 sc->sc_result_trb.trb_0 = trb_0;
2599 sc->sc_result_trb.trb_2 = trb_2;
2600 sc->sc_result_trb.trb_3 = trb_3;
2601 if (XHCI_TRB_2_ERROR_GET(trb_2) !=
2602 XHCI_TRB_ERROR_SUCCESS) {
2603 DPRINTFN(1, "command completion "
2604 "failure: 0x%016jx 0x%08jx 0x%08jx",
2605 trb_0, trb_2, trb_3, 0);
2606 }
2607 cv_signal(&sc->sc_command_cv);
2608 } else {
2609 DPRINTFN(1, "spurious event: %#jx 0x%016jx "
2610 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3);
2611 }
2612 }
2613
2614 /*
2615  * Process events.
2616  * Called from xhci_softintr.
2617 */
2618 static void
2619 xhci_handle_event(struct xhci_softc * const sc,
2620 const struct xhci_trb * const trb)
2621 {
2622 uint64_t trb_0;
2623 uint32_t trb_2, trb_3;
2624
2625 XHCIHIST_FUNC();
2626
2627 trb_0 = le64toh(trb->trb_0);
2628 trb_2 = le32toh(trb->trb_2);
2629 trb_3 = le32toh(trb->trb_3);
2630
2631 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx",
2632 (uintptr_t)trb, trb_0, trb_2, trb_3);
2633
2634 /*
2635 * 4.11.3.1, 6.4.2.1
2636 * TRB Pointer is invalid for these completion codes.
2637 */
2638 switch (XHCI_TRB_2_ERROR_GET(trb_2)) {
2639 case XHCI_TRB_ERROR_RING_UNDERRUN:
2640 case XHCI_TRB_ERROR_RING_OVERRUN:
2641 case XHCI_TRB_ERROR_VF_RING_FULL:
2642 return;
2643 default:
2644 if (trb_0 == 0) {
2645 return;
2646 }
2647 break;
2648 }
2649
2650 switch (XHCI_TRB_3_TYPE_GET(trb_3)) {
2651 case XHCI_TRB_EVENT_TRANSFER:
2652 xhci_event_transfer(sc, trb);
2653 break;
2654 case XHCI_TRB_EVENT_CMD_COMPLETE:
2655 xhci_event_cmd(sc, trb);
2656 break;
2657 case XHCI_TRB_EVENT_PORT_STS_CHANGE:
2658 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff));
2659 break;
2660 default:
2661 break;
2662 }
2663 }
2664
2665 static void
2666 xhci_softintr(void *v)
2667 {
2668 struct usbd_bus * const bus = v;
2669 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2670 struct xhci_ring * const er = sc->sc_er;
2671 struct xhci_trb *trb;
2672 int i, j, k, bn;
2673
2674 XHCIHIST_FUNC();
2675
2676 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2677
2678 i = er->xr_ep;
2679 j = er->xr_cs;
2680
2681 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0);
2682
2683 /*
2684 * Handle deferred root intr xfer, in case we just switched off
2685 * polling. It's not safe to complete root intr xfers while
2686 * polling -- too much kernel machinery gets involved.
2687 */
2688 if (!xhci_polling_p(sc)) {
2689 for (bn = 0; bn < 2; bn++) {
2690 if (__predict_false(sc->sc_intrxfer_deferred[bn])) {
2691 sc->sc_intrxfer_deferred[bn] = false;
2692 usb_transfer_complete(sc->sc_intrxfer[bn]);
2693 }
2694 }
2695 }
2696
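	/*
	 * Consume event TRBs until we reach one whose cycle bit does not
	 * match our consumer cycle state, i.e. one still owned by the xHC.
	 */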
2697 while (1) {
2698 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE,
2699 BUS_DMASYNC_POSTREAD);
2700 trb = &er->xr_trb[i];
2701 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0;
2702
2703 if (j != k)
2704 break;
2705
2706 xhci_handle_event(sc, trb);
2707
2708 i++;
2709 if (i == er->xr_ntrb) {
2710 i = 0;
2711 j ^= 1;
2712 }
2713 }
2714
2715 er->xr_ep = i;
2716 er->xr_cs = j;
2717
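	/*
	 * Update the Event Ring Dequeue Pointer; writing the EHB (Event
	 * Handler Busy) bit back also clears it, as it is RW1C.
	 */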
2718 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) |
2719 XHCI_ERDP_BUSY);
2720
2721 DPRINTFN(16, "ends", 0, 0, 0, 0);
2722
2723 return;
2724 }
2725
2726 static void
2727 xhci_poll(struct usbd_bus *bus)
2728 {
2729 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2730
2731 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2732
2733 mutex_enter(&sc->sc_intr_lock);
2734 int ret = xhci_intr1(sc);
2735 if (ret) {
2736 xhci_softintr(bus);
2737 }
2738 mutex_exit(&sc->sc_intr_lock);
2739
2740 return;
2741 }
2742
2743 static struct usbd_xfer *
2744 xhci_allocx(struct usbd_bus *bus, unsigned int nframes)
2745 {
2746 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2747 struct xhci_xfer *xx;
2748 u_int ntrbs;
2749
2750 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2751
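	/*
	 * Reserve room for at least 3 TRBs, enough for the setup, data and
	 * status stages of a control transfer; isoc needs one per frame.
	 */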
2752 ntrbs = uimax(3, nframes);
2753 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs;
2754
2755 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
2756 if (xx != NULL) {
2757 memset(xx, 0, sizeof(*xx));
2758 if (ntrbs > 0) {
2759 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP);
2760 xx->xx_ntrb = ntrbs;
2761 }
2762 #ifdef DIAGNOSTIC
2763 xx->xx_xfer.ux_state = XFER_BUSY;
2764 #endif
2765 }
2766
2767 return &xx->xx_xfer;
2768 }
2769
2770 static void
2771 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
2772 {
2773 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2774 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
2775
2776 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2777
2778 #ifdef DIAGNOSTIC
2779 if (xfer->ux_state != XFER_BUSY &&
2780 xfer->ux_status != USBD_NOT_STARTED) {
2781 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx",
2782 (uintptr_t)xfer, xfer->ux_state, 0, 0);
2783 }
2784 xfer->ux_state = XFER_FREE;
2785 #endif
2786 if (xx->xx_ntrb > 0) {
2787 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb));
2788 xx->xx_trb = NULL;
2789 xx->xx_ntrb = 0;
2790 }
2791 pool_cache_put(sc->sc_xferpool, xx);
2792 }
2793
2794 static bool
2795 xhci_dying(struct usbd_bus *bus)
2796 {
2797 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2798
2799 return sc->sc_dying;
2800 }
2801
2802 static void
2803 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2804 {
2805 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2806
2807 *lock = &sc->sc_lock;
2808 }
2809
2810 extern uint32_t usb_cookie_no;
2811
2812 /*
2813 * xHCI 4.3
2814 * Called when uhub_explore finds a new device (via usbd_new_device).
2815 * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2816 * This function does:
2817 * Allocate and construct dev structure of default endpoint (ep0).
2818  *  Allocate and construct the dev structure and its default endpoint (ep0).
2819  *  Allocate and open the pipe for ep0.
2820  *  Enable a slot and initialize the slot context.
2821  *  Set the address.
2822  *  Read the initial device descriptor.
2823  *  Determine the initial MaxPacketSize (mps) from the speed.
2824  *  Read the full device descriptor.
2825  *  Register this device.
2826  * Finally the device state transitions to ADDRESSED.
2827 static usbd_status
2828 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2829 int speed, int port, struct usbd_port *up)
2830 {
2831 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2832 struct usbd_device *dev;
2833 usbd_status err;
2834 usb_device_descriptor_t *dd;
2835 struct xhci_slot *xs;
2836 uint32_t *cp;
2837
2838 XHCIHIST_FUNC();
2839 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2840 port, depth, speed, (uintptr_t)up);
2841
2842 KASSERT(KERNEL_LOCKED_P());
2843
2844 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2845 dev->ud_bus = bus;
2846 dev->ud_quirks = &usbd_no_quirk;
2847 dev->ud_addr = 0;
2848 dev->ud_ddesc.bMaxPacketSize = 0;
2849 dev->ud_depth = depth;
2850 dev->ud_powersrc = up;
2851 dev->ud_myhub = up->up_parent;
2852 dev->ud_speed = speed;
2853 dev->ud_langid = USBD_NOLANG;
2854 dev->ud_cookie.cookie = ++usb_cookie_no;
2855
2856 /* Set up default endpoint handle. */
2857 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2858 	/* doesn't matter, just don't leave it uninitialized */
2859 dev->ud_ep0.ue_toggle = 0;
2860
2861 /* Set up default endpoint descriptor. */
2862 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2863 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2864 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2865 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2866 dev->ud_ep0desc.bInterval = 0;
2867
2868 /* 4.3, 4.8.2.1 */
2869 switch (speed) {
2870 case USB_SPEED_SUPER:
2871 case USB_SPEED_SUPER_PLUS:
2872 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2873 break;
2874 case USB_SPEED_FULL:
2875 /* XXX using 64 as initial mps of ep0 in FS */
2876 case USB_SPEED_HIGH:
2877 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2878 break;
2879 case USB_SPEED_LOW:
2880 default:
2881 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2882 break;
2883 }
2884
2885 up->up_dev = dev;
2886
2887 dd = &dev->ud_ddesc;
2888
2889 if (depth == 0 && port == 0) {
2890 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2891 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2892
2893 /* Establish the default pipe. */
2894 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2895 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2896 if (err) {
2897 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0);
2898 goto bad;
2899 }
2900 err = usbd_get_initial_ddesc(dev, dd);
2901 if (err) {
2902 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2903 goto bad;
2904 }
2905 } else {
2906 uint8_t slot = 0;
2907
2908 /* 4.3.2 */
2909 err = xhci_enable_slot(sc, &slot);
2910 if (err) {
2911 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0);
2912 goto bad;
2913 }
2914
2915 xs = &sc->sc_slots[slot];
2916 dev->ud_hcpriv = xs;
2917
2918 /* 4.3.3 initialize slot structure */
2919 err = xhci_init_slot(dev, slot);
2920 if (err) {
2921 DPRINTFN(1, "init slot %ju", err, 0, 0, 0);
2922 dev->ud_hcpriv = NULL;
2923 /*
2924 			 * We have to disable the slot here because
2925 			 * xs->xs_idx == 0 when xhci_init_slot fails, and
2926 			 * in that case usbd_remove_device won't disable it.
2927 */
2928 mutex_enter(&sc->sc_lock);
2929 xhci_disable_slot(sc, slot);
2930 mutex_exit(&sc->sc_lock);
2931 goto bad;
2932 }
2933
2934 /*
2935 * We have to establish the default pipe _after_ slot
2936 * structure has been prepared.
2937 */
2938 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2939 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2940 if (err) {
2941 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0,
2942 0);
2943 goto bad;
2944 }
2945
2946 /* 4.3.4 Address Assignment */
2947 err = xhci_set_address(dev, slot, false);
2948 if (err) {
2949 			DPRINTFN(1, "failed to set address: %ju", err, 0, 0, 0);
2950 goto bad;
2951 }
2952
2953 /* Allow device time to set new address */
2954 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE);
2955
2956 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2957 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT);
2958 HEXDUMP("slot context", cp, sc->sc_ctxsz);
2959 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3]));
2960 DPRINTFN(4, "device address %ju", addr, 0, 0, 0);
2961 /*
2962 * XXX ensure we know when the hardware does something
2963 * we can't yet cope with
2964 */
2965 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr);
2966 dev->ud_addr = addr;
2967
2968 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL,
2969 "addr %d already allocated", dev->ud_addr);
2970 /*
2971 * The root hub is given its own slot
2972 */
2973 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev;
2974
2975 err = usbd_get_initial_ddesc(dev, dd);
2976 if (err) {
2977 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2978 goto bad;
2979 }
2980
2981 /* 4.8.2.1 */
2982 if (USB_IS_SS(speed)) {
2983 if (dd->bMaxPacketSize != 9) {
2984 printf("%s: invalid mps 2^%u for SS ep0,"
2985 " using 512\n",
2986 device_xname(sc->sc_dev),
2987 dd->bMaxPacketSize);
2988 dd->bMaxPacketSize = 9;
2989 }
2990 USETW(dev->ud_ep0desc.wMaxPacketSize,
2991 (1 << dd->bMaxPacketSize));
2992 } else
2993 USETW(dev->ud_ep0desc.wMaxPacketSize,
2994 dd->bMaxPacketSize);
2995 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0);
2996 err = xhci_update_ep0_mps(sc, xs,
2997 UGETW(dev->ud_ep0desc.wMaxPacketSize));
2998 if (err) {
2999 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0);
3000 goto bad;
3001 }
3002 }
3003
3004 err = usbd_reload_device_desc(dev);
3005 if (err) {
3006 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
3007 goto bad;
3008 }
3009
3010 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,",
3011 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0);
3012 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,",
3013 dd->bDeviceClass, dd->bDeviceSubClass,
3014 dd->bDeviceProtocol, 0);
3015 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd",
3016 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations,
3017 dev->ud_speed);
3018
3019 usbd_get_device_strings(dev);
3020
3021 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev);
3022
3023 if (depth == 0 && port == 0) {
3024 usbd_attach_roothub(parent, dev);
3025 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0);
3026 return USBD_NORMAL_COMPLETION;
3027 }
3028
3029 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr);
3030 bad:
3031 if (err != USBD_NORMAL_COMPLETION) {
3032 if (depth == 0 && port == 0 && dev->ud_pipe0)
3033 usbd_kill_pipe(dev->ud_pipe0);
3034 usbd_remove_device(dev, up);
3035 }
3036
3037 return err;
3038 }
3039
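/*
 * Allocate and initialize a TRB ring: a contiguous DMA buffer of ntrb
 * TRBs plus a parallel array of per-TRB cookies used to map completed
 * TRBs back to their xfers.
 */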
3040 static usbd_status
3041 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp,
3042 size_t ntrb, size_t align)
3043 {
3044 size_t size = ntrb * XHCI_TRB_SIZE;
3045 struct xhci_ring *xr;
3046
3047 XHCIHIST_FUNC();
3048 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx",
3049 (uintptr_t)*xrp, ntrb, align, 0);
3050
3051 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP);
3052 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0);
3053
3054 int err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
3055 USBMALLOC_ZERO, &xr->xr_dma);
3056 if (err) {
3057 kmem_free(xr, sizeof(struct xhci_ring));
3058 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0);
3059 return err;
3060 }
3061 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
3062 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP);
3063 xr->xr_trb = xhci_ring_trbv(xr, 0);
3064 xr->xr_ntrb = ntrb;
3065 xr->is_halted = false;
3066 xhci_host_dequeue(xr);
3067 *xrp = xr;
3068
3069 return USBD_NORMAL_COMPLETION;
3070 }
3071
3072 static void
3073 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr)
3074 {
3075 if (*xr == NULL)
3076 return;
3077
3078 usb_freemem(&(*xr)->xr_dma);
3079 mutex_destroy(&(*xr)->xr_lock);
3080 kmem_free((*xr)->xr_cookies,
3081 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb);
3082 kmem_free(*xr, sizeof(struct xhci_ring));
3083 *xr = NULL;
3084 }
3085
3086 static void
3087 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr,
3088 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs)
3089 {
3090 size_t i;
3091 u_int ri;
3092 u_int cs;
3093 uint64_t parameter;
3094 uint32_t status;
3095 uint32_t control;
3096
3097 XHCIHIST_FUNC();
3098 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju",
3099 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0);
3100
3101 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u",
3102 ntrbs, xr->xr_ntrb);
3103 for (i = 0; i < ntrbs; i++) {
3104 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr,
3105 (uintptr_t)trbs, i, 0);
3106 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx",
3107 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0);
3108 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) !=
3109 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3);
3110 }
3111
3112 ri = xr->xr_ep;
3113 cs = xr->xr_cs;
3114
3115 /*
3116 * Although the xhci hardware can do scatter/gather dma from
3117 * arbitrary sized buffers, there is a non-obvious restriction
3118 * that a LINK trb is only allowed at the end of a burst of
3119 * transfers - which might be 16kB.
3120 	 * Arbitrarily aligned LINK trbs definitely fail on Ivy Bridge.
3121 	 * The simple solution is not to allow a LINK trb in the middle
3122 	 * of anything - as is done here.
3123 * XXX: (dsl) There are xhci controllers out there (eg some made by
3124 * ASMedia) that seem to lock up if they process a LINK trb but
3125 * cannot process the linked-to trb yet.
3126 * The code should write the 'cycle' bit on the link trb AFTER
3127 * adding the other trb.
3128 */
3129 u_int firstep = xr->xr_ep;
3130 u_int firstcs = xr->xr_cs;
3131
3132 for (i = 0; i < ntrbs; ) {
3133 u_int oldri = ri;
3134 u_int oldcs = cs;
3135
3136 if (ri >= (xr->xr_ntrb - 1)) {
3137 /* Put Link TD at the end of ring */
3138 parameter = xhci_ring_trbp(xr, 0);
3139 status = 0;
3140 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) |
3141 XHCI_TRB_3_TC_BIT;
3142 xr->xr_cookies[ri] = NULL;
3143 xr->xr_ep = 0;
3144 xr->xr_cs ^= 1;
3145 ri = xr->xr_ep;
3146 cs = xr->xr_cs;
3147 } else {
3148 parameter = trbs[i].trb_0;
3149 status = trbs[i].trb_2;
3150 control = trbs[i].trb_3;
3151
3152 xr->xr_cookies[ri] = cookie;
3153 ri++;
3154 i++;
3155 }
3156 /*
3157 		 * If this is the first TRB, mark it invalid (wrong cycle bit)
3158 		 * to prevent the xHC from running it immediately.
3159 */
3160 if (oldri == firstep) {
3161 if (oldcs) {
3162 control &= ~XHCI_TRB_3_CYCLE_BIT;
3163 } else {
3164 control |= XHCI_TRB_3_CYCLE_BIT;
3165 }
3166 } else {
3167 if (oldcs) {
3168 control |= XHCI_TRB_3_CYCLE_BIT;
3169 } else {
3170 control &= ~XHCI_TRB_3_CYCLE_BIT;
3171 }
3172 }
3173 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control);
3174 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri,
3175 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3176 }
3177
3178 	/* Now invert the cycle bit of the first TRB, handing the chain to the xHC */
3179 if (firstcs) {
3180 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT);
3181 } else {
3182 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT);
3183 }
3184 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep,
3185 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3186
3187 xr->xr_ep = ri;
3188 xr->xr_cs = cs;
3189
3190 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep,
3191 xr->xr_cs, 0);
3192 }
3193
3194 static inline void
3195 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr,
3196 struct xhci_xfer *xx, u_int ntrb)
3197 {
3198 KASSERT(ntrb <= xx->xx_ntrb);
3199 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb);
3200 }
3201
3202 /*
3203  * Stop execution of commands, purge all commands on the command ring,
3204  * and rewind the dequeue pointer.
3205 */
3206 static void
3207 xhci_abort_command(struct xhci_softc *sc)
3208 {
3209 struct xhci_ring * const cr = sc->sc_cr;
3210 uint64_t crcr;
3211 int i;
3212
3213 XHCIHIST_FUNC();
3214 XHCIHIST_CALLARGS("command %#jx timeout, aborting",
3215 sc->sc_command_addr, 0, 0, 0);
3216
3217 mutex_enter(&cr->xr_lock);
3218
3219 /* 4.6.1.2 Aborting a Command */
3220 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3221 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA);
3222
3223 for (i = 0; i < 500; i++) {
3224 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3225 if ((crcr & XHCI_CRCR_LO_CRR) == 0)
3226 break;
3227 usb_delay_ms(&sc->sc_bus, 1);
3228 }
3229 if ((crcr & XHCI_CRCR_LO_CRR) != 0) {
3230 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0);
3231 /* reset HC here? */
3232 }
3233
3234 /* reset command ring dequeue pointer */
3235 cr->xr_ep = 0;
3236 cr->xr_cs = 1;
3237 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs);
3238
3239 mutex_exit(&cr->xr_lock);
3240 }
3241
3242 /*
3243  * Put a command on the command ring, ring the doorbell, and cv_timedwait
3244  * with a timeout.  Command completion is notified by cv_signal from
3245  * xhci_event_cmd() (called from xhci_softintr), or the wait times out.
3246  * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(),
3247  * and then examined here.
3248 */
3249 static usbd_status
3250 xhci_do_command_locked(struct xhci_softc * const sc,
3251 struct xhci_soft_trb * const trb, int timeout)
3252 {
3253 struct xhci_ring * const cr = sc->sc_cr;
3254 usbd_status err;
3255
3256 XHCIHIST_FUNC();
3257 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx",
3258 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3259
3260 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
3261 KASSERT(mutex_owned(&sc->sc_lock));
3262
3263 while (sc->sc_command_addr != 0 ||
3264 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp))
3265 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
3266 if (sc->sc_suspendresume_failed)
3267 return USBD_IOERROR;
3268
3269 /*
3270 	 * If the enqueue pointer points at the last entry of the ring, that
3271 	 * entry is the Link TRB and the command TRB will be stored in TRB 0.
3272 */
3273 if (cr->xr_ep == cr->xr_ntrb - 1)
3274 sc->sc_command_addr = xhci_ring_trbp(cr, 0);
3275 else
3276 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep);
3277
3278 sc->sc_resultpending = true;
3279
3280 mutex_enter(&cr->xr_lock);
3281 xhci_ring_put(sc, cr, NULL, trb, 1);
3282 mutex_exit(&cr->xr_lock);
3283
3284 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0);
3285
3286 while (sc->sc_resultpending) {
3287 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock,
3288 MAX(1, mstohz(timeout))) == EWOULDBLOCK) {
3289 xhci_abort_command(sc);
3290 err = USBD_TIMEOUT;
3291 goto timedout;
3292 }
3293 }
3294
3295 trb->trb_0 = sc->sc_result_trb.trb_0;
3296 trb->trb_2 = sc->sc_result_trb.trb_2;
3297 trb->trb_3 = sc->sc_result_trb.trb_3;
3298
3299 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx",
3300 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3301
3302 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) {
3303 case XHCI_TRB_ERROR_SUCCESS:
3304 err = USBD_NORMAL_COMPLETION;
3305 break;
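	/*
	 * Completion codes 192-223 are vendor-defined errors and 224-255
	 * are vendor-defined information codes (xHCI 1.1 sect 6.4.5);
	 * treat the latter as success.
	 */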
3306 default:
3307 case 192 ... 223:
3308 DPRINTFN(5, "error %#jx",
3309 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0);
3310 err = USBD_IOERROR;
3311 break;
3312 case 224 ... 255:
3313 err = USBD_NORMAL_COMPLETION;
3314 break;
3315 }
3316
3317 timedout:
3318 sc->sc_resultpending = false;
3319 sc->sc_command_addr = 0;
3320 cv_broadcast(&sc->sc_cmdbusy_cv);
3321
3322 return err;
3323 }
3324
3325 static usbd_status
3326 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb,
3327 int timeout)
3328 {
3329
3330 mutex_enter(&sc->sc_lock);
3331 usbd_status ret = xhci_do_command_locked(sc, trb, timeout);
3332 mutex_exit(&sc->sc_lock);
3333
3334 return ret;
3335 }
3336
3337 static usbd_status
3338 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp)
3339 {
3340 struct xhci_soft_trb trb;
3341 usbd_status err;
3342
3343 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3344
3345 trb.trb_0 = 0;
3346 trb.trb_2 = 0;
3347 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
3348
3349 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3350 if (err != USBD_NORMAL_COMPLETION) {
3351 return err;
3352 }
3353
3354 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3);
3355
3356 return err;
3357 }
3358
3359 /*
3360 * xHCI 4.6.4
3361  * Disable the slot and deallocate its rings and device/input context DMA buffers.
3362 * All endpoints in the slot should be stopped.
3363 * Should be called with sc_lock held.
3364 */
3365 static usbd_status
3366 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot)
3367 {
3368 struct xhci_soft_trb trb;
3369 struct xhci_slot *xs;
3370 usbd_status err;
3371
3372 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3373
3374 if (sc->sc_dying)
3375 return USBD_IOERROR;
3376
3377 trb.trb_0 = 0;
3378 trb.trb_2 = 0;
3379 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) |
3380 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT);
3381
3382 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
3383
3384 if (!err) {
3385 xs = &sc->sc_slots[slot];
3386 if (xs->xs_idx != 0) {
3387 xhci_free_slot(sc, xs);
3388 xhci_set_dcba(sc, 0, slot);
3389 memset(xs, 0, sizeof(*xs));
3390 }
3391 }
3392
3393 return err;
3394 }
3395
3396 /*
3397  * Set the address of the device and transition the slot state from ENABLED
3398  * to ADDRESSED if the Block Set Address Request (BSR) flag is false.
3399  * If BSR==true, transition the slot state from ENABLED to DEFAULT.
3400 * see xHCI 1.1 4.5.3, 3.3.4
3401 * Should be called without sc_lock held.
3402 */
3403 static usbd_status
3404 xhci_address_device(struct xhci_softc * const sc,
3405 uint64_t icp, uint8_t slot_id, bool bsr)
3406 {
3407 struct xhci_soft_trb trb;
3408 usbd_status err;
3409
3410 XHCIHIST_FUNC();
3411 if (bsr) {
3412 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr",
3413 icp, slot_id, 0, 0);
3414 } else {
3415 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr",
3416 icp, slot_id, 0, 0);
3417 }
3418
3419 trb.trb_0 = icp;
3420 trb.trb_2 = 0;
3421 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) |
3422 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) |
3423 (bsr ? XHCI_TRB_3_BSR_BIT : 0);
3424
3425 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3426
3427 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS)
3428 err = USBD_NO_ADDR;
3429
3430 return err;
3431 }
3432
3433 static usbd_status
3434 xhci_update_ep0_mps(struct xhci_softc * const sc,
3435 struct xhci_slot * const xs, u_int mps)
3436 {
3437 struct xhci_soft_trb trb;
3438 usbd_status err;
3439 uint32_t * cp;
3440
3441 XHCIHIST_FUNC();
3442 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0);
3443
3444 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3445 cp[0] = htole32(0);
3446 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL));
3447
3448 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL));
3449 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps));
3450
3451 /* sync input contexts before they are read from memory */
3452 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3453 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3454 sc->sc_ctxsz * 4);
3455
3456 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
3457 trb.trb_2 = 0;
3458 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
3459 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX);
3460
3461 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3462 return err;
3463 }
3464
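/*
 * Write entry si of the Device Context Base Address Array and sync it
 * so that the controller sees the update.
 */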
3465 static void
3466 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si)
3467 {
3468 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0);
3469
3470 XHCIHIST_FUNC();
3471 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd",
3472 (uintptr_t)&dcbaa[si], dcba, si, 0);
3473
3474 dcbaa[si] = htole64(dcba);
3475 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t),
3476 BUS_DMASYNC_PREWRITE);
3477 }
3478
3479 /*
3480  * Allocate the device and input context DMA buffers for a slot.
3481  * (Per-endpoint TRB rings are allocated later, in xhci_open.)
3482 */
3483 static usbd_status
3484 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3485 {
3486 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3487 struct xhci_slot *xs;
3488
3489 XHCIHIST_FUNC();
3490 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3491
3492 xs = &sc->sc_slots[slot];
3493
3494 /* allocate contexts */
3495 int err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3496 USBMALLOC_ZERO, &xs->xs_dc_dma);
3497 if (err) {
3498 DPRINTFN(1, "failed to allocmem output device context %jd",
3499 err, 0, 0, 0);
3500 return USBD_NOMEM;
3501 }
3502
3503 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3504 USBMALLOC_ZERO, &xs->xs_ic_dma);
3505 if (err) {
3506 DPRINTFN(1, "failed to allocmem input device context %jd",
3507 err, 0, 0, 0);
3508 goto bad1;
3509 }
3510
3511 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr));
3512 xs->xs_idx = slot;
3513
3514 return USBD_NORMAL_COMPLETION;
3515
3516 bad1:
3517 usb_freemem(&xs->xs_dc_dma);
3518 xs->xs_idx = 0;
3519 return USBD_NOMEM;
3520 }
3521
3522 static void
3523 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs)
3524 {
3525 u_int dci;
3526
3527 XHCIHIST_FUNC();
3528 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
3529
3530 /* deallocate all allocated rings in the slot */
3531 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
3532 if (xs->xs_xr[dci] != NULL)
3533 xhci_ring_free(sc, &xs->xs_xr[dci]);
3534 }
3535 usb_freemem(&xs->xs_ic_dma);
3536 usb_freemem(&xs->xs_dc_dma);
3537 xs->xs_idx = 0;
3538 }
3539
3540 /*
3541  * Set up the slot context, set the Device Context Base Address, and issue
3542  * an Address Device command.
3543 */
3544 static usbd_status
3545 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3546 {
3547 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3548 struct xhci_slot *xs;
3549 usbd_status err;
3550
3551 XHCIHIST_FUNC();
3552 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3553
3554 xs = &sc->sc_slots[slot];
3555
3556 xhci_setup_ctx(dev->ud_pipe0);
3557
3558 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3559 sc->sc_ctxsz * 3);
3560
3561 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3562
3563 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3564
3565 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3566 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3567 sc->sc_ctxsz * 2);
3568
3569 return err;
3570 }
3571
3572 /*
3573 * 4.8.2, 6.2.3.2
3574 * construct slot/endpoint context parameters and do syncmem
3575 */
3576 static void
3577 xhci_setup_ctx(struct usbd_pipe *pipe)
3578 {
3579 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3580 struct usbd_device *dev = pipe->up_dev;
3581 struct xhci_slot * const xs = dev->ud_hcpriv;
3582 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3583 const u_int dci = xhci_ep_get_dci(ed);
3584 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3585 uint32_t *cp;
3586 uint8_t speed = dev->ud_speed;
3587
3588 XHCIHIST_FUNC();
3589 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3590 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3591
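	/*
	 * The input context is laid out as the input control context,
	 * followed by the slot context, followed by one endpoint context
	 * per DCI (see xhci_dci_to_ici).
	 */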
3592 /* set up initial input control context */
3593 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3594 cp[0] = htole32(0);
3595 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3596 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT));
3597 cp[7] = htole32(0);
3598
3599 /* set up input slot context */
3600 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
3601 cp[0] =
3602 XHCI_SCTX_0_CTX_NUM_SET(dci) |
3603 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed));
3604 cp[1] = 0;
3605 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0);
3606 cp[3] = 0;
3607 xhci_setup_route(pipe, cp);
3608 xhci_setup_tthub(pipe, cp);
3609
3610 cp[0] = htole32(cp[0]);
3611 cp[1] = htole32(cp[1]);
3612 cp[2] = htole32(cp[2]);
3613 cp[3] = htole32(cp[3]);
3614
3615 /* set up input endpoint context */
3616 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci));
3617 cp[0] =
3618 XHCI_EPCTX_0_EPSTATE_SET(0) |
3619 XHCI_EPCTX_0_MULT_SET(0) |
3620 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) |
3621 XHCI_EPCTX_0_LSA_SET(0) |
3622 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0);
3623 cp[1] =
3624 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) |
3625 XHCI_EPCTX_1_HID_SET(0) |
3626 XHCI_EPCTX_1_MAXB_SET(0);
3627
3628 if (xfertype != UE_ISOCHRONOUS)
3629 cp[1] |= XHCI_EPCTX_1_CERR_SET(3);
3630
3631 xhci_setup_maxburst(pipe, cp);
3632
3633 DPRINTFN(4, "setting on dci %ju ival %ju mult %ju mps %#jx",
3634 dci, XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_0_MULT_GET(cp[0]),
3635 XHCI_EPCTX_1_MAXP_SIZE_GET(cp[1]));
3636 DPRINTFN(4, " maxburst %ju mep %#jx atl %#jx",
3637 XHCI_EPCTX_1_MAXB_GET(cp[1]),
3638 (XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_GET(cp[0]) << 16) +
3639 XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_GET(cp[4]),
3640 XHCI_EPCTX_4_AVG_TRB_LEN_GET(cp[4]), 0);
3641
3642 /* rewind TR dequeue pointer in xHC */
3643 /* can't use xhci_ep_get_dci() yet? */
3644 *(uint64_t *)(&cp[2]) = htole64(
3645 xhci_ring_trbp(xs->xs_xr[dci], 0) |
3646 XHCI_EPCTX_2_DCS_SET(1));
3647
3648 cp[0] = htole32(cp[0]);
3649 cp[1] = htole32(cp[1]);
3650 cp[4] = htole32(cp[4]);
3651
3652 /* rewind TR dequeue pointer in driver */
3653 struct xhci_ring *xr = xs->xs_xr[dci];
3654 mutex_enter(&xr->xr_lock);
3655 xhci_host_dequeue(xr);
3656 mutex_exit(&xr->xr_lock);
3657
3658 /* sync input contexts before they are read from memory */
3659 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3660 }
3661
3662 /*
3663 * Setup route string and roothub port of given device for slot context
3664 */
3665 static void
3666 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3667 {
3668 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3669 struct usbd_device *dev = pipe->up_dev;
3670 struct usbd_port *up = dev->ud_powersrc;
3671 struct usbd_device *hub;
3672 struct usbd_device *adev;
3673 uint8_t rhport = 0;
3674 uint32_t route = 0;
3675
3676 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3677
3678 	/* Locate the root hub port and determine the route string. */
3679 	/* 4.3.3: the route string does not include the root hub port. */
3680 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3681 uint32_t dep;
3682
3683 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd",
3684 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3685 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3686 -1);
3687
3688 if (hub->ud_powersrc == NULL)
3689 break;
3690 dep = hub->ud_depth;
3691 if (dep == 0)
3692 break;
3693 rhport = hub->ud_powersrc->up_portno;
3694 if (dep > USB_HUB_MAX_DEPTH)
3695 continue;
3696
3697 route |=
3698 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3699 << ((dep - 1) * 4);
3700 }
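	/*
	 * Drop the lowest nibble: it is the root hub port, which is not
	 * part of the route string.
	 */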
3701 route = route >> 4;
3702 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
3703
3704 /* Locate port on upstream high speed hub */
3705 for (adev = dev, hub = up->up_parent;
3706 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3707 adev = hub, hub = hub->ud_myhub)
3708 ;
3709 if (hub) {
3710 int p;
3711 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3712 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) {
3713 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1];
3714 goto found;
3715 }
3716 }
3717 panic("%s: cannot find HS port", __func__);
3718 found:
3719 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3720 } else {
3721 dev->ud_myhsport = NULL;
3722 }
3723
3724 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3725
3726 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3727 ctlrport, route, (uintptr_t)hub);
3728
3729 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3730 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3731 }
3732
3733 /*
3734  * Set up whether the device is a hub, whether it uses MTT, and
3735  * the TT information if it does.
3736 */
3737 static void
3738 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3739 {
3740 struct usbd_device *dev = pipe->up_dev;
3741 struct usbd_port *myhsport = dev->ud_myhsport;
3742 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3743 uint32_t speed = dev->ud_speed;
3744 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3745 uint8_t tthubslot, ttportnum;
3746 bool ishub;
3747 bool usemtt;
3748
3749 XHCIHIST_FUNC();
3750
3751 /*
3752 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3753  * tthubslot:
3754  *   The slot ID of the parent HS hub when an LS/FS device is
3755  *   connected through that HS hub.
3756  *   0 if the device is not LS/FS,
3757  *     the parent hub is not a HS hub,
3758  *     or the device is attached to the root hub.
3759  * ttportnum:
3760  *   The downstream-facing port of the parent HS hub when an
3761  *   LS/FS device is connected through it.
3762  *   0 if the device is not LS/FS,
3763  *     the parent hub is not a HS hub,
3764  *     or the device is attached to the root hub.
3765 */
3766 if (myhsport &&
3767 myhsport->up_parent->ud_addr != rhaddr &&
3768 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3769 ttportnum = myhsport->up_portno;
3770 tthubslot = myhsport->up_parent->ud_addr;
3771 } else {
3772 ttportnum = 0;
3773 tthubslot = 0;
3774 }
3775 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd",
3776 (uintptr_t)myhsport, ttportnum, tthubslot, 0);
3777
3778 /* ishub is valid after reading UDESC_DEVICE */
3779 ishub = (dd->bDeviceClass == UDCLASS_HUB);
3780
3781 /* dev->ud_hub is valid after reading UDESC_HUB */
3782 if (ishub && dev->ud_hub) {
3783 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc;
3784 uint8_t ttt =
3785 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK);
3786
3787 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts);
3788 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt);
3789 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0);
3790 }
3791
3792 #define IS_MTTHUB(dd) \
3793 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT)
3794
3795 /*
3796 * MTT flag is set if
3797  * 1. this is a HS hub && MTTs are supported and enabled; or
3798  * 2. this is an LS or FS device && there is a parent HS hub where MTTs
3799 * are supported and enabled.
3800 *
3801 * XXX enabled is not tested yet
3802 */
3803 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd))
3804 usemtt = true;
3805 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) &&
3806 myhsport &&
3807 myhsport->up_parent->ud_addr != rhaddr &&
3808 IS_MTTHUB(&myhsport->up_parent->ud_ddesc))
3809 usemtt = true;
3810 else
3811 usemtt = false;
3812 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd",
3813 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt);
3814
3815 #undef IS_MTTHUB
3816
3817 cp[0] |=
3818 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) |
3819 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0);
3820 cp[2] |=
3821 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) |
3822 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum);
3823 }
3824
3825 static const usb_endpoint_ss_comp_descriptor_t *
3826 xhci_get_essc_desc(struct usbd_pipe *pipe)
3827 {
3828 struct usbd_device *dev = pipe->up_dev;
3829 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3830 const usb_cdc_descriptor_t *cdcd;
3831 usbd_desc_iter_t iter;
3832 uint8_t ep;
3833
3834 /* config desc is NULL when opening ep0 */
3835 if (dev == NULL || dev->ud_cdesc == NULL)
3836 return NULL;
3837
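	/*
	 * Walk the configuration descriptors starting at the device's first
	 * interface descriptor; usb_cdc_descriptor_t is used here only as a
	 * generic descriptor header (bLength/bDescriptorType).
	 */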
3838 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev,
3839 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY);
3840 if (cdcd == NULL)
3841 return NULL;
3842
3843 usb_desc_iter_init(dev, &iter);
3844 iter.cur = (const void *)cdcd;
3845
3846 /* find endpoint_ss_comp desc for ep of this pipe */
3847 for (ep = 0;;) {
3848 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter);
3849 if (cdcd == NULL)
3850 break;
3851 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) {
3852 ep = ((const usb_endpoint_descriptor_t *)cdcd)->
3853 bEndpointAddress;
3854 if (UE_GET_ADDR(ep) ==
3855 UE_GET_ADDR(ed->bEndpointAddress)) {
3856 cdcd = (const usb_cdc_descriptor_t *)
3857 usb_desc_iter_next(&iter);
3858 break;
3859 }
3860 ep = 0;
3861 }
3862 }
3863 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
3864 return (const usb_endpoint_ss_comp_descriptor_t *)cdcd;
3865 }
3866 return NULL;
3867 }
3868
3869 /* Set up maxburst, mult, interval, MEP and ATL for the endpoint context */
3870 static void
3871 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp)
3872 {
3873 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
3874 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3875 struct usbd_device * const dev = pipe->up_dev;
3876 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3877 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3878 uint16_t mps = UGETW(ed->wMaxPacketSize);
3879 uint8_t speed = dev->ud_speed;
3880 uint32_t maxb, mep, atl;
3881 uint8_t ival, mult;
3882
3883 const usb_endpoint_ss_comp_descriptor_t * esscd =
3884 xhci_get_essc_desc(pipe);
3885
3886 /* USB 2.0 9.6.6, xHCI 4.8.2.4, 6.2.3.2 - 6.2.3.8 */
3887 switch (xfertype) {
3888 case UE_ISOCHRONOUS:
3889 case UE_INTERRUPT:
3890 if (USB_IS_SS(speed)) {
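			/*
			 * SS periodic: Max Burst and Max ESIT Payload come
			 * from the SuperSpeed endpoint companion descriptor
			 * when present, otherwise fall back to values derived
			 * from wMaxPacketSize.  For isoc, Mult comes from the
			 * companion attributes unless the controller supports
			 * LEC.
			 */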
3891 maxb = esscd ? esscd->bMaxBurst : UE_GET_TRANS(mps);
3892 mep = esscd ? UGETW(esscd->wBytesPerInterval) :
3893 UE_GET_SIZE(mps) * (maxb + 1);
3894 if (esscd && xfertype == UE_ISOCHRONOUS &&
3895 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) {
3896 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes);
3897 mult = (mult > 2) ? 2 : mult;
3898 } else
3899 mult = 0;
3900
3901 } else {
3902 switch (speed) {
3903 case USB_SPEED_HIGH:
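				/*
				 * HS periodic: bits 12:11 of wMaxPacketSize
				 * encode additional transaction opportunities
				 * per microframe (USB 2.0 9.6.6).
				 */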
3904 maxb = UE_GET_TRANS(mps);
3905 mep = UE_GET_SIZE(mps) * (maxb + 1);
3906 break;
3907 case USB_SPEED_FULL:
3908 maxb = 0;
3909 mep = UE_GET_SIZE(mps);
3910 break;
3911 default:
3912 maxb = 0;
3913 mep = 0;
3914 break;
3915 }
3916 mult = 0;
3917 }
3918 mps = UE_GET_SIZE(mps);
3919
3920 if (pipe->up_interval == USBD_DEFAULT_INTERVAL)
3921 ival = ed->bInterval;
3922 else
3923 ival = pipe->up_interval;
3924
3925 ival = xhci_bival2ival(ival, speed, xfertype);
3926 atl = mep;
3927 break;
3928 case UE_CONTROL:
3929 case UE_BULK:
3930 default:
3931 if (USB_IS_SS(speed)) {
3932 maxb = esscd ? esscd->bMaxBurst : 0;
3933 } else
3934 maxb = 0;
3935
3936 mps = UE_GET_SIZE(mps);
3937 mep = 0;
3938 mult = 0;
3939 ival = 0;
3940 if (xfertype == UE_CONTROL)
3941 atl = 8; /* 6.2.3 */
3942 else
3943 atl = mps;
3944 break;
3945 }
3946
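	/* Clamp Max ESIT Payload to the per-speed limits. */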
3947 switch (speed) {
3948 case USB_SPEED_LOW:
3949 break;
3950 case USB_SPEED_FULL:
3951 if (xfertype == UE_INTERRUPT)
3952 if (mep > XHCI_EPCTX_MEP_FS_INTR)
3953 mep = XHCI_EPCTX_MEP_FS_INTR;
3954 if (xfertype == UE_ISOCHRONOUS)
3955 if (mep > XHCI_EPCTX_MEP_FS_ISOC)
3956 mep = XHCI_EPCTX_MEP_FS_ISOC;
3957 break;
3958 case USB_SPEED_HIGH:
3959 if (xfertype == UE_INTERRUPT)
3960 if (mep > XHCI_EPCTX_MEP_HS_INTR)
3961 mep = XHCI_EPCTX_MEP_HS_INTR;
3962 if (xfertype == UE_ISOCHRONOUS)
3963 if (mep > XHCI_EPCTX_MEP_HS_ISOC)
3964 mep = XHCI_EPCTX_MEP_HS_ISOC;
3965 break;
3966 case USB_SPEED_SUPER:
3967 case USB_SPEED_SUPER_PLUS:
3968 default:
3969 if (xfertype == UE_INTERRUPT)
3970 if (mep > XHCI_EPCTX_MEP_SS_INTR)
3971 mep = XHCI_EPCTX_MEP_SS_INTR;
3972 if (xfertype == UE_ISOCHRONOUS) {
3973 if (speed == USB_SPEED_SUPER ||
3974 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) {
3975 if (mep > XHCI_EPCTX_MEP_SS_ISOC)
3976 mep = XHCI_EPCTX_MEP_SS_ISOC;
3977 } else {
3978 if (mep > XHCI_EPCTX_MEP_SS_ISOC_LEC)
3979 mep = XHCI_EPCTX_MEP_SS_ISOC_LEC;
3980 }
3981 }
3982 break;
3983 }
3984
3985 xpipe->xp_maxb = maxb + 1;
3986 xpipe->xp_mult = mult + 1;
3987
3988 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(mep >> 16);
3989 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3990 cp[0] |= XHCI_EPCTX_0_MULT_SET(mult);
3991 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps);
3992 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3993 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(mep & 0xffff);
3994 cp[4] |= XHCI_EPCTX_4_AVG_TRB_LEN_SET(atl);
3995 }
3996
3997 /*
3998 * Convert usbdi bInterval value to xhci endpoint context interval value
3999  * for periodic pipes.
4000 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
4001 */
4002 static uint32_t
4003 xhci_bival2ival(uint32_t ival, uint32_t speed, uint32_t xfertype)
4004 {
4005 if (xfertype != UE_INTERRUPT && xfertype != UE_ISOCHRONOUS)
4006 return 0;
4007
4008 if (xfertype == UE_INTERRUPT &&
4009 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
4010 u_int i;
4011
4012 /*
4013 * round ival down to "the nearest base 2 multiple of
4014 * bInterval * 8".
4015 * bInterval is at most 255 as its type is uByte.
4016 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
4017 */
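		/*
		 * e.g. bInterval = 10 (ms) => 80 microframes; the largest
		 * power of two not exceeding 80 is 2^6, so Interval becomes
		 * 6 (64 * 125us = 8ms).
		 */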
4018 for (i = 10; i > 0; i--) {
4019 if ((ival * 8) >= (1 << i))
4020 break;
4021 }
4022 ival = i;
4023
4024 /* 3 - 10 */
4025 ival = (ival < 3) ? 3 : ival;
4026 } else if (speed == USB_SPEED_FULL) {
4027 /* FS isoc */
4028 ival += 3; /* 1ms -> 125us */
4029 ival--; /* Interval = bInterval-1 */
4030 /* 3 - 18 */
4031 ival = (ival > 18) ? 18 : ival;
4032 ival = (ival < 3) ? 3 : ival;
4033 } else {
4034 /* SS/HS intr/isoc */
4035 if (ival > 0)
4036 ival--; /* Interval = bInterval-1 */
4037 /* 0 - 15 */
4038 ival = (ival > 15) ? 15 : ival;
4039 }
4040
4041 return ival;
4042 }
4043
4044 /* ----- */
4045
4046 static void
4047 xhci_noop(struct usbd_pipe *pipe)
4048 {
4049 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4050 }
4051
4052 /*
4053 * Process root hub request.
4054 */
4055 static int
4056 xhci_roothub_ctrl_locked(struct usbd_bus *bus, usb_device_request_t *req,
4057 void *buf, int buflen)
4058 {
4059 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
4060 usb_port_status_t ps;
4061 int l, totlen = 0;
4062 uint16_t len, value, index;
4063 int port, i;
4064 uint32_t v;
4065
4066 XHCIHIST_FUNC();
4067
4068 KASSERT(mutex_owned(&sc->sc_rhlock));
4069
4070 if (sc->sc_dying)
4071 return -1;
4072
4073 size_t bn = bus == &sc->sc_bus ? 0 : 1;
4074
4075 len = UGETW(req->wLength);
4076 value = UGETW(req->wValue);
4077 index = UGETW(req->wIndex);
4078
4079 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
4080 req->bmRequestType | (req->bRequest << 8), value, index, len);
4081
4082 #define C(x,y) ((x) | ((y) << 8))
4083 switch (C(req->bRequest, req->bmRequestType)) {
4084 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
4085 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
4086 if (len == 0)
4087 break;
4088 switch (value) {
4089 #define sd ((usb_string_descriptor_t *)buf)
4090 case C(2, UDESC_STRING):
4091 /* Product */
4092 totlen = usb_makestrdesc(sd, len, "xHCI root hub");
4093 break;
4094 #undef sd
4095 default:
4096 /* default from usbroothub */
4097 return buflen;
4098 }
4099 break;
4100
4101 /* Hub requests */
4102 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
4103 break;
4104 /* Clear Port Feature request */
4105 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
4106 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4107
4108 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
4109 index, value, bn, cp);
4110 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4111 return -1;
4112 }
4113 port = XHCI_PORTSC(cp);
4114 v = xhci_op_read_4(sc, port);
4115 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
4116 v &= ~XHCI_PS_CLEAR;
4117 switch (value) {
4118 case UHF_PORT_ENABLE:
4119 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
4120 break;
4121 case UHF_PORT_SUSPEND:
4122 return -1;
4123 case UHF_PORT_POWER:
4124 break;
4125 case UHF_PORT_TEST:
4126 case UHF_PORT_INDICATOR:
4127 return -1;
4128 case UHF_C_PORT_CONNECTION:
4129 xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
4130 break;
4131 case UHF_C_PORT_ENABLE:
4132 case UHF_C_PORT_SUSPEND:
4133 case UHF_C_PORT_OVER_CURRENT:
4134 return -1;
4135 case UHF_C_BH_PORT_RESET:
4136 xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
4137 break;
4138 case UHF_C_PORT_RESET:
4139 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4140 break;
4141 case UHF_C_PORT_LINK_STATE:
4142 xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
4143 break;
4144 case UHF_C_PORT_CONFIG_ERROR:
4145 xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
4146 break;
4147 default:
4148 return -1;
4149 }
4150 break;
4151 }
4152 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
4153 if (len == 0)
4154 break;
4155 if ((value & 0xff) != 0) {
4156 return -1;
4157 }
4158 usb_hub_descriptor_t hubd;
4159
4160 totlen = uimin(buflen, sizeof(hubd));
4161 memcpy(&hubd, buf, totlen);
4162 hubd.bNbrPorts = sc->sc_rhportcount[bn];
4163 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
4164 hubd.bPwrOn2PwrGood = 200;
4165 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
4166 /* XXX can't find out? */
4167 hubd.DeviceRemovable[i++] = 0;
4168 }
4169 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
4170 totlen = uimin(totlen, hubd.bDescLength);
4171 memcpy(buf, &hubd, totlen);
4172 break;
4173 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
4174 if (len != 4) {
4175 return -1;
4176 }
4177 memset(buf, 0, len); /* ? XXX */
4178 totlen = len;
4179 break;
4180 /* Get Port Status request */
4181 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
4182 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4183
4184 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
4185 bn, index, cp, 0);
4186 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4187 DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
4188 "portcount=%jd",
4189 index, bn, sc->sc_rhportcount[bn], 0);
4190 return -1;
4191 }
4192 if (len != 4) {
4193 DPRINTFN(5, "bad get port status: len %jd != 4",
4194 len, 0, 0, 0);
4195 return -1;
4196 }
4197 v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
4198 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
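		/* Translate PORTSC into hub-class wPortStatus/wPortChange bits. */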
4199 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
4200 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
4201 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
4202 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
4203 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
4204 if (v & XHCI_PS_PR) i |= UPS_RESET;
4205 if (v & XHCI_PS_PP) {
4206 if (i & UPS_OTHER_SPEED)
4207 i |= UPS_PORT_POWER_SS;
4208 else
4209 i |= UPS_PORT_POWER;
4210 }
4211 if (i & UPS_OTHER_SPEED)
4212 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
4213 if (sc->sc_vendor_port_status)
4214 i = sc->sc_vendor_port_status(sc, v, i);
4215 USETW(ps.wPortStatus, i);
4216 i = 0;
4217 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
4218 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
4219 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
4220 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
4221 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
4222 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
4223 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
4224 USETW(ps.wPortChange, i);
4225 totlen = uimin(len, sizeof(ps));
4226 memcpy(buf, &ps, totlen);
4227 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
4228 " totlen %jd",
4229 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
4230 break;
4231 }
4232 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
4233 return -1;
4234 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
4235 break;
4236 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
4237 break;
4238 /* Set Port Feature request */
4239 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
4240 int optval = (index >> 8) & 0xff;
4241 index &= 0xff;
4242 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4243 return -1;
4244 }
4245
4246 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4247
4248 port = XHCI_PORTSC(cp);
4249 v = xhci_op_read_4(sc, port);
4250 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
4251 v &= ~XHCI_PS_CLEAR;
4252 switch (value) {
4253 case UHF_PORT_ENABLE:
4254 xhci_op_write_4(sc, port, v | XHCI_PS_PED);
4255 break;
4256 case UHF_PORT_SUSPEND:
4257 /* XXX suspend */
4258 break;
4259 case UHF_PORT_RESET:
4260 xhci_op_write_4(sc, port, v | XHCI_PS_PR);
4261 /* Wait for reset to complete. */
4262 for (i = 0; i < USB_PORT_ROOT_RESET_DELAY / 10; i++) {
4263 if (sc->sc_dying) {
4264 return -1;
4265 }
4266 v = xhci_op_read_4(sc, port);
4267 if ((v & XHCI_PS_PR) == 0) {
4268 break;
4269 }
4270 usb_delay_ms(&sc->sc_bus, 10);
4271 }
4272 break;
4273 case UHF_PORT_POWER:
4274 /* XXX power control */
4275 break;
4276 /* XXX more */
4277 case UHF_C_PORT_RESET:
4278 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4279 break;
4280 case UHF_PORT_U1_TIMEOUT:
4281 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4282 return -1;
4283 }
4284 port = XHCI_PORTPMSC(cp);
4285 v = xhci_op_read_4(sc, port);
4286 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4287 index, cp, v, 0);
4288 v &= ~XHCI_PM3_U1TO_SET(0xff);
4289 v |= XHCI_PM3_U1TO_SET(optval);
4290 xhci_op_write_4(sc, port, v);
4291 break;
4292 case UHF_PORT_U2_TIMEOUT:
4293 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4294 return -1;
4295 }
4296 port = XHCI_PORTPMSC(cp);
4297 v = xhci_op_read_4(sc, port);
4298 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4299 index, cp, v, 0);
4300 v &= ~XHCI_PM3_U2TO_SET(0xff);
4301 v |= XHCI_PM3_U2TO_SET(optval);
4302 xhci_op_write_4(sc, port, v);
4303 break;
4304 default:
4305 return -1;
4306 }
4307 }
4308 break;
4309 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
4310 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
4311 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
4312 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
4313 break;
4314 default:
4315 /* default from usbroothub */
4316 return buflen;
4317 }
4318
4319 return totlen;
4320 }
4321
4322 static int
4323 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
4324 void *buf, int buflen)
4325 {
4326 struct xhci_softc *sc = XHCI_BUS2SC(bus);
4327 int actlen;
4328
4329 mutex_enter(&sc->sc_rhlock);
4330 actlen = xhci_roothub_ctrl_locked(bus, req, buf, buflen);
4331 mutex_exit(&sc->sc_rhlock);
4332
4333 return actlen;
4334 }
4335
4336 /* root hub interrupt */
4337
4338 static usbd_status
4339 xhci_root_intr_transfer(struct usbd_xfer *xfer)
4340 {
4341 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4342
4343 	/* Pipe isn't running, so start it first. */
4344 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4345 }
4346
4347 /* Wait for roothub port status/change */
4348 static usbd_status
4349 xhci_root_intr_start(struct usbd_xfer *xfer)
4350 {
4351 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4352 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4353
4354 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4355
4356 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4357
4358 if (sc->sc_dying)
4359 return USBD_IOERROR;
4360
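	/*
	 * Stash the xfer; it is completed later from the event path when a
	 * root hub port status change is reported.
	 */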
4361 KASSERT(sc->sc_intrxfer[bn] == NULL);
4362 sc->sc_intrxfer[bn] = xfer;
4363 xfer->ux_status = USBD_IN_PROGRESS;
4364
4365 return USBD_IN_PROGRESS;
4366 }
4367
4368 static void
4369 xhci_root_intr_abort(struct usbd_xfer *xfer)
4370 {
4371 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4372 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4373
4374 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4375
4376 KASSERT(mutex_owned(&sc->sc_lock));
4377 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
4378
4379 /* If xfer has already completed, nothing to do here. */
4380 if (sc->sc_intrxfer[bn] == NULL)
4381 return;
4382
4383 /*
4384 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
4385 * Cancel it.
4386 */
4387 KASSERT(sc->sc_intrxfer[bn] == xfer);
4388 xfer->ux_status = USBD_CANCELLED;
4389 usb_transfer_complete(xfer);
4390 }
4391
4392 static void
4393 xhci_root_intr_close(struct usbd_pipe *pipe)
4394 {
4395 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
4396 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
4397 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4398
4399 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4400
4401 KASSERT(mutex_owned(&sc->sc_lock));
4402
4403 /*
4404 * Caller must guarantee the xfer has completed first, by
4405 * closing the pipe only after normal completion or an abort.
4406 */
4407 KASSERT(sc->sc_intrxfer[bn] == NULL);
4408 }
4409
4410 static void
4411 xhci_root_intr_done(struct usbd_xfer *xfer)
4412 {
4413 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4414 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4415
4416 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4417
4418 KASSERT(mutex_owned(&sc->sc_lock));
4419
4420 /* Claim the xfer so it doesn't get completed again. */
4421 KASSERT(sc->sc_intrxfer[bn] == xfer);
4422 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
4423 sc->sc_intrxfer[bn] = NULL;
4424 }
4425
4426 /* -------------- */
4427 /* device control */
4428
4429 static usbd_status
4430 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
4431 {
4432 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4433
4434 	/* Pipe isn't running, so start it first. */
4435 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4436 }
4437
4438 static usbd_status
4439 xhci_device_ctrl_start(struct usbd_xfer *xfer)
4440 {
4441 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4442 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4443 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4444 struct xhci_ring * const tr = xs->xs_xr[dci];
4445 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4446 usb_device_request_t * const req = &xfer->ux_request;
4447 const bool isread = usbd_xfer_isread(xfer);
4448 const uint32_t len = UGETW(req->wLength);
4449 usb_dma_t * const dma = &xfer->ux_dmabuf;
4450 uint64_t parameter;
4451 uint32_t status;
4452 uint32_t control;
4453 u_int i;
4454 const bool polling = xhci_polling_p(sc);
4455
4456 XHCIHIST_FUNC();
4457 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
4458 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
4459 UGETW(req->wIndex), UGETW(req->wLength));
4460
4461 KASSERT(polling || mutex_owned(&sc->sc_lock));
4462
4463 /* we rely on the bottom bits for extra info */
4464 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %p", xfer);
4465
4466 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);
4467
4468 if (tr->is_halted)
4469 goto out;
4470
4471 i = 0;
4472
4473 /* setup phase */
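	/*
	 * The 8-byte SETUP packet travels as immediate data (IDT) in the TRB
	 * parameter field; TRT tells the xHC whether a data stage follows
	 * and in which direction.
	 */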
4474 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */
4475 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
4476 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
4477 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
4478 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
4479 XHCI_TRB_3_IDT_BIT;
4480 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4481
4482 if (len != 0) {
4483 /* data phase */
4484 parameter = DMAADDR(dma, 0);
4485 KASSERTMSG(len <= 0x10000, "len %d", len);
4486 status = XHCI_TRB_2_IRQ_SET(0) |
4487 XHCI_TRB_2_TDSZ_SET(0) |
4488 XHCI_TRB_2_BYTES_SET(len);
4489 control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
4490 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
4491 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4492 XHCI_TRB_3_IOC_BIT;
4493 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4494
4495 usb_syncmem(dma, 0, len,
4496 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4497 }
4498
4499 parameter = 0;
4500 status = XHCI_TRB_2_IRQ_SET(0);
4501 /* the status stage has inverted direction */
4502 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
4503 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
4504 XHCI_TRB_3_IOC_BIT;
4505 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4506
4507 if (!polling)
4508 mutex_enter(&tr->xr_lock);
4509 xhci_ring_put_xfer(sc, tr, xx, i);
4510 if (!polling)
4511 mutex_exit(&tr->xr_lock);
4512
4513 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4514
4515 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4516 xfer->ux_status = USBD_IN_PROGRESS;
4517 usbd_xfer_schedule_timeout(xfer);
4518 } else {
4519 /*
4520 * We must be coming from xhci_pipe_restart -- timeout
4521 * already set up, nothing to do.
4522 */
4523 }
4524 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4525
4526 return USBD_IN_PROGRESS;
4527 }
4528
4529 static void
4530 xhci_device_ctrl_done(struct usbd_xfer *xfer)
4531 {
4532 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4533 usb_device_request_t *req = &xfer->ux_request;
4534 int len = UGETW(req->wLength);
4535 int rd = req->bmRequestType & UT_READ;
4536
4537 if (len)
4538 usb_syncmem(&xfer->ux_dmabuf, 0, len,
4539 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4540 }
4541
4542 static void
4543 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
4544 {
4545 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4546
4547 usbd_xfer_abort(xfer);
4548 }
4549
4550 static void
4551 xhci_device_ctrl_close(struct usbd_pipe *pipe)
4552 {
4553 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4554
4555 xhci_close_pipe(pipe);
4556 }
4557
4558 /* ------------------ */
4559 /* device isochronous */
4560
4561 static usbd_status
4562 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
4563 {
4564 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4565
4566 return xhci_device_isoc_enter(xfer);
4567 }
4568
4569 static usbd_status
4570 xhci_device_isoc_enter(struct usbd_xfer *xfer)
4571 {
4572 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4573 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4574 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4575 struct xhci_ring * const tr = xs->xs_xr[dci];
4576 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4577 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
4578 usb_dma_t * const dma = &xfer->ux_dmabuf;
4579 uint64_t parameter;
4580 uint32_t status;
4581 uint32_t control;
4582 uint32_t offs;
4583 int i, ival;
4584 const bool polling = xhci_polling_p(sc);
4585 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
4586 const uint16_t mps = UE_GET_SIZE(MPS);
4587 const uint8_t maxb = xpipe->xp_maxb;
4588
4589 XHCIHIST_FUNC();
4590 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4591 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4592
4593 KASSERT(polling || mutex_owned(&sc->sc_lock));
4594
4595 if (sc->sc_dying)
4596 return USBD_IOERROR;
4597
4598 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
4599 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4600
4601 const bool isread = usbd_xfer_isread(xfer);
4602 if (xfer->ux_length)
4603 usb_syncmem(dma, 0, xfer->ux_length,
4604 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4605
4606 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
4607 if (ival >= 1 && ival <= 16)
4608 ival = 1 << (ival - 1);
4609 else
4610 ival = 1; /* fake something up */
4611
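	/*
	 * First isoc TD on this pipe: pick a starting frame from the current
	 * microframe index, allowing for the maximum isochronous scheduling
	 * threshold and aligning to the endpoint interval.
	 */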
4612 if (xpipe->xp_isoc_next == -1) {
4613 uint32_t mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
4614
4615 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
4616 mfindex = XHCI_MFINDEX_GET(mfindex + 1);
4617 mfindex /= USB_UFRAMES_PER_FRAME;
4618 mfindex += 7; /* 7 frames is max possible IST */
4619 xpipe->xp_isoc_next = roundup2(mfindex, ival);
4620 }
4621
4622 offs = 0;
4623 for (i = 0; i < xfer->ux_nframes; i++) {
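		/*
		 * tdpc is the number of max-packet-size packets in this
		 * frame, TBC the number of bursts minus one, and TLBPC the
		 * number of packets in the last burst minus one.
		 */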
4624 const uint32_t len = xfer->ux_frlengths[i];
4625 const unsigned tdpc = howmany(len, mps);
4626 const unsigned tbc = howmany(tdpc, maxb) - 1;
4627 const unsigned tlbpc1 = tdpc % maxb;
4628 const unsigned tlbpc = tlbpc1 ? tlbpc1 - 1 : maxb - 1;
4629
4630 KASSERTMSG(len <= 0x10000, "len %d", len);
4631 parameter = DMAADDR(dma, offs);
4632 status = XHCI_TRB_2_IRQ_SET(0) |
4633 XHCI_TRB_2_TDSZ_SET(0) |
4634 XHCI_TRB_2_BYTES_SET(len);
4635 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
4636 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4637 XHCI_TRB_3_TBC_SET(tbc) |
4638 XHCI_TRB_3_TLBPC_SET(tlbpc) |
4639 XHCI_TRB_3_IOC_BIT;
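		/*
		 * If the controller advertises Contiguous Frame ID capability,
		 * supply an explicit frame ID for each isoc TD; otherwise set
		 * Start Isoch ASAP and let the xHC schedule it.
		 */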
4640 if (XHCI_HCC_CFC(sc->sc_hcc)) {
4641 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4642 #if 0
4643 } else if (xpipe->xp_isoc_next == -1) {
4644 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4645 #endif
4646 } else {
4647 control |= XHCI_TRB_3_ISO_SIA_BIT;
4648 }
4649 #if 0
4650 if (i != xfer->ux_nframes - 1)
4651 control |= XHCI_TRB_3_BEI_BIT;
4652 #endif
4653 xhci_xfer_put_trb(xx, i, parameter, status, control);
4654
4655 xpipe->xp_isoc_next += ival;
4656 offs += len;
4657 }
4658
4659 xx->xx_isoc_done = 0;
4660
4661 if (!polling)
4662 mutex_enter(&tr->xr_lock);
4663 xhci_ring_put_xfer(sc, tr, xx, i);
4664 if (!polling)
4665 mutex_exit(&tr->xr_lock);
4666
4667 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4668 xfer->ux_status = USBD_IN_PROGRESS;
4669 usbd_xfer_schedule_timeout(xfer);
4670
4671 return USBD_IN_PROGRESS;
4672 }
4673
4674 static void
4675 xhci_device_isoc_abort(struct usbd_xfer *xfer)
4676 {
4677 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4678
4679 usbd_xfer_abort(xfer);
4680 }
4681
4682 static void
4683 xhci_device_isoc_close(struct usbd_pipe *pipe)
4684 {
4685 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4686
4687 xhci_close_pipe(pipe);
4688 }
4689
4690 static void
4691 xhci_device_isoc_done(struct usbd_xfer *xfer)
4692 {
4693 #ifdef USB_DEBUG
4694 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4695 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4696 #endif
4697 const bool isread = usbd_xfer_isread(xfer);
4698
4699 XHCIHIST_FUNC();
4700 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4701 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4702
4703 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4704 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4705 }
4706
4707 /* ----------- */
4708 /* device bulk */
4709
4710 static usbd_status
4711 xhci_device_bulk_transfer(struct usbd_xfer *xfer)
4712 {
4713 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4714
4715 /* Pipe isn't running, so start it first. */
4716 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4717 }
4718
4719 static usbd_status
4720 xhci_device_bulk_start(struct usbd_xfer *xfer)
4721 {
4722 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4723 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4724 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4725 struct xhci_ring * const tr = xs->xs_xr[dci];
4726 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4727 const uint32_t len = xfer->ux_length;
4728 usb_dma_t * const dma = &xfer->ux_dmabuf;
4729 uint64_t parameter;
4730 uint32_t status;
4731 uint32_t control;
4732 u_int i = 0;
4733 const bool polling = xhci_polling_p(sc);
4734
4735 XHCIHIST_FUNC();
4736 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4737 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4738
4739 KASSERT(polling || mutex_owned(&sc->sc_lock));
4740
4741 if (sc->sc_dying)
4742 return USBD_IOERROR;
4743
4744 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4745
4746 if (tr->is_halted)
4747 goto out;
4748
4749 parameter = DMAADDR(dma, 0);
4750 const bool isread = usbd_xfer_isread(xfer);
4751 if (len)
4752 usb_syncmem(dma, 0, len,
4753 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4754
4755 /*
4756 	 * XXX: (dsl) The physical buffer must not cross a 64k boundary.
4757 	 * If the user-supplied buffer crosses such a boundary then two
4758 	 * (or more) TRBs should be used.
4759 	 * If multiple TRBs are used the td_size field must be set correctly.
4760 	 * For v1.0 devices (like Ivy Bridge) this is the number of USB data
4761 	 * blocks needed to complete the transfer.
4762 	 * Setting it to 1 in the last TRB causes an extra zero-length
4763 	 * data block to be sent.
4764 	 * The earlier documentation differs; I don't know how it behaves.
4765 */
4766 KASSERTMSG(len <= 0x10000, "len %d", len);
4767 status = XHCI_TRB_2_IRQ_SET(0) |
4768 XHCI_TRB_2_TDSZ_SET(0) |
4769 XHCI_TRB_2_BYTES_SET(len);
4770 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4771 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4772 XHCI_TRB_3_IOC_BIT;
4773 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4774
4775 if (!polling)
4776 mutex_enter(&tr->xr_lock);
4777 xhci_ring_put_xfer(sc, tr, xx, i);
4778 if (!polling)
4779 mutex_exit(&tr->xr_lock);
4780
4781 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4782
4783 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4784 xfer->ux_status = USBD_IN_PROGRESS;
4785 usbd_xfer_schedule_timeout(xfer);
4786 } else {
4787 /*
4788 * We must be coming from xhci_pipe_restart -- timeout
4789 * already set up, nothing to do.
4790 */
4791 }
4792 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4793
4794 return USBD_IN_PROGRESS;
4795 }
4796
4797 static void
4798 xhci_device_bulk_done(struct usbd_xfer *xfer)
4799 {
4800 #ifdef USB_DEBUG
4801 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4802 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4803 #endif
4804 const bool isread = usbd_xfer_isread(xfer);
4805
4806 XHCIHIST_FUNC();
4807 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4808 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4809
4810 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4811 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4812 }
4813
4814 static void
4815 xhci_device_bulk_abort(struct usbd_xfer *xfer)
4816 {
4817 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4818
4819 usbd_xfer_abort(xfer);
4820 }
4821
4822 static void
4823 xhci_device_bulk_close(struct usbd_pipe *pipe)
4824 {
4825 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4826
4827 xhci_close_pipe(pipe);
4828 }
4829
4830 /* ---------------- */
4831 /* device interrupt */
4832
4833 static usbd_status
4834 xhci_device_intr_transfer(struct usbd_xfer *xfer)
4835 {
4836 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4837
4838 /* Pipe isn't running, so start it first. */
4839 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4840 }
4841
4842 static usbd_status
4843 xhci_device_intr_start(struct usbd_xfer *xfer)
4844 {
4845 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4846 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4847 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4848 struct xhci_ring * const tr = xs->xs_xr[dci];
4849 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4850 const uint32_t len = xfer->ux_length;
4851 const bool polling = xhci_polling_p(sc);
4852 usb_dma_t * const dma = &xfer->ux_dmabuf;
4853 uint64_t parameter;
4854 uint32_t status;
4855 uint32_t control;
4856 u_int i = 0;
4857
4858 XHCIHIST_FUNC();
4859 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4860 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4861
4862 KASSERT(polling || mutex_owned(&sc->sc_lock));
4863
4864 if (sc->sc_dying)
4865 return USBD_IOERROR;
4866
4867 if (tr->is_halted)
4868 goto out;
4869
4870 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4871
4872 const bool isread = usbd_xfer_isread(xfer);
4873 if (len)
4874 usb_syncmem(dma, 0, len,
4875 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4876
4877 parameter = DMAADDR(dma, 0);
4878 KASSERTMSG(len <= 0x10000, "len %d", len);
4879 status = XHCI_TRB_2_IRQ_SET(0) |
4880 XHCI_TRB_2_TDSZ_SET(0) |
4881 XHCI_TRB_2_BYTES_SET(len);
4882 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4883 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
4884 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4885
4886 if (!polling)
4887 mutex_enter(&tr->xr_lock);
4888 xhci_ring_put_xfer(sc, tr, xx, i);
4889 if (!polling)
4890 mutex_exit(&tr->xr_lock);
4891
4892 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4893
4894 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4895 xfer->ux_status = USBD_IN_PROGRESS;
4896 usbd_xfer_schedule_timeout(xfer);
4897 } else {
4898 /*
4899 * We must be coming from xhci_pipe_restart -- timeout
4900 * already set up, nothing to do.
4901 */
4902 }
4903 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4904
4905 return USBD_IN_PROGRESS;
4906 }
4907
4908 static void
4909 xhci_device_intr_done(struct usbd_xfer *xfer)
4910 {
4911 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4912 #ifdef USB_DEBUG
4913 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4914 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4915 #endif
4916 const bool isread = usbd_xfer_isread(xfer);
4917
4918 XHCIHIST_FUNC();
4919 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4920 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4921
4922 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4923
4924 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4925 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4926 }
4927
4928 static void
4929 xhci_device_intr_abort(struct usbd_xfer *xfer)
4930 {
4931 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4932
4933 XHCIHIST_FUNC();
4934 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);
4935
4936 KASSERT(mutex_owned(&sc->sc_lock));
4937 usbd_xfer_abort(xfer);
4938 }
4939
4940 static void
4941 xhci_device_intr_close(struct usbd_pipe *pipe)
4942 {
4943 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
4944
4945 XHCIHIST_FUNC();
4946 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);
4947
4948 xhci_close_pipe(pipe);
4949 }
4950