1 /* $NetBSD: xhci.c,v 1.191 2025/10/05 20:04:30 riastradh Exp $ */ 2 3 /* 4 * Copyright (c) 2013 Jonathan A. Kollasch 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * USB rev 2.0 and rev 3.1 specification 31 * http://www.usb.org/developers/docs/ 32 * xHCI rev 1.1 specification 33 * http://www.intel.com/technology/usb/spec.htm 34 */ 35 36 #include <sys/cdefs.h> 37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.191 2025/10/05 20:04:30 riastradh Exp $"); 38 39 #ifdef _KERNEL_OPT 40 #include "opt_usb.h" 41 #endif 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/kernel.h> 46 #include <sys/kmem.h> 47 #include <sys/device.h> 48 #include <sys/select.h> 49 #include <sys/proc.h> 50 #include <sys/queue.h> 51 #include <sys/mutex.h> 52 #include <sys/condvar.h> 53 #include <sys/bus.h> 54 #include <sys/cpu.h> 55 #include <sys/sysctl.h> 56 57 #include <machine/endian.h> 58 59 #include <dev/usb/usb.h> 60 #include <dev/usb/usbdi.h> 61 #include <dev/usb/usbdivar.h> 62 #include <dev/usb/usbdi_util.h> 63 #include <dev/usb/usbhist.h> 64 #include <dev/usb/usb_mem.h> 65 #include <dev/usb/usb_quirks.h> 66 67 #include <dev/usb/xhcireg.h> 68 #include <dev/usb/xhcivar.h> 69 #include <dev/usb/usbroothub.h> 70 71 72 #ifdef USB_DEBUG 73 #ifndef XHCI_DEBUG 74 #define xhcidebug 0 75 #else /* !XHCI_DEBUG */ 76 #define HEXDUMP(a, b, c) \ 77 do { \ 78 if (xhcidebug > 0) \ 79 hexdump(printf, a, b, c); \ 80 } while (/*CONSTCOND*/0) 81 static int xhcidebug = 0; 82 83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup") 84 { 85 int err; 86 const struct sysctlnode *rnode; 87 const struct sysctlnode *cnode; 88 89 err = sysctl_createv(clog, 0, NULL, &rnode, 90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci", 91 SYSCTL_DESCR("xhci global controls"), 92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 93 94 if (err) 95 goto fail; 96 97 /* control debugging printfs */ 98 err = sysctl_createv(clog, 0, &rnode, &cnode, 99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, 100 "debug", SYSCTL_DESCR("Enable debugging output"), 101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL); 102 if (err) 103 goto fail; 104 105 return; 106 fail: 107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err); 108 } 
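
/*
 * Usage note (illustrative): with XHCI_DEBUG compiled in, the sysctl
 * knob created above can be raised at run time to enable usbhist
 * logging from the DPRINTF()/DPRINTFN() macros below, e.g.
 *
 *	sysctl -w hw.xhci.debug=16
 *
 * (verbosity levels up to 16 are used in this file); setting it back
 * to 0 silences the output again.
 */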
109 110 #endif /* !XHCI_DEBUG */ 111 #endif /* USB_DEBUG */ 112 113 #ifndef HEXDUMP 114 #define HEXDUMP(a, b, c) 115 #endif 116 117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D) 118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D) 119 #define XHCIHIST_FUNC() USBHIST_FUNC() 120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug) 121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \ 122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D) 123 124 #define XHCI_DCI_SLOT 0 125 #define XHCI_DCI_EP_CONTROL 1 126 127 #define XHCI_ICI_INPUT_CONTROL 0 128 129 struct xhci_pipe { 130 struct usbd_pipe xp_pipe; 131 struct usb_task xp_async_task; 132 int16_t xp_isoc_next; /* next micro frame */ 133 uint8_t xp_maxb; /* max burst */ 134 uint8_t xp_mult; 135 uint8_t xp_ival; 136 }; 137 138 #define XHCI_COMMAND_RING_TRBS 256 139 #define XHCI_EVENT_RING_TRBS 256 140 #define XHCI_EVENT_RING_SEGMENTS 1 141 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT 142 143 static usbd_status xhci_open(struct usbd_pipe *); 144 static void xhci_close_pipe(struct usbd_pipe *); 145 static int xhci_intr1(struct xhci_softc * const); 146 static void xhci_softintr(void *); 147 static void xhci_poll(struct usbd_bus *); 148 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int); 149 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *); 150 static void xhci_abortx(struct usbd_xfer *); 151 static bool xhci_dying(struct usbd_bus *); 152 static void xhci_get_lock(struct usbd_bus *, kmutex_t **); 153 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int, 154 struct usbd_port *); 155 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *, 156 void *, int); 157 158 static void xhci_pipe_restart(struct usbd_pipe *); 159 static void xhci_pipe_restart_async_task(void *); 160 static void xhci_pipe_restart_async(struct usbd_pipe *); 161 162 static usbd_status xhci_configure_endpoint(struct usbd_pipe *); 163 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *); 164 static void xhci_reset_endpoint(struct usbd_pipe *); 165 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *, 166 struct xhci_slot *, u_int, uint32_t); 167 static usbd_status xhci_stop_endpoint(struct usbd_pipe *); 168 169 static void xhci_host_dequeue(struct xhci_ring * const); 170 static void xhci_set_dequeue(struct usbd_pipe *); 171 172 static usbd_status xhci_do_command(struct xhci_softc * const, 173 struct xhci_soft_trb * const, int); 174 static usbd_status xhci_do_command_locked(struct xhci_softc * const, 175 struct xhci_soft_trb * const, int); 176 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t); 177 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *); 178 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool); 179 static usbd_status xhci_enable_slot(struct xhci_softc * const, 180 uint8_t * const); 181 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t); 182 static usbd_status xhci_address_device(struct xhci_softc * const, 183 uint64_t, uint8_t, bool); 184 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int); 185 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const, 186 struct xhci_slot * const, u_int); 187 static usbd_status xhci_ring_init(struct xhci_softc * const, 188 struct xhci_ring **, size_t, size_t); 189 static void xhci_ring_free(struct xhci_softc * const, 190 struct xhci_ring ** const); 191 192 static void xhci_setup_ctx(struct usbd_pipe *); 193 static void 
xhci_setup_route(struct usbd_pipe *, uint32_t *); 194 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *); 195 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *); 196 static uint32_t xhci_bival2ival(uint32_t, uint32_t, uint32_t); 197 198 static void xhci_noop(struct usbd_pipe *); 199 200 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *); 201 static usbd_status xhci_root_intr_start(struct usbd_xfer *); 202 static void xhci_root_intr_abort(struct usbd_xfer *); 203 static void xhci_root_intr_close(struct usbd_pipe *); 204 static void xhci_root_intr_done(struct usbd_xfer *); 205 206 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *); 207 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *); 208 static void xhci_device_ctrl_abort(struct usbd_xfer *); 209 static void xhci_device_ctrl_close(struct usbd_pipe *); 210 static void xhci_device_ctrl_done(struct usbd_xfer *); 211 212 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *); 213 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *); 214 static void xhci_device_isoc_abort(struct usbd_xfer *); 215 static void xhci_device_isoc_close(struct usbd_pipe *); 216 static void xhci_device_isoc_done(struct usbd_xfer *); 217 218 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *); 219 static usbd_status xhci_device_intr_start(struct usbd_xfer *); 220 static void xhci_device_intr_abort(struct usbd_xfer *); 221 static void xhci_device_intr_close(struct usbd_pipe *); 222 static void xhci_device_intr_done(struct usbd_xfer *); 223 224 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *); 225 static usbd_status xhci_device_bulk_start(struct usbd_xfer *); 226 static void xhci_device_bulk_abort(struct usbd_xfer *); 227 static void xhci_device_bulk_close(struct usbd_pipe *); 228 static void xhci_device_bulk_done(struct usbd_xfer *); 229 230 static const struct usbd_bus_methods xhci_bus_methods = { 231 .ubm_open = xhci_open, 232 .ubm_softint = xhci_softintr, 233 .ubm_dopoll = xhci_poll, 234 .ubm_allocx = xhci_allocx, 235 .ubm_freex = xhci_freex, 236 .ubm_abortx = xhci_abortx, 237 .ubm_dying = xhci_dying, 238 .ubm_getlock = xhci_get_lock, 239 .ubm_newdev = xhci_new_device, 240 .ubm_rhctrl = xhci_roothub_ctrl, 241 }; 242 243 static const struct usbd_pipe_methods xhci_root_intr_methods = { 244 .upm_transfer = xhci_root_intr_transfer, 245 .upm_start = xhci_root_intr_start, 246 .upm_abort = xhci_root_intr_abort, 247 .upm_close = xhci_root_intr_close, 248 .upm_cleartoggle = xhci_noop, 249 .upm_done = xhci_root_intr_done, 250 }; 251 252 253 static const struct usbd_pipe_methods xhci_device_ctrl_methods = { 254 .upm_transfer = xhci_device_ctrl_transfer, 255 .upm_start = xhci_device_ctrl_start, 256 .upm_abort = xhci_device_ctrl_abort, 257 .upm_close = xhci_device_ctrl_close, 258 .upm_cleartoggle = xhci_noop, 259 .upm_done = xhci_device_ctrl_done, 260 }; 261 262 static const struct usbd_pipe_methods xhci_device_isoc_methods = { 263 .upm_transfer = xhci_device_isoc_transfer, 264 .upm_abort = xhci_device_isoc_abort, 265 .upm_close = xhci_device_isoc_close, 266 .upm_cleartoggle = xhci_noop, 267 .upm_done = xhci_device_isoc_done, 268 }; 269 270 static const struct usbd_pipe_methods xhci_device_bulk_methods = { 271 .upm_transfer = xhci_device_bulk_transfer, 272 .upm_start = xhci_device_bulk_start, 273 .upm_abort = xhci_device_bulk_abort, 274 .upm_close = xhci_device_bulk_close, 275 .upm_cleartoggle = xhci_noop, 276 .upm_done = xhci_device_bulk_done, 277 }; 278 279 static const 
struct usbd_pipe_methods xhci_device_intr_methods = { 280 .upm_transfer = xhci_device_intr_transfer, 281 .upm_start = xhci_device_intr_start, 282 .upm_abort = xhci_device_intr_abort, 283 .upm_close = xhci_device_intr_close, 284 .upm_cleartoggle = xhci_noop, 285 .upm_done = xhci_device_intr_done, 286 }; 287 288 static inline uint32_t 289 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset) 290 { 291 if (ISSET(sc->sc_quirks, XHCI_32BIT_ACCESS)) { 292 uint32_t val; 293 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset & ~3); 294 return (val >> ((offset & 3) * NBBY)) & 0xff; 295 } else { 296 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset); 297 } 298 } 299 300 static inline uint32_t 301 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset) 302 { 303 if (ISSET(sc->sc_quirks, XHCI_32BIT_ACCESS)) { 304 uint32_t val; 305 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset & ~3); 306 return (val >> ((offset & 3) * NBBY)) & 0xffff; 307 } else { 308 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset); 309 } 310 } 311 312 static inline uint32_t 313 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset) 314 { 315 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset); 316 } 317 318 static inline void 319 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset, 320 uint32_t value) 321 { 322 if (ISSET(sc->sc_quirks, XHCI_32BIT_ACCESS)) { 323 const uint32_t mask = 0xffU << ((offset & 3) * NBBY); 324 uint32_t val; 325 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset & ~3); 326 val &= ~mask; 327 val |= __SHIFTIN(value, mask); 328 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset & ~3, val); 329 } else { 330 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value); 331 } 332 } 333 334 #if 0 /* unused */ 335 static inline void 336 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset, 337 uint32_t value) 338 { 339 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value); 340 } 341 #endif /* unused */ 342 343 static inline uint32_t 344 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset) 345 { 346 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset); 347 } 348 349 static inline uint32_t 350 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset) 351 { 352 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset); 353 } 354 355 static inline void 356 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset, 357 uint32_t value) 358 { 359 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value); 360 } 361 362 static inline uint64_t 363 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset) 364 { 365 uint64_t value; 366 367 #ifdef XHCI_USE_BUS_SPACE_8 368 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset); 369 #else 370 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset); 371 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh, 372 offset + 4) << 32; 373 #endif 374 375 return value; 376 } 377 378 static inline void 379 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset, 380 uint64_t value) 381 { 382 #ifdef XHCI_USE_BUS_SPACE_8 383 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value); 384 #else 385 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0, 386 (value >> 0) & 0xffffffff); 387 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4, 388 (value >> 32) & 0xffffffff); 389 #endif 390 } 391 392 static inline uint32_t 393 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset) 394 { 395 return 
bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset); 396 } 397 398 static inline void 399 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset, 400 uint32_t value) 401 { 402 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value); 403 } 404 405 static inline uint64_t 406 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset) 407 { 408 uint64_t value; 409 410 #ifdef XHCI_USE_BUS_SPACE_8 411 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset); 412 #else 413 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset); 414 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh, 415 offset + 4) << 32; 416 #endif 417 418 return value; 419 } 420 421 static inline void 422 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset, 423 uint64_t value) 424 { 425 #ifdef XHCI_USE_BUS_SPACE_8 426 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value); 427 #else 428 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0, 429 (value >> 0) & 0xffffffff); 430 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4, 431 (value >> 32) & 0xffffffff); 432 #endif 433 } 434 435 #if 0 /* unused */ 436 static inline uint32_t 437 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset) 438 { 439 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset); 440 } 441 #endif /* unused */ 442 443 static inline void 444 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset, 445 uint32_t value) 446 { 447 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value); 448 } 449 450 /* --- */ 451 452 static inline uint8_t 453 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed) 454 { 455 u_int eptype = 0; 456 457 switch (UE_GET_XFERTYPE(ed->bmAttributes)) { 458 case UE_CONTROL: 459 eptype = 0x0; 460 break; 461 case UE_ISOCHRONOUS: 462 eptype = 0x1; 463 break; 464 case UE_BULK: 465 eptype = 0x2; 466 break; 467 case UE_INTERRUPT: 468 eptype = 0x3; 469 break; 470 } 471 472 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) || 473 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)) 474 return eptype | 0x4; 475 else 476 return eptype; 477 } 478 479 static u_int 480 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed) 481 { 482 /* xHCI 1.0 section 4.5.1 */ 483 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress); 484 u_int in = 0; 485 486 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) || 487 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)) 488 in = 1; 489 490 return epaddr * 2 + in; 491 } 492 493 static inline u_int 494 xhci_dci_to_ici(const u_int i) 495 { 496 return i + 1; 497 } 498 499 static inline void * 500 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs, 501 const u_int dci) 502 { 503 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci); 504 } 505 506 #if 0 /* unused */ 507 static inline bus_addr_t 508 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs, 509 const u_int dci) 510 { 511 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci); 512 } 513 #endif /* unused */ 514 515 static inline void * 516 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs, 517 const u_int ici) 518 { 519 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici); 520 } 521 522 static inline bus_addr_t 523 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs, 524 const u_int ici) 525 { 526 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici); 527 } 528 529 static inline struct xhci_trb * 530 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx) 531 { 532 return KERNADDR(&xr->xr_dma, 
XHCI_TRB_SIZE * idx); 533 } 534 535 static inline bus_addr_t 536 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx) 537 { 538 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx); 539 } 540 541 static inline void 542 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx, 543 uint64_t parameter, uint32_t status, uint32_t control) 544 { 545 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb); 546 xx->xx_trb[idx].trb_0 = parameter; 547 xx->xx_trb[idx].trb_2 = status; 548 xx->xx_trb[idx].trb_3 = control; 549 } 550 551 static inline void 552 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status, 553 uint32_t control) 554 { 555 trb->trb_0 = htole64(parameter); 556 trb->trb_2 = htole32(status); 557 trb->trb_3 = htole32(control); 558 } 559 560 static int 561 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx) 562 { 563 /* base address of TRBs */ 564 bus_addr_t trbp = xhci_ring_trbp(xr, 0); 565 566 /* trb_0 range sanity check */ 567 if (trb_0 == 0 || trb_0 < trbp || 568 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 || 569 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) { 570 return 1; 571 } 572 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb); 573 return 0; 574 } 575 576 static unsigned int 577 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs, 578 u_int dci) 579 { 580 uint32_t *cp; 581 582 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 583 cp = xhci_slot_get_dcv(sc, xs, dci); 584 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0])); 585 } 586 587 static inline unsigned int 588 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport) 589 { 590 const unsigned int port = ctlrport - 1; 591 const uint8_t bit = __BIT(port % NBBY); 592 593 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit); 594 } 595 596 /* 597 * Return the roothub port for a controller port. Both are 1..n. 598 */ 599 static inline unsigned int 600 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport) 601 { 602 603 return sc->sc_ctlrportmap[ctrlport - 1]; 604 } 605 606 /* 607 * Return the controller port for a bus roothub port. Both are 1..n. 608 */ 609 static inline unsigned int 610 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn, 611 unsigned int rhport) 612 { 613 614 return sc->sc_rhportmap[bn][rhport - 1]; 615 } 616 617 /* --- */ 618 619 void 620 xhci_childdet(device_t self, device_t child) 621 { 622 struct xhci_softc * const sc = device_private(self); 623 624 mutex_enter(&sc->sc_intr_lock); 625 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child)); 626 if (child == sc->sc_child2) 627 sc->sc_child2 = NULL; 628 else if (child == sc->sc_child) 629 sc->sc_child = NULL; 630 mutex_exit(&sc->sc_intr_lock); 631 } 632 633 int 634 xhci_detach(struct xhci_softc *sc, int flags) 635 { 636 int rv = 0; 637 638 if (sc->sc_child2 != NULL) { 639 rv = config_detach(sc->sc_child2, flags); 640 if (rv != 0) 641 return rv; 642 KASSERT(sc->sc_child2 == NULL); 643 } 644 645 if (sc->sc_child != NULL) { 646 rv = config_detach(sc->sc_child, flags); 647 if (rv != 0) 648 return rv; 649 KASSERT(sc->sc_child == NULL); 650 } 651 652 /* XXX unconfigure/free slots */ 653 654 /* verify: */ 655 xhci_rt_write_4(sc, XHCI_IMAN(0), 0); 656 xhci_op_write_4(sc, XHCI_USBCMD, 0); 657 /* do we need to wait for stop? 
*/ 658 659 xhci_op_write_8(sc, XHCI_CRCR, 0); 660 xhci_ring_free(sc, &sc->sc_cr); 661 cv_destroy(&sc->sc_command_cv); 662 cv_destroy(&sc->sc_cmdbusy_cv); 663 664 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0); 665 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0); 666 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY); 667 xhci_ring_free(sc, &sc->sc_er); 668 669 usb_freemem(&sc->sc_eventst_dma); 670 671 xhci_op_write_8(sc, XHCI_DCBAAP, 0); 672 usb_freemem(&sc->sc_dcbaa_dma); 673 674 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots); 675 676 kmem_free(sc->sc_ctlrportbus, 677 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY)); 678 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int)); 679 680 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) { 681 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int)); 682 } 683 684 mutex_destroy(&sc->sc_rhlock); 685 mutex_destroy(&sc->sc_lock); 686 mutex_destroy(&sc->sc_intr_lock); 687 688 pool_cache_destroy(sc->sc_xferpool); 689 690 return rv; 691 } 692 693 int 694 xhci_activate(device_t self, enum devact act) 695 { 696 struct xhci_softc * const sc = device_private(self); 697 698 switch (act) { 699 case DVACT_DEACTIVATE: 700 sc->sc_dying = true; 701 return 0; 702 default: 703 return EOPNOTSUPP; 704 } 705 } 706 707 bool 708 xhci_suspend(device_t self, const pmf_qual_t *qual) 709 { 710 struct xhci_softc * const sc = device_private(self); 711 size_t i, j, bn, dci; 712 int port; 713 uint32_t v; 714 usbd_status err; 715 bool ok = false; 716 717 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 718 719 /* 720 * Block issuance of new commands, and wait for all pending 721 * commands to complete. 722 */ 723 mutex_enter(&sc->sc_lock); 724 KASSERT(sc->sc_suspender == NULL); 725 sc->sc_suspender = curlwp; 726 while (sc->sc_command_addr != 0) 727 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock); 728 mutex_exit(&sc->sc_lock); 729 730 /* 731 * Block roothub xfers which might touch portsc registers until 732 * we're done suspending. 733 */ 734 mutex_enter(&sc->sc_rhlock); 735 736 /* 737 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2: 738 * xHCI Power Management, p. 342 739 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342 740 */ 741 742 /* 743 * `1. Stop all USB activity by issuing Stop Endpoint Commands 744 * for Busy endpoints in the Running state. If the Force 745 * Save Context Capability (FSC = ``0'') is not supported, 746 * then Stop Endpoint Commands shall be issued for all idle 747 * endpoints in the Running state as well. The Stop 748 * Endpoint Command causes the xHC to update the respective 749 * Endpoint or Stream Contexts in system memory, e.g. the 750 * TR Dequeue Pointer, DCS, etc. fields. Refer to 751 * Implementation Note "0".' 752 */ 753 for (i = 0; i < sc->sc_maxslots; i++) { 754 struct xhci_slot *xs = &sc->sc_slots[i]; 755 756 /* Skip if the slot is not in use. */ 757 if (xs->xs_idx == 0) 758 continue; 759 760 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) { 761 /* Skip if the endpoint is not Running. */ 762 /* XXX What about Busy? */ 763 if (xhci_get_epstate(sc, xs, dci) != 764 XHCI_EPSTATE_RUNNING) 765 continue; 766 767 /* Stop endpoint. 
*/ 768 mutex_enter(&sc->sc_lock); 769 err = xhci_stop_endpoint_cmd(sc, xs, dci, 770 XHCI_TRB_3_SUSP_EP_BIT); 771 mutex_exit(&sc->sc_lock); 772 if (err) { 773 device_printf(self, "failed to stop endpoint" 774 " slot %zu dci %zu err %d\n", 775 i, dci, err); 776 goto out; 777 } 778 } 779 } 780 781 /* 782 * Next, suspend all the ports: 783 * 784 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15: 785 * Suspend-Resume, pp. 276-283 786 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276 787 */ 788 for (bn = 0; bn < 2; bn++) { 789 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) { 790 /* 4.15.1: Port Suspend. */ 791 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i)); 792 793 /* 794 * `System software places individual ports 795 * into suspend mode by writing a ``3'' into 796 * the appropriate PORTSC register Port Link 797 * State (PLS) field (refer to Section 5.4.8). 798 * Software should only set the PLS field to 799 * ``3'' when the port is in the Enabled 800 * state.' 801 * 802 * `Software should not attempt to suspend a 803 * port unless the port reports that it is in 804 * the enabled (PED = ``1''; PLS < ``3'') 805 * state (refer to Section 5.4.8 for more 806 * information about PED and PLS).' 807 */ 808 v = xhci_op_read_4(sc, port); 809 if (((v & XHCI_PS_PED) == 0) || 810 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3) 811 continue; 812 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR); 813 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3); 814 xhci_op_write_4(sc, port, v); 815 816 /* 817 * `When the PLS field is written with U3 818 * (``3''), the status of the PLS bit will not 819 * change to the target U state U3 until the 820 * suspend signaling has completed to the 821 * attached device (which may be as long as 822 * 10ms.).' 823 * 824 * `Software is required to wait for U3 825 * transitions to complete before it puts the 826 * xHC into a low power state, and before 827 * resuming the port.' 828 * 829 * XXX Take advantage of the technique to 830 * reduce polling on host controllers that 831 * support the U3C capability. 832 */ 833 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) { 834 v = xhci_op_read_4(sc, port); 835 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3) 836 break; 837 usb_delay_ms(&sc->sc_bus, 1); 838 } 839 if (j == XHCI_WAIT_PLS_U3) { 840 device_printf(self, 841 "suspend timeout on bus %zu port %zu\n", 842 bn, i); 843 goto out; 844 } 845 } 846 } 847 848 /* 849 * `2. Ensure that the Command Ring is in the Stopped state 850 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is 851 * empty), and all Command Completion Events associated 852 * with them have been received.' 853 * 854 * XXX 855 */ 856 857 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */ 858 xhci_op_write_4(sc, XHCI_USBCMD, 859 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS); 860 861 /* 862 * `4. Read the Operational Runtime, and VTIO registers in the 863 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ, 864 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their 865 * state.' 866 * 867 * (We don't use VTIO here (XXX for now?).) 
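	 *
	 * Concretely, the reads below capture USBCMD, DNCTRL, DCBAAP,
	 * CONFIG, ERSTSZ(0), ERSTBA(0), ERDP(0), IMAN(0) and IMOD(0)
	 * into sc->sc_regs, to be written back by xhci_resume().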
868 */ 869 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD); 870 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL); 871 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP); 872 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG); 873 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0)); 874 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0)); 875 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0)); 876 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0)); 877 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0)); 878 879 /* 880 * `5. Set the Controller Save State (CSS) flag in the USBCMD 881 * register (5.4.1)...' 882 */ 883 xhci_op_write_4(sc, XHCI_USBCMD, 884 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS); 885 886 /* 887 * `...and wait for the Save State Status (SSS) flag in the 888 * USBSTS register (5.4.2) to transition to ``0''.' 889 */ 890 for (i = 0; i < XHCI_WAIT_SSS; i++) { 891 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0) 892 break; 893 usb_delay_ms(&sc->sc_bus, 1); 894 } 895 if (i >= XHCI_WAIT_SSS) { 896 device_printf(self, "suspend timeout, USBSTS.SSS\n"); 897 /* 898 * Just optimistically go on and check SRE anyway -- 899 * what's the worst that could happen? 900 */ 901 } 902 903 /* 904 * `Note: After a Save or Restore operation completes, the 905 * Save/Restore Error (SRE) flag in the USBSTS register should 906 * be checked to ensure that the operation completed 907 * successfully.' 908 */ 909 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) { 910 device_printf(self, "suspend error, USBSTS.SRE\n"); 911 goto out; 912 } 913 914 /* Success! */ 915 ok = true; 916 917 out: mutex_exit(&sc->sc_rhlock); 918 if (!ok) { 919 /* 920 * If suspend failed, stop holding up command issuance 921 * and make it fail instead. 922 */ 923 mutex_enter(&sc->sc_lock); 924 KASSERT(sc->sc_suspender == curlwp); 925 sc->sc_suspender = NULL; 926 sc->sc_suspendresume_failed = true; 927 cv_broadcast(&sc->sc_cmdbusy_cv); 928 mutex_exit(&sc->sc_lock); 929 } 930 return ok; 931 } 932 933 bool 934 xhci_resume(device_t self, const pmf_qual_t *qual) 935 { 936 struct xhci_softc * const sc = device_private(self); 937 size_t i, j, bn, dci; 938 int port; 939 uint32_t v; 940 bool ok = false; 941 942 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 943 944 /* 945 * If resume had previously failed, just try again. Can't make 946 * things worse, probably. 947 */ 948 mutex_enter(&sc->sc_lock); 949 if (sc->sc_suspendresume_failed) { 950 KASSERT(sc->sc_suspender == NULL); 951 sc->sc_suspender = curlwp; 952 sc->sc_suspendresume_failed = false; 953 } 954 KASSERT(sc->sc_suspender); 955 mutex_exit(&sc->sc_lock); 956 957 /* 958 * Block roothub xfers which might touch portsc registers until 959 * we're done resuming. 960 */ 961 mutex_enter(&sc->sc_rhlock); 962 963 /* 964 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2: 965 * xHCI Power Management, p. 343 966 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343 967 */ 968 969 /* 970 * `4. Restore the Operational Runtime, and VTIO registers with 971 * their previously saved state in the following order: 972 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN, 973 * IMOD, and VTIO.' 974 * 975 * (We don't use VTIO here (for now?).) 
976 */ 977 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd); 978 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl); 979 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap); 980 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config); 981 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0); 982 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0); 983 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0); 984 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0); 985 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0); 986 987 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */ 988 989 /* 990 * `5. Set the Controller Restore State (CRS) flag in the 991 * USBCMD register (5.4.1) to ``1''...' 992 */ 993 xhci_op_write_4(sc, XHCI_USBCMD, 994 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS); 995 996 /* 997 * `...and wait for the Restore State Status (RSS) in the 998 * USBSTS register (5.4.2) to transition to ``0''.' 999 */ 1000 for (i = 0; i < XHCI_WAIT_RSS; i++) { 1001 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0) 1002 break; 1003 usb_delay_ms(&sc->sc_bus, 1); 1004 } 1005 if (i >= XHCI_WAIT_RSS) { 1006 device_printf(self, "resume timeout, USBSTS.RSS\n"); 1007 goto out; 1008 } 1009 1010 /* 1011 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits 1012 * are consistent with the RCS values to be written to the 1013 * CRCR.' 1014 * 1015 * XXX Hope just zeroing it is good enough! 1016 */ 1017 xhci_host_dequeue(sc->sc_cr); 1018 1019 /* 1020 * `7. Write the CRCR with the address and RCS value of the 1021 * reinitialized Command Ring. Note that this write will 1022 * cause the Command Ring to restart at the address 1023 * specified by the CRCR.' 1024 */ 1025 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) | 1026 sc->sc_cr->xr_cs); 1027 1028 /* 1029 * `8. Enable the controller by setting Run/Stop (R/S) = 1030 * ``1''.' 1031 */ 1032 xhci_op_write_4(sc, XHCI_USBCMD, 1033 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS); 1034 1035 /* 1036 * `9. Software shall walk the USB topology and initialize each 1037 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and 1038 * external hub ports attached to USB devices.' 1039 * 1040 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2 1041 * `Port Resume', 4.15.2.2 `Host Initiated'. 1042 * 1043 * XXX We should maybe batch up initiating the state 1044 * transitions, and then wait for them to complete all at once. 1045 */ 1046 for (bn = 0; bn < 2; bn++) { 1047 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) { 1048 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i)); 1049 1050 /* `When a port is in the U3 state: ...' */ 1051 v = xhci_op_read_4(sc, port); 1052 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3) 1053 continue; 1054 1055 /* 1056 * `For a USB2 protocol port, software shall 1057 * write a ``15'' (Resume) to the PLS field to 1058 * initiate resume signaling. The port shall 1059 * transition to the Resume substate and the 1060 * xHC shall transmit the resume signaling 1061 * within 1ms (T_URSM). Software shall ensure 1062 * that resume is signaled for at least 20ms 1063 * (T_DRSMDN). Software shall start timing 1064 * T_DRSMDN from the write of ``15'' (Resume) 1065 * to PLS.' 
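			 *
			 * Summarizing what the code below does with
			 * this: for a USB2 root port (bus 1) it writes
			 * PLS=Resume with LWS set and waits
			 * USB_RESUME_WAIT ms; then, for both USB2 and
			 * USB3 ports, it writes PLS=U0 and polls up to
			 * XHCI_WAIT_PLS_U0 ms for the link to reach U0.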
1066 */ 1067 if (bn == 1) { 1068 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0); 1069 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR); 1070 v |= XHCI_PS_LWS; 1071 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME); 1072 xhci_op_write_4(sc, port, v); 1073 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT); 1074 } else { 1075 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0); 1076 } 1077 1078 /* 1079 * `For a USB3 protocol port [and a USB2 1080 * protocol port after transitioning to 1081 * Resume], software shall write a ``0'' (U0) 1082 * to the PLS field...' 1083 */ 1084 v = xhci_op_read_4(sc, port); 1085 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR); 1086 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0); 1087 xhci_op_write_4(sc, port, v); 1088 1089 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) { 1090 v = xhci_op_read_4(sc, port); 1091 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0) 1092 break; 1093 usb_delay_ms(&sc->sc_bus, 1); 1094 } 1095 if (j == XHCI_WAIT_PLS_U0) { 1096 device_printf(self, 1097 "resume timeout on bus %zu port %zu\n", 1098 bn, i); 1099 goto out; 1100 } 1101 } 1102 } 1103 1104 /* 1105 * `10. Restart each of the previously Running endpoints by 1106 * ringing their doorbells.' 1107 */ 1108 for (i = 0; i < sc->sc_maxslots; i++) { 1109 struct xhci_slot *xs = &sc->sc_slots[i]; 1110 1111 /* Skip if the slot is not in use. */ 1112 if (xs->xs_idx == 0) 1113 continue; 1114 1115 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) { 1116 /* Skip if the endpoint is not Running. */ 1117 if (xhci_get_epstate(sc, xs, dci) != 1118 XHCI_EPSTATE_RUNNING) 1119 continue; 1120 1121 /* Ring the doorbell. */ 1122 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 1123 } 1124 } 1125 1126 /* 1127 * `Note: After a Save or Restore operation completes, the 1128 * Save/Restore Error (SRE) flag in the USBSTS register should 1129 * be checked to ensure that the operation completed 1130 * successfully.' 1131 */ 1132 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) { 1133 device_printf(self, "resume error, USBSTS.SRE\n"); 1134 goto out; 1135 } 1136 1137 /* Success! */ 1138 ok = true; 1139 1140 out: /* 1141 * Resume command issuance. If the hardware failed to resume, 1142 * well, tough -- deadlocking because everything is held up on 1143 * the suspension, with no opportunity to detach, isn't better 1144 * than timing out waiting for dead hardware. 1145 */ 1146 mutex_enter(&sc->sc_lock); 1147 KASSERT(sc->sc_suspender); 1148 sc->sc_suspender = NULL; 1149 sc->sc_suspendresume_failed = !ok; 1150 cv_broadcast(&sc->sc_cmdbusy_cv); 1151 mutex_exit(&sc->sc_lock); 1152 1153 mutex_exit(&sc->sc_rhlock); 1154 return ok; 1155 } 1156 1157 bool 1158 xhci_shutdown(device_t self, int flags) 1159 { 1160 return false; 1161 } 1162 1163 static int 1164 xhci_hc_reset(struct xhci_softc * const sc) 1165 { 1166 uint32_t usbcmd, usbsts; 1167 int i; 1168 1169 /* Check controller not ready */ 1170 for (i = 0; i < XHCI_WAIT_CNR; i++) { 1171 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1172 if ((usbsts & XHCI_STS_CNR) == 0) 1173 break; 1174 usb_delay_ms(&sc->sc_bus, 1); 1175 } 1176 if (i >= XHCI_WAIT_CNR) { 1177 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n"); 1178 return EIO; 1179 } 1180 1181 /* Halt controller */ 1182 usbcmd = 0; 1183 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd); 1184 usb_delay_ms(&sc->sc_bus, 1); 1185 1186 /* Reset controller */ 1187 usbcmd = XHCI_CMD_HCRST; 1188 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd); 1189 for (i = 0; i < XHCI_WAIT_HCRST; i++) { 1190 /* 1191 * Wait 1ms first. 
Existing Intel xHCI requires 1ms delay to 1192 * prevent system hang (Errata). 1193 */ 1194 usb_delay_ms(&sc->sc_bus, 1); 1195 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD); 1196 if ((usbcmd & XHCI_CMD_HCRST) == 0) 1197 break; 1198 } 1199 if (i >= XHCI_WAIT_HCRST) { 1200 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n"); 1201 return EIO; 1202 } 1203 1204 /* Check controller not ready */ 1205 for (i = 0; i < XHCI_WAIT_CNR; i++) { 1206 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1207 if ((usbsts & XHCI_STS_CNR) == 0) 1208 break; 1209 usb_delay_ms(&sc->sc_bus, 1); 1210 } 1211 if (i >= XHCI_WAIT_CNR) { 1212 aprint_error_dev(sc->sc_dev, 1213 "controller not ready timeout after reset\n"); 1214 return EIO; 1215 } 1216 1217 return 0; 1218 } 1219 1220 /* 7.2 xHCI Support Protocol Capability */ 1221 static void 1222 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp) 1223 { 1224 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1225 1226 /* XXX Cache this lot */ 1227 1228 const uint32_t w0 = xhci_read_4(sc, ecp); 1229 const uint32_t w4 = xhci_read_4(sc, ecp + 4); 1230 const uint32_t w8 = xhci_read_4(sc, ecp + 8); 1231 const uint32_t wc = xhci_read_4(sc, ecp + 0xc); 1232 1233 aprint_debug_dev(sc->sc_dev, 1234 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc); 1235 1236 if (w4 != XHCI_XECP_USBID) 1237 return; 1238 1239 const int major = XHCI_XECP_SP_W0_MAJOR(w0); 1240 const int minor = XHCI_XECP_SP_W0_MINOR(w0); 1241 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8); 1242 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8); 1243 1244 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16)); 1245 switch (mm) { 1246 case 0x0200: 1247 case 0x0300: 1248 case 0x0301: 1249 case 0x0310: 1250 case 0x0320: 1251 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n", 1252 major == 3 ? "ss" : "hs", cpo, cpo + cpc - 1); 1253 if (major == 3) 1254 sc->sc_usb3nports += cpo + cpc - 1; 1255 else 1256 sc->sc_usb2nports += cpo + cpc - 1; 1257 break; 1258 default: 1259 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n", 1260 major, minor); 1261 return; 1262 } 1263 1264 const size_t bus = (major == 3) ? 0 : 1; 1265 1266 /* Index arrays with 0..n-1 where ports are numbered 1..n */ 1267 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) { 1268 if (sc->sc_ctlrportmap[cp] != 0) { 1269 aprint_error_dev(sc->sc_dev, "controller port %zu " 1270 "already assigned", cp); 1271 continue; 1272 } 1273 1274 sc->sc_ctlrportbus[cp / NBBY] |= 1275 bus == 0 ? 
0 : __BIT(cp % NBBY); 1276 1277 const size_t rhp = sc->sc_rhportcount[bus]++; 1278 1279 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0, 1280 "bus %zu rhp %zu is %d", bus, rhp, 1281 sc->sc_rhportmap[bus][rhp]); 1282 1283 sc->sc_rhportmap[bus][rhp] = cp + 1; 1284 sc->sc_ctlrportmap[cp] = rhp + 1; 1285 } 1286 } 1287 1288 /* Process extended capabilities */ 1289 static void 1290 xhci_ecp(struct xhci_softc *sc) 1291 { 1292 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1293 1294 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4; 1295 while (ecp != 0) { 1296 uint32_t ecr = xhci_read_4(sc, ecp); 1297 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr); 1298 switch (XHCI_XECP_ID(ecr)) { 1299 case XHCI_ID_PROTOCOLS: { 1300 xhci_id_protocols(sc, ecp); 1301 break; 1302 } 1303 case XHCI_ID_USB_LEGACY: { 1304 uint8_t bios_sem; 1305 1306 /* Take host controller ownership from BIOS */ 1307 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM); 1308 if (bios_sem) { 1309 /* sets xHCI to be owned by OS */ 1310 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1); 1311 aprint_debug_dev(sc->sc_dev, 1312 "waiting for BIOS to give up control\n"); 1313 for (int i = 0; i < 5000; i++) { 1314 bios_sem = xhci_read_1(sc, ecp + 1315 XHCI_XECP_BIOS_SEM); 1316 if (bios_sem == 0) 1317 break; 1318 DELAY(1000); 1319 } 1320 if (bios_sem) { 1321 aprint_error_dev(sc->sc_dev, 1322 "timed out waiting for BIOS\n"); 1323 } 1324 } 1325 break; 1326 } 1327 default: 1328 break; 1329 } 1330 ecr = xhci_read_4(sc, ecp); 1331 if (XHCI_XECP_NEXT(ecr) == 0) { 1332 ecp = 0; 1333 } else { 1334 ecp += XHCI_XECP_NEXT(ecr) * 4; 1335 } 1336 } 1337 } 1338 1339 #define XHCI_HCCPREV1_BITS \ 1340 "\177\020" /* New bitmask */ \ 1341 "f\020\020XECP\0" \ 1342 "f\014\4MAXPSA\0" \ 1343 "b\013CFC\0" \ 1344 "b\012SEC\0" \ 1345 "b\011SBD\0" \ 1346 "b\010FSE\0" \ 1347 "b\7NSS\0" \ 1348 "b\6LTC\0" \ 1349 "b\5LHRC\0" \ 1350 "b\4PIND\0" \ 1351 "b\3PPC\0" \ 1352 "b\2CZC\0" \ 1353 "b\1BNC\0" \ 1354 "b\0AC64\0" \ 1355 "\0" 1356 #define XHCI_HCCV1_x_BITS \ 1357 "\177\020" /* New bitmask */ \ 1358 "f\020\020XECP\0" \ 1359 "f\014\4MAXPSA\0" \ 1360 "b\013CFC\0" \ 1361 "b\012SEC\0" \ 1362 "b\011SPC\0" \ 1363 "b\010PAE\0" \ 1364 "b\7NSS\0" \ 1365 "b\6LTC\0" \ 1366 "b\5LHRC\0" \ 1367 "b\4PIND\0" \ 1368 "b\3PPC\0" \ 1369 "b\2CSZ\0" \ 1370 "b\1BNC\0" \ 1371 "b\0AC64\0" \ 1372 "\0" 1373 1374 #define XHCI_HCC2_BITS \ 1375 "\177\020" /* New bitmask */ \ 1376 "b\7ETC_TSC\0" \ 1377 "b\6ETC\0" \ 1378 "b\5CIC\0" \ 1379 "b\4LEC\0" \ 1380 "b\3CTC\0" \ 1381 "b\2FSC\0" \ 1382 "b\1CMC\0" \ 1383 "b\0U3C\0" \ 1384 "\0" 1385 1386 void 1387 xhci_start(struct xhci_softc *sc) 1388 { 1389 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA); 1390 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0) 1391 /* Intel xhci needs interrupt rate moderated. */ 1392 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP); 1393 else 1394 xhci_rt_write_4(sc, XHCI_IMOD(0), 0); 1395 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n", 1396 xhci_rt_read_4(sc, XHCI_IMOD(0))); 1397 1398 /* Go! 
*/ 1399 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS); 1400 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n", 1401 xhci_op_read_4(sc, XHCI_USBCMD)); 1402 } 1403 1404 int 1405 xhci_init(struct xhci_softc *sc) 1406 { 1407 bus_size_t bsz; 1408 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff; 1409 uint32_t pagesize, config; 1410 int i = 0; 1411 uint16_t hciversion; 1412 uint8_t caplength; 1413 1414 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1415 1416 /* Set up the bus struct for the usb 3 and usb 2 buses */ 1417 sc->sc_bus.ub_methods = &xhci_bus_methods; 1418 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe); 1419 sc->sc_bus.ub_usedma = true; 1420 sc->sc_bus.ub_hcpriv = sc; 1421 1422 sc->sc_bus2.ub_methods = &xhci_bus_methods; 1423 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe); 1424 sc->sc_bus2.ub_revision = USBREV_2_0; 1425 sc->sc_bus2.ub_usedma = true; 1426 sc->sc_bus2.ub_hcpriv = sc; 1427 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag; 1428 1429 caplength = xhci_read_1(sc, XHCI_CAPLENGTH); 1430 hciversion = xhci_read_2(sc, XHCI_HCIVERSION); 1431 1432 if (hciversion < XHCI_HCIVERSION_0_96 || 1433 hciversion >= 0x0200) { 1434 aprint_normal_dev(sc->sc_dev, 1435 "xHCI version %x.%x not known to be supported\n", 1436 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff); 1437 } else { 1438 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n", 1439 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff); 1440 } 1441 1442 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength, 1443 &sc->sc_cbh) != 0) { 1444 aprint_error_dev(sc->sc_dev, "capability subregion failure\n"); 1445 return ENOMEM; 1446 } 1447 1448 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1); 1449 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1); 1450 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1); 1451 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1); 1452 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2); 1453 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3); 1454 aprint_debug_dev(sc->sc_dev, 1455 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3); 1456 1457 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS); 1458 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32; 1459 1460 char sbuf[128]; 1461 if (hciversion < XHCI_HCIVERSION_1_0) 1462 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc); 1463 else 1464 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc); 1465 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf); 1466 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n", 1467 XHCI_HCC_XECP(sc->sc_hcc) * 4); 1468 if (hciversion >= XHCI_HCIVERSION_1_1) { 1469 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2); 1470 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2); 1471 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf); 1472 } 1473 1474 /* default all ports to bus 0, i.e. 
usb 3 */ 1475 sc->sc_ctlrportbus = kmem_zalloc( 1476 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP); 1477 sc->sc_ctlrportmap = 1478 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP); 1479 1480 /* controller port to bus roothub port map */ 1481 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) { 1482 sc->sc_rhportmap[j] = 1483 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP); 1484 } 1485 1486 /* 1487 * Process all Extended Capabilities 1488 */ 1489 xhci_ecp(sc); 1490 1491 bsz = XHCI_PORTSC(sc->sc_maxports); 1492 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz, 1493 &sc->sc_obh) != 0) { 1494 aprint_error_dev(sc->sc_dev, "operational subregion failure\n"); 1495 return ENOMEM; 1496 } 1497 1498 dboff = xhci_cap_read_4(sc, XHCI_DBOFF); 1499 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff, 1500 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) { 1501 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n"); 1502 return ENOMEM; 1503 } 1504 1505 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF); 1506 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff, 1507 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) { 1508 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n"); 1509 return ENOMEM; 1510 } 1511 1512 int rv; 1513 rv = xhci_hc_reset(sc); 1514 if (rv != 0) { 1515 return rv; 1516 } 1517 1518 if (sc->sc_vendor_init) 1519 sc->sc_vendor_init(sc); 1520 1521 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE); 1522 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize); 1523 pagesize = ffs(pagesize); 1524 if (pagesize == 0) { 1525 aprint_error_dev(sc->sc_dev, "pagesize is 0\n"); 1526 return EIO; 1527 } 1528 sc->sc_pgsz = 1 << (12 + (pagesize - 1)); 1529 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz); 1530 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n", 1531 (uint32_t)sc->sc_maxslots); 1532 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports); 1533 1534 int err; 1535 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2); 1536 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf); 1537 if (sc->sc_maxspbuf != 0) { 1538 err = usb_allocmem(sc->sc_bus.ub_dmatag, 1539 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t), 1540 USBMALLOC_ZERO, &sc->sc_spbufarray_dma); 1541 if (err) { 1542 aprint_error_dev(sc->sc_dev, 1543 "spbufarray init fail, err %d\n", err); 1544 return ENOMEM; 1545 } 1546 1547 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) * 1548 sc->sc_maxspbuf, KM_SLEEP); 1549 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0); 1550 for (i = 0; i < sc->sc_maxspbuf; i++) { 1551 usb_dma_t * const dma = &sc->sc_spbuf_dma[i]; 1552 /* allocate contexts */ 1553 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, 1554 sc->sc_pgsz, USBMALLOC_ZERO, dma); 1555 if (err) { 1556 aprint_error_dev(sc->sc_dev, 1557 "spbufarray_dma init fail, err %d\n", err); 1558 rv = ENOMEM; 1559 goto bad1; 1560 } 1561 spbufarray[i] = htole64(DMAADDR(dma, 0)); 1562 usb_syncmem(dma, 0, sc->sc_pgsz, 1563 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1564 } 1565 1566 usb_syncmem(&sc->sc_spbufarray_dma, 0, 1567 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE); 1568 } 1569 1570 sc->sc_isthresh = XHCI_HCS2_IST(hcs2); 1571 aprint_debug_dev(sc->sc_dev, "sc_isthresh %d\n", sc->sc_isthresh); 1572 1573 /* 1574 * xHI 5.3.4 1575 * If bit[3] is 0, IST is number of microframes in bit[2:0] 1576 * If bit[3] is 1, IST is number of frames in bit[2:0] 1577 */ 1578 if (sc->sc_isthresh & 0x8) { 1579 sc->sc_isthresh = (sc->sc_isthresh & 0x7) * 1580 
USB_UFRAMES_PER_FRAME; 1581 } 1582 1583 config = xhci_op_read_4(sc, XHCI_CONFIG); 1584 config &= ~0xFF; 1585 config |= sc->sc_maxslots & 0xFF; 1586 xhci_op_write_4(sc, XHCI_CONFIG, config); 1587 1588 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS, 1589 XHCI_COMMAND_RING_SEGMENTS_ALIGN); 1590 if (err) { 1591 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n", 1592 err); 1593 rv = ENOMEM; 1594 goto bad1; 1595 } 1596 1597 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS, 1598 XHCI_EVENT_RING_SEGMENTS_ALIGN); 1599 if (err) { 1600 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n", 1601 err); 1602 rv = ENOMEM; 1603 goto bad2; 1604 } 1605 1606 usb_dma_t *dma; 1607 size_t size; 1608 size_t align; 1609 1610 dma = &sc->sc_eventst_dma; 1611 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE, 1612 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN); 1613 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size); 1614 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN; 1615 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align, 1616 USBMALLOC_ZERO, dma); 1617 if (err) { 1618 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n", 1619 err); 1620 rv = ENOMEM; 1621 goto bad3; 1622 } 1623 1624 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n", 1625 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0), 1626 KERNADDR(&sc->sc_eventst_dma, 0), 1627 sc->sc_eventst_dma.udma_block->size); 1628 1629 dma = &sc->sc_dcbaa_dma; 1630 size = (1 + sc->sc_maxslots) * sizeof(uint64_t); 1631 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size); 1632 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN; 1633 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align, 1634 USBMALLOC_ZERO, dma); 1635 if (err) { 1636 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err); 1637 rv = ENOMEM; 1638 goto bad4; 1639 } 1640 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n", 1641 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0), 1642 KERNADDR(&sc->sc_dcbaa_dma, 0), 1643 sc->sc_dcbaa_dma.udma_block->size); 1644 1645 if (sc->sc_maxspbuf != 0) { 1646 /* 1647 * DCBA entry 0 hold the scratchbuf array pointer. 
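		 *
		 * (DCBAA layout, xHCI 1.1 sec. 6.1: entry 0 points at
		 * the scratchpad buffer array when MaxScratchpadBufs > 0,
		 * and entries 1..MaxSlots point at the individual device
		 * contexts, filled in per-slot later via xhci_set_dcba().)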
1648 */ 1649 *(uint64_t *)KERNADDR(dma, 0) = 1650 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0)); 1651 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE); 1652 } 1653 1654 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots, 1655 KM_SLEEP); 1656 if (sc->sc_slots == NULL) { 1657 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err); 1658 rv = ENOMEM; 1659 goto bad; 1660 } 1661 1662 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0, 1663 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL); 1664 if (sc->sc_xferpool == NULL) { 1665 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n", 1666 err); 1667 rv = ENOMEM; 1668 goto bad; 1669 } 1670 1671 cv_init(&sc->sc_command_cv, "xhcicmd"); 1672 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq"); 1673 mutex_init(&sc->sc_rhlock, MUTEX_DEFAULT, IPL_NONE); 1674 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 1675 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB); 1676 1677 struct xhci_erste *erst; 1678 erst = KERNADDR(&sc->sc_eventst_dma, 0); 1679 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0)); 1680 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb); 1681 erst[0].erste_3 = htole32(0); 1682 usb_syncmem(&sc->sc_eventst_dma, 0, 1683 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE); 1684 1685 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS); 1686 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0)); 1687 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) | 1688 XHCI_ERDP_BUSY); 1689 1690 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0)); 1691 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) | 1692 sc->sc_cr->xr_cs); 1693 1694 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0), 1695 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS); 1696 1697 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0) 1698 xhci_start(sc); 1699 1700 return 0; 1701 1702 bad: 1703 if (sc->sc_xferpool) { 1704 pool_cache_destroy(sc->sc_xferpool); 1705 sc->sc_xferpool = NULL; 1706 } 1707 1708 if (sc->sc_slots) { 1709 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * 1710 sc->sc_maxslots); 1711 sc->sc_slots = NULL; 1712 } 1713 1714 usb_freemem(&sc->sc_dcbaa_dma); 1715 bad4: 1716 usb_freemem(&sc->sc_eventst_dma); 1717 bad3: 1718 xhci_ring_free(sc, &sc->sc_er); 1719 bad2: 1720 xhci_ring_free(sc, &sc->sc_cr); 1721 i = sc->sc_maxspbuf; 1722 bad1: 1723 for (int j = 0; j < i; j++) 1724 usb_freemem(&sc->sc_spbuf_dma[j]); 1725 usb_freemem(&sc->sc_spbufarray_dma); 1726 1727 return rv; 1728 } 1729 1730 static inline bool 1731 xhci_polling_p(struct xhci_softc * const sc) 1732 { 1733 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling; 1734 } 1735 1736 int 1737 xhci_intr(void *v) 1738 { 1739 struct xhci_softc * const sc = v; 1740 int ret = 0; 1741 1742 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1743 1744 if (sc == NULL) 1745 return 0; 1746 1747 mutex_spin_enter(&sc->sc_intr_lock); 1748 1749 if (sc->sc_dying || !device_has_power(sc->sc_dev)) 1750 goto done; 1751 1752 /* If we get an interrupt while polling, then just ignore it. */ 1753 if (xhci_polling_p(sc)) { 1754 #ifdef DIAGNOSTIC 1755 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0); 1756 #endif 1757 goto done; 1758 } 1759 1760 ret = xhci_intr1(sc); 1761 if (ret) { 1762 KASSERT(sc->sc_child || sc->sc_child2); 1763 1764 /* 1765 * One of child busses could be already detached. It doesn't 1766 * matter on which of the two the softintr is scheduled. 
1767 */ 1768 if (sc->sc_child) 1769 usb_schedsoftintr(&sc->sc_bus); 1770 else 1771 usb_schedsoftintr(&sc->sc_bus2); 1772 } 1773 done: 1774 mutex_spin_exit(&sc->sc_intr_lock); 1775 return ret; 1776 } 1777 1778 int 1779 xhci_intr1(struct xhci_softc * const sc) 1780 { 1781 uint32_t usbsts; 1782 uint32_t iman; 1783 1784 XHCIHIST_FUNC(); 1785 1786 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1787 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0); 1788 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD | 1789 XHCI_STS_HCE)) == 0) { 1790 DPRINTFN(16, "ignored intr not for %jd", 1791 device_unit(sc->sc_dev), 0, 0, 0); 1792 return 0; 1793 } 1794 1795 /* 1796 * Clear EINT and other transient flags, to not misenterpret 1797 * next shared interrupt. Also, to avoid race, EINT must be cleared 1798 * before XHCI_IMAN_INTR_PEND is cleared. 1799 */ 1800 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & ~XHCI_STS_RSVDP0); 1801 1802 #ifdef XHCI_DEBUG 1803 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1804 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0); 1805 #endif 1806 1807 iman = xhci_rt_read_4(sc, XHCI_IMAN(0)); 1808 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0); 1809 iman |= XHCI_IMAN_INTR_PEND; 1810 xhci_rt_write_4(sc, XHCI_IMAN(0), iman); 1811 1812 #ifdef XHCI_DEBUG 1813 iman = xhci_rt_read_4(sc, XHCI_IMAN(0)); 1814 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0); 1815 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1816 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0); 1817 #endif 1818 1819 return 1; 1820 } 1821 1822 /* 1823 * 3 port speed types used in USB stack 1824 * 1825 * usbdi speed 1826 * definition: USB_SPEED_* in usb.h 1827 * They are used in struct usbd_device in USB stack. 1828 * ioctl interface uses these values too. 1829 * port_status speed 1830 * definition: UPS_*_SPEED in usb.h 1831 * They are used in usb_port_status_t and valid only for USB 2.0. 1832 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus 1833 * of usb_port_status_ext_t indicates port speed. 1834 * Note that some 3.0 values overlap with 2.0 values. 1835 * (e.g. 0x200 means UPS_POER_POWER_SS in SS and 1836 * means UPS_LOW_SPEED in HS.) 1837 * port status returned from hub also uses these values. 1838 * On NetBSD UPS_OTHER_SPEED indicates port speed is super speed 1839 * or more. 1840 * xspeed: 1841 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1) 1842 * They are used in only slot context and PORTSC reg of xhci. 1843 * The difference between usbdi speed and xspeed is 1844 * that FS and LS values are swapped. 1845 */ 1846 1847 /* convert usbdi speed to xspeed */ 1848 static int 1849 xhci_speed2xspeed(int speed) 1850 { 1851 switch (speed) { 1852 case USB_SPEED_LOW: return 2; 1853 case USB_SPEED_FULL: return 1; 1854 default: return speed; 1855 } 1856 } 1857 1858 #if 0 1859 /* convert xspeed to usbdi speed */ 1860 static int 1861 xhci_xspeed2speed(int xspeed) 1862 { 1863 switch (xspeed) { 1864 case 1: return USB_SPEED_FULL; 1865 case 2: return USB_SPEED_LOW; 1866 default: return xspeed; 1867 } 1868 } 1869 #endif 1870 1871 /* convert xspeed to port status speed */ 1872 static int 1873 xhci_xspeed2psspeed(int xspeed) 1874 { 1875 switch (xspeed) { 1876 case 0: return 0; 1877 case 1: return UPS_FULL_SPEED; 1878 case 2: return UPS_LOW_SPEED; 1879 case 3: return UPS_HIGH_SPEED; 1880 default: return UPS_OTHER_SPEED; 1881 } 1882 } 1883 1884 /* 1885 * Construct input contexts and issue TRB to open pipe. 
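 *
 * The input context handed to the Configure Endpoint command is laid
 * out per xHCI 1.1 sec. 6.2.5: ICI 0 is the input control context
 * (add/drop flags), ICI 1 is the slot context, and ICI dci+1 is the
 * endpoint context for each endpoint being configured;
 * xhci_dci_to_ici() performs that dci -> ici translation.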
1886 */ 1887 static usbd_status 1888 xhci_configure_endpoint(struct usbd_pipe *pipe) 1889 { 1890 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1891 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1892 #ifdef USB_DEBUG 1893 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1894 #endif 1895 struct xhci_soft_trb trb; 1896 usbd_status err; 1897 1898 XHCIHIST_FUNC(); 1899 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx", 1900 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress, 1901 pipe->up_endpoint->ue_edesc->bmAttributes); 1902 1903 /* XXX ensure input context is available? */ 1904 1905 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz); 1906 1907 /* set up context */ 1908 xhci_setup_ctx(pipe); 1909 1910 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0), 1911 sc->sc_ctxsz * 1); 1912 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs, 1913 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1); 1914 1915 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 1916 trb.trb_2 = 0; 1917 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1918 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP); 1919 1920 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 1921 1922 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 1923 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci), 1924 sc->sc_ctxsz * 1); 1925 1926 return err; 1927 } 1928 1929 #if 0 1930 static usbd_status 1931 xhci_unconfigure_endpoint(struct usbd_pipe *pipe) 1932 { 1933 #ifdef USB_DEBUG 1934 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1935 #endif 1936 1937 XHCIHIST_FUNC(); 1938 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0); 1939 1940 return USBD_NORMAL_COMPLETION; 1941 } 1942 #endif 1943 1944 /* 4.6.8, 6.4.3.7 */ 1945 static void 1946 xhci_reset_endpoint(struct usbd_pipe *pipe) 1947 { 1948 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1949 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1950 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1951 struct xhci_soft_trb trb; 1952 1953 XHCIHIST_FUNC(); 1954 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1955 1956 KASSERT(mutex_owned(&sc->sc_lock)); 1957 1958 trb.trb_0 = 0; 1959 trb.trb_2 = 0; 1960 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1961 XHCI_TRB_3_EP_SET(dci) | 1962 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP); 1963 1964 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) { 1965 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n", 1966 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress); 1967 } 1968 } 1969 1970 /* 1971 * 4.6.9, 6.4.3.8 1972 * Stop execution of TDs on xfer ring. 1973 * Should be called with sc_lock held. 
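 * trb3flags is OR'ed into the command TRB, so callers can request
 * variants such as the Suspend (SP) form of Stop Endpoint.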
1974 */ 1975 static usbd_status 1976 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci, 1977 uint32_t trb3flags) 1978 { 1979 struct xhci_soft_trb trb; 1980 usbd_status err; 1981 1982 XHCIHIST_FUNC(); 1983 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1984 1985 KASSERT(mutex_owned(&sc->sc_lock)); 1986 1987 trb.trb_0 = 0; 1988 trb.trb_2 = 0; 1989 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1990 XHCI_TRB_3_EP_SET(dci) | 1991 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) | 1992 trb3flags; 1993 1994 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 1995 1996 return err; 1997 } 1998 1999 static usbd_status 2000 xhci_stop_endpoint(struct usbd_pipe *pipe) 2001 { 2002 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2003 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2004 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 2005 2006 XHCIHIST_FUNC(); 2007 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 2008 2009 KASSERT(mutex_owned(&sc->sc_lock)); 2010 2011 return xhci_stop_endpoint_cmd(sc, xs, dci, 0); 2012 } 2013 2014 /* 2015 * Set TR Dequeue Pointer. 2016 * xHCI 1.1 4.6.10 6.4.3.9 2017 * Purge all of the TRBs on ring and reinitialize ring. 2018 * Set TR dequeue Pointer to 0 and Cycle State to 1. 2019 * EPSTATE of endpoint must be ERROR or STOPPED, otherwise CONTEXT_STATE 2020 * error will be generated. 2021 */ 2022 static void 2023 xhci_set_dequeue(struct usbd_pipe *pipe) 2024 { 2025 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2026 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2027 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 2028 struct xhci_ring * const xr = xs->xs_xr[dci]; 2029 struct xhci_soft_trb trb; 2030 2031 XHCIHIST_FUNC(); 2032 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 2033 2034 KASSERT(mutex_owned(&sc->sc_lock)); 2035 KASSERT(xr != NULL); 2036 2037 xhci_host_dequeue(xr); 2038 2039 /* set DCS */ 2040 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */ 2041 trb.trb_2 = 0; 2042 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 2043 XHCI_TRB_3_EP_SET(dci) | 2044 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE); 2045 2046 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) { 2047 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n", 2048 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress); 2049 } 2050 } 2051 2052 /* 2053 * Open new pipe: called from usbd_setup_pipe_flags. 2054 * Fills methods of pipe. 2055 * If pipe is not for ep0, calls configure_endpoint. 
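 * For device endpoints the transfer ring is allocated here; for the
 * default control endpoint the Configure Endpoint step is skipped, since
 * its context is installed by the later Address Device command
 * (xhci_set_address).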
2056 */ 2057 static usbd_status 2058 xhci_open(struct usbd_pipe *pipe) 2059 { 2060 struct usbd_device * const dev = pipe->up_dev; 2061 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe; 2062 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 2063 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2064 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 2065 const u_int dci = xhci_ep_get_dci(ed); 2066 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 2067 usbd_status err; 2068 2069 XHCIHIST_FUNC(); 2070 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr, 2071 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed); 2072 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx", 2073 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress, 2074 ed->bmAttributes); 2075 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize), 2076 ed->bInterval, 0, 0); 2077 2078 if (sc->sc_dying) 2079 return USBD_IOERROR; 2080 2081 /* Root Hub */ 2082 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) { 2083 switch (ed->bEndpointAddress) { 2084 case USB_CONTROL_ENDPOINT: 2085 pipe->up_methods = &roothub_ctrl_methods; 2086 break; 2087 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT: 2088 pipe->up_methods = &xhci_root_intr_methods; 2089 break; 2090 default: 2091 pipe->up_methods = NULL; 2092 DPRINTFN(0, "bad bEndpointAddress 0x%02jx", 2093 ed->bEndpointAddress, 0, 0, 0); 2094 return USBD_INVAL; 2095 } 2096 return USBD_NORMAL_COMPLETION; 2097 } 2098 2099 usb_init_task(&xpipe->xp_async_task, xhci_pipe_restart_async_task, 2100 pipe, USB_TASKQ_MPSAFE); 2101 2102 switch (xfertype) { 2103 case UE_CONTROL: 2104 pipe->up_methods = &xhci_device_ctrl_methods; 2105 break; 2106 case UE_ISOCHRONOUS: 2107 pipe->up_methods = &xhci_device_isoc_methods; 2108 pipe->up_serialise = false; 2109 xpipe->xp_isoc_next = -1; 2110 break; 2111 case UE_BULK: 2112 pipe->up_methods = &xhci_device_bulk_methods; 2113 break; 2114 case UE_INTERRUPT: 2115 pipe->up_methods = &xhci_device_intr_methods; 2116 break; 2117 default: 2118 return USBD_IOERROR; 2119 break; 2120 } 2121 2122 KASSERT(xs != NULL); 2123 KASSERT(xs->xs_xr[dci] == NULL); 2124 2125 /* allocate transfer ring */ 2126 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS, 2127 XHCI_TRB_ALIGN); 2128 if (err) { 2129 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0); 2130 return err; 2131 } 2132 2133 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT) 2134 return xhci_configure_endpoint(pipe); 2135 2136 return USBD_NORMAL_COMPLETION; 2137 } 2138 2139 /* 2140 * Closes pipe, called from usbd_kill_pipe via close methods. 2141 * If the endpoint to be closed is ep0, disable_slot. 2142 * Should be called with sc_lock held. 
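 * Other endpoints are stopped if necessary and then dropped from the
 * device context with a Configure Endpoint command (drop flag only, the
 * DC bit is never set), after which the transfer ring is freed.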
2143 */ 2144 static void 2145 xhci_close_pipe(struct usbd_pipe *pipe) 2146 { 2147 struct xhci_pipe * const xp = 2148 container_of(pipe, struct xhci_pipe, xp_pipe); 2149 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2150 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2151 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 2152 const u_int dci = xhci_ep_get_dci(ed); 2153 struct xhci_soft_trb trb; 2154 uint32_t *cp; 2155 2156 XHCIHIST_FUNC(); 2157 2158 usb_rem_task_wait(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC, 2159 &sc->sc_lock); 2160 2161 if (sc->sc_dying) 2162 return; 2163 2164 /* xs is uninitialized before xhci_init_slot */ 2165 if (xs == NULL || xs->xs_idx == 0) 2166 return; 2167 2168 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju", 2169 (uintptr_t)pipe, xs->xs_idx, dci, 0); 2170 2171 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx"); 2172 KASSERT(mutex_owned(&sc->sc_lock)); 2173 2174 if (pipe->up_dev->ud_depth == 0) 2175 return; 2176 2177 if (dci == XHCI_DCI_EP_CONTROL) { 2178 DPRINTFN(4, "closing ep0", 0, 0, 0, 0); 2179 /* This frees all rings */ 2180 xhci_disable_slot(sc, xs->xs_idx); 2181 return; 2182 } 2183 2184 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED) 2185 (void)xhci_stop_endpoint(pipe); 2186 2187 /* 2188 * set appropriate bit to be dropped. 2189 * don't set DC bit to 1, otherwise all endpoints 2190 * would be deconfigured. 2191 */ 2192 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 2193 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci)); 2194 cp[1] = htole32(0); 2195 2196 /* XXX should be most significant one, not dci? */ 2197 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT)); 2198 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci)); 2199 2200 /* configure ep context performs an implicit dequeue */ 2201 xhci_host_dequeue(xs->xs_xr[dci]); 2202 2203 /* sync input contexts before they are read from memory */ 2204 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 2205 2206 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 2207 trb.trb_2 = 0; 2208 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 2209 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP); 2210 2211 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 2212 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 2213 2214 xhci_ring_free(sc, &xs->xs_xr[dci]); 2215 xs->xs_xr[dci] = NULL; 2216 } 2217 2218 /* 2219 * Abort transfer. Must be called with sc_lock held. Releases and 2220 * reacquires sc_lock to sleep until hardware acknowledges abort. 
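 * The caller has already marked the xfer CANCELLED or TIMEOUT; all that
 * is left is to restart the pipe, which stops or resets the endpoint and
 * rewinds its transfer ring.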
2221 */ 2222 static void 2223 xhci_abortx(struct usbd_xfer *xfer) 2224 { 2225 XHCIHIST_FUNC(); 2226 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 2227 2228 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx", 2229 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0); 2230 2231 KASSERT(mutex_owned(&sc->sc_lock)); 2232 KASSERTMSG((xfer->ux_status == USBD_CANCELLED || 2233 xfer->ux_status == USBD_TIMEOUT), 2234 "bad abort status: %d", xfer->ux_status); 2235 2236 xhci_pipe_restart(xfer->ux_pipe); 2237 2238 DPRINTFN(14, "end", 0, 0, 0, 0); 2239 } 2240 2241 static void 2242 xhci_host_dequeue(struct xhci_ring * const xr) 2243 { 2244 /* When dequeueing the controller, update our struct copy too */ 2245 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE); 2246 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE, 2247 BUS_DMASYNC_PREWRITE); 2248 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies)); 2249 2250 xr->xr_ep = 0; 2251 xr->xr_cs = 1; 2252 } 2253 2254 /* 2255 * Recover STALLed endpoint, or stop endpoint to abort a pipe. 2256 * xHCI 1.1 sect 4.10.2.1 2257 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove 2258 * all transfers on transfer ring. 2259 */ 2260 static void 2261 xhci_pipe_restart(struct usbd_pipe *pipe) 2262 { 2263 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2264 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2265 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 2266 2267 XHCIHIST_FUNC(); 2268 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju", 2269 (uintptr_t)pipe, xs->xs_idx, dci, 0); 2270 2271 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2272 2273 /* 2274 * - If the endpoint is halted, indicating a stall, reset it. 2275 * - If the endpoint is stopped, we're already good. 2276 * - Otherwise, someone wanted to abort the pipe, so stop the 2277 * endpoint. 2278 * 2279 * In any case, clear the ring. 2280 */ 2281 switch (xhci_get_epstate(sc, xs, dci)) { 2282 case XHCI_EPSTATE_HALTED: 2283 xhci_reset_endpoint(pipe); 2284 break; 2285 case XHCI_EPSTATE_STOPPED: 2286 break; 2287 default: 2288 xhci_stop_endpoint(pipe); 2289 break; 2290 } 2291 2292 switch (xhci_get_epstate(sc, xs, dci)) { 2293 case XHCI_EPSTATE_STOPPED: 2294 break; 2295 case XHCI_EPSTATE_ERROR: 2296 device_printf(sc->sc_dev, "endpoint 0x%x error\n", 2297 pipe->up_endpoint->ue_edesc->bEndpointAddress); 2298 break; 2299 default: 2300 device_printf(sc->sc_dev, "endpoint 0x%x failed to stop\n", 2301 pipe->up_endpoint->ue_edesc->bEndpointAddress); 2302 } 2303 2304 xhci_set_dequeue(pipe); 2305 2306 DPRINTFN(4, "ends", 0, 0, 0, 0); 2307 } 2308 2309 static void 2310 xhci_pipe_restart_async_task(void *cookie) 2311 { 2312 struct usbd_pipe * const pipe = cookie; 2313 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2314 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2315 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 2316 struct xhci_ring * const tr = xs->xs_xr[dci]; 2317 struct usbd_xfer *xfer; 2318 2319 XHCIHIST_FUNC(); 2320 XHCIHIST_CALLARGS("sc=%#jx pipe=%#jx", 2321 (uintptr_t)sc, (uintptr_t)pipe, 0, 0); 2322 2323 mutex_enter(&sc->sc_lock); 2324 2325 xhci_pipe_restart(pipe); 2326 2327 /* 2328 * We halted our own queue because it stalled. Mark it no 2329 * longer halted and start issuing queued transfers again. 
2330 */ 2331 tr->is_halted = false; 2332 xfer = SIMPLEQ_FIRST(&pipe->up_queue); 2333 if (xfer) { 2334 /* 2335 * If the first xfer of the queue is not in progress, 2336 * though, there may be a concurrent software abort 2337 * that has already cancelled it and is now in the 2338 * middle of a concurrent xhci_pipe_restart waiting to 2339 * reacquire the pipe (bus) lock. So only restart the 2340 * xfer if it's still USBD_IN_PROGRESS. 2341 * 2342 * Either way, xfers on the queue can't be in 2343 * USBD_NOT_STARTED. 2344 */ 2345 KASSERT(xfer->ux_status != USBD_NOT_STARTED); 2346 if (xfer->ux_status == USBD_IN_PROGRESS) { 2347 if (pipe->up_methods->upm_start != NULL) 2348 (*pipe->up_methods->upm_start)(xfer); 2349 } else { 2350 DPRINTF("pipe restart race xfer=%#jx status=%jd", 2351 (uintptr_t)xfer, xfer->ux_status, 0, 0); 2352 } 2353 } 2354 2355 mutex_exit(&sc->sc_lock); 2356 } 2357 2358 static void 2359 xhci_pipe_restart_async(struct usbd_pipe *pipe) 2360 { 2361 struct xhci_pipe * const xp = 2362 container_of(pipe, struct xhci_pipe, xp_pipe); 2363 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2364 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2365 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 2366 struct xhci_ring * const tr = xs->xs_xr[dci]; 2367 2368 XHCIHIST_FUNC(); 2369 XHCIHIST_CALLARGS("pipe %#jx", (uintptr_t)pipe, 0, 0, 0); 2370 2371 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2372 2373 tr->is_halted = true; 2374 usb_add_task(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC); 2375 2376 DPRINTFN(4, "ends", 0, 0, 0, 0); 2377 } 2378 2379 /* Process roothub port status/change events and notify to uhub_intr. */ 2380 static void 2381 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport) 2382 { 2383 XHCIHIST_FUNC(); 2384 XHCIHIST_CALLARGS("xhci%jd: port %ju status change", 2385 device_unit(sc->sc_dev), ctlrport, 0, 0); 2386 2387 if (ctlrport > sc->sc_maxports) 2388 return; 2389 2390 const size_t bn = xhci_ctlrport2bus(sc, ctlrport); 2391 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport); 2392 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn]; 2393 2394 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change", 2395 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer); 2396 2397 if (xfer == NULL) 2398 return; 2399 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 2400 2401 uint8_t *p = xfer->ux_buf; 2402 if (!xhci_polling_p(sc) || !sc->sc_intrxfer_deferred[bn]) 2403 memset(p, 0, xfer->ux_length); 2404 p[rhp / NBBY] |= 1 << (rhp % NBBY); 2405 xfer->ux_actlen = xfer->ux_length; 2406 xfer->ux_status = USBD_NORMAL_COMPLETION; 2407 if (xhci_polling_p(sc)) 2408 sc->sc_intrxfer_deferred[bn] = true; 2409 else 2410 usb_transfer_complete(xfer); 2411 } 2412 2413 /* Process Transfer Events */ 2414 static void 2415 xhci_event_transfer(struct xhci_softc * const sc, 2416 const struct xhci_trb * const trb) 2417 { 2418 uint64_t trb_0; 2419 uint32_t trb_2, trb_3; 2420 uint8_t trbcode; 2421 u_int slot, dci; 2422 struct xhci_slot *xs; 2423 struct xhci_ring *xr; 2424 struct xhci_xfer *xx; 2425 struct usbd_xfer *xfer; 2426 usbd_status err; 2427 2428 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2429 2430 trb_0 = le64toh(trb->trb_0); 2431 trb_2 = le32toh(trb->trb_2); 2432 trb_3 = le32toh(trb->trb_3); 2433 trbcode = XHCI_TRB_2_ERROR_GET(trb_2); 2434 slot = XHCI_TRB_3_SLOT_GET(trb_3); 2435 dci = XHCI_TRB_3_EP_GET(trb_3); 2436 xs = &sc->sc_slots[slot]; 2437 xr = xs->xs_xr[dci]; 2438 2439 /* sanity check */ 2440 KASSERT(xr != NULL); 2441 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= 
sc->sc_maxslots, 2442 "invalid xs_idx %u slot %u", xs->xs_idx, slot); 2443 2444 int idx = 0; 2445 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) { 2446 if (xhci_trb_get_idx(xr, trb_0, &idx)) { 2447 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0); 2448 return; 2449 } 2450 xx = xr->xr_cookies[idx]; 2451 2452 /* clear cookie of consumed TRB */ 2453 xr->xr_cookies[idx] = NULL; 2454 2455 /* 2456 * xx is NULL if pipe is opened but xfer is not started. 2457 * It happens when stopping idle pipe. 2458 */ 2459 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) { 2460 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju", 2461 idx, (uintptr_t)xx, trbcode, dci); 2462 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0, 2463 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)), 2464 0, 0); 2465 return; 2466 } 2467 } else { 2468 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. */ 2469 xx = (void *)(uintptr_t)(trb_0 & ~0x3); 2470 } 2471 /* XXX this may not happen */ 2472 if (xx == NULL) { 2473 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0); 2474 return; 2475 } 2476 xfer = &xx->xx_xfer; 2477 /* XXX this may happen when detaching */ 2478 if (xfer == NULL) { 2479 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx", 2480 (uintptr_t)xx, trb_0, 0, 0); 2481 return; 2482 } 2483 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0); 2484 /* XXX I dunno why this happens */ 2485 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer); 2486 2487 if (!xfer->ux_pipe->up_repeat && 2488 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) { 2489 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer, 2490 0, 0, 0); 2491 return; 2492 } 2493 2494 const uint8_t xfertype = 2495 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes); 2496 2497 /* 4.11.5.2 Event Data TRB */ 2498 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) { 2499 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx" 2500 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0); 2501 if ((trb_0 & 0x3) == 0x3) { 2502 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2); 2503 } 2504 } 2505 2506 switch (trbcode) { 2507 case XHCI_TRB_ERROR_SHORT_PKT: 2508 case XHCI_TRB_ERROR_SUCCESS: 2509 /* 2510 * A ctrl transfer can generate two events if it has a Data 2511 * stage. A short data stage can be OK and should not 2512 * complete the transfer as the status stage needs to be 2513 * performed. 2514 * 2515 * Note: Data and Status stage events point at same xfer. 2516 * ux_actlen and ux_dmabuf will be passed to 2517 * usb_transfer_complete after the Status stage event. 2518 * 2519 * It can be distinguished which stage generates the event: 2520 * + by checking least 3 bits of trb_0 if ED==1. 2521 * (see xhci_device_ctrl_start). 2522 * + by checking the type of original TRB if ED==0. 2523 * 2524 * In addition, intr, bulk, and isoc transfer currently 2525 * consists of single TD, so the "skip" is not needed. 2526 * ctrl xfer uses EVENT_DATA, and others do not. 2527 * Thus driver can switch the flow by checking ED bit. 
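		 * Here the low bits of the Event Data pointer serve as the
		 * stage tag: 0x3 marks the Data stage and 0x0 the Status
		 * stage (set up by xhci_device_ctrl_start).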
2528 */ 2529 if (xfertype == UE_ISOCHRONOUS) { 2530 xfer->ux_frlengths[xx->xx_isoc_done] -= 2531 XHCI_TRB_2_REM_GET(trb_2); 2532 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done]; 2533 } else if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) { 2534 if (xfer->ux_actlen == 0) 2535 xfer->ux_actlen = xfer->ux_length - 2536 XHCI_TRB_2_REM_GET(trb_2); 2537 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)) 2538 == XHCI_TRB_TYPE_DATA_STAGE) { 2539 return; 2540 } 2541 } else if ((trb_0 & 0x3) == 0x3) { 2542 return; 2543 } 2544 err = USBD_NORMAL_COMPLETION; 2545 break; 2546 case XHCI_TRB_ERROR_STOPPED: 2547 case XHCI_TRB_ERROR_LENGTH: 2548 case XHCI_TRB_ERROR_STOPPED_SHORT: 2549 err = USBD_IOERROR; 2550 break; 2551 case XHCI_TRB_ERROR_STALL: 2552 case XHCI_TRB_ERROR_BABBLE: 2553 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0); 2554 xhci_pipe_restart_async(xfer->ux_pipe); 2555 err = USBD_STALLED; 2556 break; 2557 default: 2558 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0); 2559 err = USBD_IOERROR; 2560 break; 2561 } 2562 2563 if (xfertype == UE_ISOCHRONOUS) { 2564 switch (trbcode) { 2565 case XHCI_TRB_ERROR_SHORT_PKT: 2566 case XHCI_TRB_ERROR_SUCCESS: 2567 break; 2568 case XHCI_TRB_ERROR_MISSED_SERVICE: 2569 case XHCI_TRB_ERROR_RING_UNDERRUN: 2570 case XHCI_TRB_ERROR_RING_OVERRUN: 2571 default: 2572 xfer->ux_frlengths[xx->xx_isoc_done] = 0; 2573 break; 2574 } 2575 if (++xx->xx_isoc_done < xfer->ux_nframes) 2576 return; 2577 } 2578 2579 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 || 2580 (trb_0 & 0x3) == 0x0) { 2581 /* 2582 * Try to claim this xfer for completion. If it has 2583 * already completed or aborted, drop it on the floor. 2584 */ 2585 if (!usbd_xfer_trycomplete(xfer)) 2586 return; 2587 2588 /* Set the status. */ 2589 xfer->ux_status = err; 2590 2591 usb_transfer_complete(xfer); 2592 } 2593 } 2594 2595 /* Process Command complete events */ 2596 static void 2597 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb) 2598 { 2599 uint64_t trb_0; 2600 uint32_t trb_2, trb_3; 2601 2602 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2603 2604 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2605 2606 trb_0 = le64toh(trb->trb_0); 2607 trb_2 = le32toh(trb->trb_2); 2608 trb_3 = le32toh(trb->trb_3); 2609 2610 if (trb_0 == sc->sc_command_addr) { 2611 sc->sc_resultpending = false; 2612 2613 sc->sc_result_trb.trb_0 = trb_0; 2614 sc->sc_result_trb.trb_2 = trb_2; 2615 sc->sc_result_trb.trb_3 = trb_3; 2616 if (XHCI_TRB_2_ERROR_GET(trb_2) != 2617 XHCI_TRB_ERROR_SUCCESS) { 2618 DPRINTFN(1, "command completion " 2619 "failure: 0x%016jx 0x%08jx 0x%08jx", 2620 trb_0, trb_2, trb_3, 0); 2621 } 2622 cv_signal(&sc->sc_command_cv); 2623 } else { 2624 DPRINTFN(1, "spurious event: %#jx 0x%016jx " 2625 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3); 2626 } 2627 } 2628 2629 /* 2630 * Process events. 2631 * called from xhci_softintr 2632 */ 2633 static void 2634 xhci_handle_event(struct xhci_softc * const sc, 2635 const struct xhci_trb * const trb) 2636 { 2637 uint64_t trb_0; 2638 uint32_t trb_2, trb_3; 2639 2640 XHCIHIST_FUNC(); 2641 2642 trb_0 = le64toh(trb->trb_0); 2643 trb_2 = le32toh(trb->trb_2); 2644 trb_3 = le32toh(trb->trb_3); 2645 2646 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx", 2647 (uintptr_t)trb, trb_0, trb_2, trb_3); 2648 2649 /* 2650 * 4.11.3.1, 6.4.2.1 2651 * TRB Pointer is invalid for these completion codes. 
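	 * Ring Underrun and Ring Overrun are per-endpoint isoc conditions
	 * with no TRB to look up, so they are simply dropped here.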
2652 */ 2653 switch (XHCI_TRB_2_ERROR_GET(trb_2)) { 2654 case XHCI_TRB_ERROR_RING_UNDERRUN: 2655 case XHCI_TRB_ERROR_RING_OVERRUN: 2656 case XHCI_TRB_ERROR_VF_RING_FULL: 2657 return; 2658 default: 2659 if (trb_0 == 0) { 2660 return; 2661 } 2662 break; 2663 } 2664 2665 switch (XHCI_TRB_3_TYPE_GET(trb_3)) { 2666 case XHCI_TRB_EVENT_TRANSFER: 2667 xhci_event_transfer(sc, trb); 2668 break; 2669 case XHCI_TRB_EVENT_CMD_COMPLETE: 2670 xhci_event_cmd(sc, trb); 2671 break; 2672 case XHCI_TRB_EVENT_PORT_STS_CHANGE: 2673 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff)); 2674 break; 2675 default: 2676 break; 2677 } 2678 } 2679 2680 static void 2681 xhci_softintr(void *v) 2682 { 2683 struct usbd_bus * const bus = v; 2684 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2685 struct xhci_ring * const er = sc->sc_er; 2686 struct xhci_trb *trb; 2687 int i, j, k, bn; 2688 2689 XHCIHIST_FUNC(); 2690 2691 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2692 2693 i = er->xr_ep; 2694 j = er->xr_cs; 2695 2696 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0); 2697 2698 /* 2699 * Handle deferred root intr xfer, in case we just switched off 2700 * polling. It's not safe to complete root intr xfers while 2701 * polling -- too much kernel machinery gets involved. 2702 */ 2703 if (!xhci_polling_p(sc)) { 2704 for (bn = 0; bn < 2; bn++) { 2705 if (__predict_false(sc->sc_intrxfer_deferred[bn])) { 2706 sc->sc_intrxfer_deferred[bn] = false; 2707 usb_transfer_complete(sc->sc_intrxfer[bn]); 2708 } 2709 } 2710 } 2711 2712 while (1) { 2713 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE, 2714 BUS_DMASYNC_POSTREAD); 2715 trb = &er->xr_trb[i]; 2716 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0; 2717 2718 if (j != k) 2719 break; 2720 2721 xhci_handle_event(sc, trb); 2722 2723 i++; 2724 if (i == er->xr_ntrb) { 2725 i = 0; 2726 j ^= 1; 2727 } 2728 } 2729 2730 er->xr_ep = i; 2731 er->xr_cs = j; 2732 2733 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) | 2734 XHCI_ERDP_BUSY); 2735 2736 DPRINTFN(16, "ends", 0, 0, 0, 0); 2737 2738 return; 2739 } 2740 2741 static void 2742 xhci_poll(struct usbd_bus *bus) 2743 { 2744 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2745 2746 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2747 2748 mutex_enter(&sc->sc_intr_lock); 2749 int ret = xhci_intr1(sc); 2750 if (ret) { 2751 xhci_softintr(bus); 2752 } 2753 mutex_exit(&sc->sc_intr_lock); 2754 2755 return; 2756 } 2757 2758 static struct usbd_xfer * 2759 xhci_allocx(struct usbd_bus *bus, unsigned int nframes) 2760 { 2761 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2762 struct xhci_xfer *xx; 2763 u_int ntrbs; 2764 2765 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2766 2767 ntrbs = uimax(3, nframes); 2768 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs; 2769 2770 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK); 2771 if (xx != NULL) { 2772 memset(xx, 0, sizeof(*xx)); 2773 if (ntrbs > 0) { 2774 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP); 2775 xx->xx_ntrb = ntrbs; 2776 } 2777 #ifdef DIAGNOSTIC 2778 xx->xx_xfer.ux_state = XFER_BUSY; 2779 #endif 2780 } 2781 2782 return &xx->xx_xfer; 2783 } 2784 2785 static void 2786 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) 2787 { 2788 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2789 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 2790 2791 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2792 2793 #ifdef DIAGNOSTIC 2794 if (xfer->ux_state != XFER_BUSY && 2795 xfer->ux_status != USBD_NOT_STARTED) { 2796 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx", 2797 (uintptr_t)xfer, 
xfer->ux_state, 0, 0); 2798 } 2799 xfer->ux_state = XFER_FREE; 2800 #endif 2801 if (xx->xx_ntrb > 0) { 2802 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb)); 2803 xx->xx_trb = NULL; 2804 xx->xx_ntrb = 0; 2805 } 2806 pool_cache_put(sc->sc_xferpool, xx); 2807 } 2808 2809 static bool 2810 xhci_dying(struct usbd_bus *bus) 2811 { 2812 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2813 2814 return sc->sc_dying; 2815 } 2816 2817 static void 2818 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock) 2819 { 2820 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2821 2822 *lock = &sc->sc_lock; 2823 } 2824 2825 extern uint32_t usb_cookie_no; 2826 2827 /* 2828 * xHCI 4.3 2829 * Called when uhub_explore finds a new device (via usbd_new_device). 2830 * Port initialization and speed detection (4.3.1) are already done in uhub.c. 2831 * This function does: 2832 * Allocate and construct dev structure of default endpoint (ep0). 2833 * Allocate and open pipe of ep0. 2834 * Enable slot and initialize slot context. 2835 * Set Address. 2836 * Read initial device descriptor. 2837 * Determine initial MaxPacketSize (mps) by speed. 2838 * Read full device descriptor. 2839 * Register this device. 2840 * Finally state of device transitions ADDRESSED. 2841 */ 2842 static usbd_status 2843 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth, 2844 int speed, int port, struct usbd_port *up) 2845 { 2846 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2847 struct usbd_device *dev; 2848 usbd_status err; 2849 usb_device_descriptor_t *dd; 2850 struct xhci_slot *xs; 2851 uint32_t *cp; 2852 2853 XHCIHIST_FUNC(); 2854 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx", 2855 port, depth, speed, (uintptr_t)up); 2856 2857 KASSERT(KERNEL_LOCKED_P()); 2858 2859 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP); 2860 dev->ud_bus = bus; 2861 dev->ud_quirks = &usbd_no_quirk; 2862 dev->ud_addr = 0; 2863 dev->ud_ddesc.bMaxPacketSize = 0; 2864 dev->ud_config = USB_UNCONFIG_NO; 2865 dev->ud_configidx = USB_UNCONFIG_INDEX; 2866 dev->ud_depth = depth; 2867 dev->ud_powersrc = up; 2868 dev->ud_myhub = up->up_parent; 2869 dev->ud_speed = speed; 2870 dev->ud_langid = USBD_NOLANG; 2871 dev->ud_cookie.cookie = ++usb_cookie_no; 2872 2873 /* Set up default endpoint handle. */ 2874 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc; 2875 /* doesn't matter, just don't let it uninitialized */ 2876 dev->ud_ep0.ue_toggle = 0; 2877 2878 /* Set up default endpoint descriptor. */ 2879 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE; 2880 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT; 2881 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT; 2882 dev->ud_ep0desc.bmAttributes = UE_CONTROL; 2883 dev->ud_ep0desc.bInterval = 0; 2884 2885 /* 4.3, 4.8.2.1 */ 2886 switch (speed) { 2887 case USB_SPEED_SUPER: 2888 case USB_SPEED_SUPER_PLUS: 2889 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET); 2890 break; 2891 case USB_SPEED_FULL: 2892 /* XXX using 64 as initial mps of ep0 in FS */ 2893 case USB_SPEED_HIGH: 2894 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET); 2895 break; 2896 case USB_SPEED_LOW: 2897 default: 2898 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET); 2899 break; 2900 } 2901 2902 up->up_dev = dev; 2903 2904 dd = &dev->ud_ddesc; 2905 2906 if (depth == 0 && port == 0) { 2907 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL); 2908 bus->ub_devices[USB_ROOTHUB_INDEX] = dev; 2909 2910 /* Establish the default pipe. 
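		 * For the root hub this only wires up the roothub_ctrl
		 * methods in xhci_open; no slot or command is involved.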
*/ 2911 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0, 2912 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0); 2913 if (err) { 2914 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0); 2915 goto bad; 2916 } 2917 err = usbd_get_initial_ddesc(dev, dd); 2918 if (err) { 2919 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0); 2920 goto bad; 2921 } 2922 } else { 2923 uint8_t slot = 0; 2924 2925 /* 4.3.2 */ 2926 err = xhci_enable_slot(sc, &slot); 2927 if (err) { 2928 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0); 2929 goto bad; 2930 } 2931 2932 xs = &sc->sc_slots[slot]; 2933 dev->ud_hcpriv = xs; 2934 2935 /* 4.3.3 initialize slot structure */ 2936 err = xhci_init_slot(dev, slot); 2937 if (err) { 2938 DPRINTFN(1, "init slot %ju", err, 0, 0, 0); 2939 dev->ud_hcpriv = NULL; 2940 /* 2941 * We have to disable_slot here because 2942 * xs->xs_idx == 0 when xhci_init_slot fails, 2943 * in that case usbd_remove_dev won't work. 2944 */ 2945 mutex_enter(&sc->sc_lock); 2946 xhci_disable_slot(sc, slot); 2947 mutex_exit(&sc->sc_lock); 2948 goto bad; 2949 } 2950 2951 /* 2952 * We have to establish the default pipe _after_ slot 2953 * structure has been prepared. 2954 */ 2955 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0, 2956 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0); 2957 if (err) { 2958 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0, 2959 0); 2960 goto bad; 2961 } 2962 2963 /* 4.3.4 Address Assignment */ 2964 err = xhci_set_address(dev, slot, false); 2965 if (err) { 2966 DPRINTFN(1, "failed! to set address: %ju", err, 0, 0, 0); 2967 goto bad; 2968 } 2969 2970 /* Allow device time to set new address */ 2971 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE); 2972 2973 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 2974 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT); 2975 HEXDUMP("slot context", cp, sc->sc_ctxsz); 2976 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3])); 2977 DPRINTFN(4, "device address %ju", addr, 0, 0, 0); 2978 /* 2979 * XXX ensure we know when the hardware does something 2980 * we can't yet cope with 2981 */ 2982 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr); 2983 dev->ud_addr = addr; 2984 2985 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL, 2986 "addr %d already allocated", dev->ud_addr); 2987 /* 2988 * The root hub is given its own slot 2989 */ 2990 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev; 2991 2992 err = usbd_get_initial_ddesc(dev, dd); 2993 if (err) { 2994 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0); 2995 goto bad; 2996 } 2997 2998 /* 4.8.2.1 */ 2999 if (USB_IS_SS(speed)) { 3000 if (dd->bMaxPacketSize != 9) { 3001 printf("%s: invalid mps 2^%u for SS ep0," 3002 " using 512\n", 3003 device_xname(sc->sc_dev), 3004 dd->bMaxPacketSize); 3005 dd->bMaxPacketSize = 9; 3006 } 3007 USETW(dev->ud_ep0desc.wMaxPacketSize, 3008 (1 << dd->bMaxPacketSize)); 3009 } else 3010 USETW(dev->ud_ep0desc.wMaxPacketSize, 3011 dd->bMaxPacketSize); 3012 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0); 3013 err = xhci_update_ep0_mps(sc, xs, 3014 UGETW(dev->ud_ep0desc.wMaxPacketSize)); 3015 if (err) { 3016 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0); 3017 goto bad; 3018 } 3019 } 3020 3021 err = usbd_reload_device_desc(dev); 3022 if (err) { 3023 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0); 3024 goto bad; 3025 } 3026 3027 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,", 3028 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0); 3029 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,", 3030 dd->bDeviceClass, dd->bDeviceSubClass, 3031 dd->bDeviceProtocol, 
0); 3032 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd", 3033 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations, 3034 dev->ud_speed); 3035 3036 usbd_get_device_strings(dev); 3037 3038 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev); 3039 3040 if (depth == 0 && port == 0) { 3041 usbd_attach_roothub(parent, dev); 3042 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0); 3043 return USBD_NORMAL_COMPLETION; 3044 } 3045 3046 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr); 3047 bad: 3048 if (err != USBD_NORMAL_COMPLETION) { 3049 if (depth == 0 && port == 0 && dev->ud_pipe0) 3050 usbd_kill_pipe(dev->ud_pipe0); 3051 usbd_remove_device(dev, up); 3052 } 3053 3054 return err; 3055 } 3056 3057 static usbd_status 3058 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp, 3059 size_t ntrb, size_t align) 3060 { 3061 size_t size = ntrb * XHCI_TRB_SIZE; 3062 struct xhci_ring *xr; 3063 3064 XHCIHIST_FUNC(); 3065 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx", 3066 (uintptr_t)*xrp, ntrb, align, 0); 3067 3068 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP); 3069 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0); 3070 3071 int err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align, 3072 USBMALLOC_ZERO, &xr->xr_dma); 3073 if (err) { 3074 kmem_free(xr, sizeof(struct xhci_ring)); 3075 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0); 3076 return err; 3077 } 3078 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 3079 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP); 3080 xr->xr_trb = xhci_ring_trbv(xr, 0); 3081 xr->xr_ntrb = ntrb; 3082 xr->is_halted = false; 3083 xhci_host_dequeue(xr); 3084 *xrp = xr; 3085 3086 return USBD_NORMAL_COMPLETION; 3087 } 3088 3089 static void 3090 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr) 3091 { 3092 if (*xr == NULL) 3093 return; 3094 3095 usb_freemem(&(*xr)->xr_dma); 3096 mutex_destroy(&(*xr)->xr_lock); 3097 kmem_free((*xr)->xr_cookies, 3098 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb); 3099 kmem_free(*xr, sizeof(struct xhci_ring)); 3100 *xr = NULL; 3101 } 3102 3103 static void 3104 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr, 3105 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs) 3106 { 3107 size_t i; 3108 u_int ri; 3109 u_int cs; 3110 uint64_t parameter; 3111 uint32_t status; 3112 uint32_t control; 3113 3114 XHCIHIST_FUNC(); 3115 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju", 3116 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0); 3117 3118 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u", 3119 ntrbs, xr->xr_ntrb); 3120 for (i = 0; i < ntrbs; i++) { 3121 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr, 3122 (uintptr_t)trbs, i, 0); 3123 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx", 3124 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0); 3125 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) != 3126 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3); 3127 } 3128 3129 ri = xr->xr_ep; 3130 cs = xr->xr_cs; 3131 3132 /* 3133 * Although the xhci hardware can do scatter/gather dma from 3134 * arbitrary sized buffers, there is a non-obvious restriction 3135 * that a LINK trb is only allowed at the end of a burst of 3136 * transfers - which might be 16kB. 3137 * Arbitrary aligned LINK trb definitely fail on Ivy bridge. 3138 * The simple solution is not to allow a LINK trb in the middle 3139 * of anything - as here. 
3140 * XXX: (dsl) There are xhci controllers out there (eg some made by 3141 * ASMedia) that seem to lock up if they process a LINK trb but 3142 * cannot process the linked-to trb yet. 3143 * The code should write the 'cycle' bit on the link trb AFTER 3144 * adding the other trb. 3145 */ 3146 u_int firstep = xr->xr_ep; 3147 u_int firstcs = xr->xr_cs; 3148 3149 for (i = 0; i < ntrbs; ) { 3150 u_int oldri = ri; 3151 u_int oldcs = cs; 3152 3153 if (ri >= (xr->xr_ntrb - 1)) { 3154 /* Put Link TD at the end of ring */ 3155 parameter = xhci_ring_trbp(xr, 0); 3156 status = 0; 3157 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) | 3158 XHCI_TRB_3_TC_BIT; 3159 xr->xr_cookies[ri] = NULL; 3160 xr->xr_ep = 0; 3161 xr->xr_cs ^= 1; 3162 ri = xr->xr_ep; 3163 cs = xr->xr_cs; 3164 } else { 3165 parameter = trbs[i].trb_0; 3166 status = trbs[i].trb_2; 3167 control = trbs[i].trb_3; 3168 3169 xr->xr_cookies[ri] = cookie; 3170 ri++; 3171 i++; 3172 } 3173 /* 3174 * If this is a first TRB, mark it invalid to prevent 3175 * xHC from running it immediately. 3176 */ 3177 if (oldri == firstep) { 3178 if (oldcs) { 3179 control &= ~XHCI_TRB_3_CYCLE_BIT; 3180 } else { 3181 control |= XHCI_TRB_3_CYCLE_BIT; 3182 } 3183 } else { 3184 if (oldcs) { 3185 control |= XHCI_TRB_3_CYCLE_BIT; 3186 } else { 3187 control &= ~XHCI_TRB_3_CYCLE_BIT; 3188 } 3189 } 3190 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control); 3191 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri, 3192 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE); 3193 } 3194 3195 /* Now invert cycle bit of first TRB */ 3196 if (firstcs) { 3197 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT); 3198 } else { 3199 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT); 3200 } 3201 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep, 3202 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE); 3203 3204 xr->xr_ep = ri; 3205 xr->xr_cs = cs; 3206 3207 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep, 3208 xr->xr_cs, 0); 3209 } 3210 3211 static inline void 3212 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr, 3213 struct xhci_xfer *xx, u_int ntrb) 3214 { 3215 KASSERT(ntrb <= xx->xx_ntrb); 3216 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb); 3217 } 3218 3219 /* 3220 * Stop execution commands, purge all commands on command ring, and 3221 * rewind dequeue pointer. 3222 */ 3223 static void 3224 xhci_abort_command(struct xhci_softc *sc) 3225 { 3226 struct xhci_ring * const cr = sc->sc_cr; 3227 uint64_t crcr; 3228 int i; 3229 3230 XHCIHIST_FUNC(); 3231 XHCIHIST_CALLARGS("command %#jx timeout, aborting", 3232 sc->sc_command_addr, 0, 0, 0); 3233 3234 mutex_enter(&cr->xr_lock); 3235 3236 /* 4.6.1.2 Aborting a Command */ 3237 crcr = xhci_op_read_8(sc, XHCI_CRCR); 3238 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA); 3239 3240 for (i = 0; i < 500; i++) { 3241 crcr = xhci_op_read_8(sc, XHCI_CRCR); 3242 if ((crcr & XHCI_CRCR_LO_CRR) == 0) 3243 break; 3244 usb_delay_ms(&sc->sc_bus, 1); 3245 } 3246 if ((crcr & XHCI_CRCR_LO_CRR) != 0) { 3247 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0); 3248 /* reset HC here? */ 3249 } 3250 3251 /* reset command ring dequeue pointer */ 3252 cr->xr_ep = 0; 3253 cr->xr_cs = 1; 3254 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs); 3255 3256 mutex_exit(&cr->xr_lock); 3257 } 3258 3259 /* 3260 * Put a command on command ring, ring bell, set timer, and cv_timedwait. 3261 * Command completion is notified by cv_signal from xhci_event_cmd() 3262 * (called from xhci_softint), or timed-out. 
3263 * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(), 3264 * then do_command examines it. 3265 */ 3266 static usbd_status 3267 xhci_do_command_locked(struct xhci_softc * const sc, 3268 struct xhci_soft_trb * const trb, int timeout) 3269 { 3270 struct xhci_ring * const cr = sc->sc_cr; 3271 usbd_status err; 3272 3273 XHCIHIST_FUNC(); 3274 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx", 3275 trb->trb_0, trb->trb_2, trb->trb_3, 0); 3276 3277 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx"); 3278 KASSERT(mutex_owned(&sc->sc_lock)); 3279 3280 while (sc->sc_command_addr != 0 || 3281 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp)) 3282 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock); 3283 if (sc->sc_suspendresume_failed) 3284 return USBD_IOERROR; 3285 3286 /* 3287 * If enqueue pointer points at last of ring, it's Link TRB, 3288 * command TRB will be stored in 0th TRB. 3289 */ 3290 if (cr->xr_ep == cr->xr_ntrb - 1) 3291 sc->sc_command_addr = xhci_ring_trbp(cr, 0); 3292 else 3293 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep); 3294 3295 sc->sc_resultpending = true; 3296 3297 mutex_enter(&cr->xr_lock); 3298 xhci_ring_put(sc, cr, NULL, trb, 1); 3299 mutex_exit(&cr->xr_lock); 3300 3301 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0); 3302 3303 while (sc->sc_resultpending) { 3304 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock, 3305 MAX(1, mstohz(timeout))) == EWOULDBLOCK) { 3306 xhci_abort_command(sc); 3307 err = USBD_TIMEOUT; 3308 goto timedout; 3309 } 3310 } 3311 3312 trb->trb_0 = sc->sc_result_trb.trb_0; 3313 trb->trb_2 = sc->sc_result_trb.trb_2; 3314 trb->trb_3 = sc->sc_result_trb.trb_3; 3315 3316 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx", 3317 trb->trb_0, trb->trb_2, trb->trb_3, 0); 3318 3319 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) { 3320 case XHCI_TRB_ERROR_SUCCESS: 3321 err = USBD_NORMAL_COMPLETION; 3322 break; 3323 default: 3324 case 192 ... 223: 3325 DPRINTFN(5, "error %#jx", 3326 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0); 3327 err = USBD_IOERROR; 3328 break; 3329 case 224 ... 255: 3330 err = USBD_NORMAL_COMPLETION; 3331 break; 3332 } 3333 3334 timedout: 3335 sc->sc_resultpending = false; 3336 sc->sc_command_addr = 0; 3337 cv_broadcast(&sc->sc_cmdbusy_cv); 3338 3339 return err; 3340 } 3341 3342 static usbd_status 3343 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb, 3344 int timeout) 3345 { 3346 3347 mutex_enter(&sc->sc_lock); 3348 usbd_status ret = xhci_do_command_locked(sc, trb, timeout); 3349 mutex_exit(&sc->sc_lock); 3350 3351 return ret; 3352 } 3353 3354 static usbd_status 3355 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp) 3356 { 3357 struct xhci_soft_trb trb; 3358 usbd_status err; 3359 3360 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3361 3362 trb.trb_0 = 0; 3363 trb.trb_2 = 0; 3364 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT); 3365 3366 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3367 if (err != USBD_NORMAL_COMPLETION) { 3368 return err; 3369 } 3370 3371 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3); 3372 3373 return err; 3374 } 3375 3376 /* 3377 * xHCI 4.6.4 3378 * Deallocate ring and device/input context DMA buffers, and disable_slot. 3379 * All endpoints in the slot should be stopped. 3380 * Should be called with sc_lock held. 
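 * On success the slot's DCBAA entry is cleared and the xhci_slot is
 * zeroed, so xs_idx == 0 again marks it as free.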
3381 */ 3382 static usbd_status 3383 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot) 3384 { 3385 struct xhci_soft_trb trb; 3386 struct xhci_slot *xs; 3387 usbd_status err; 3388 3389 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3390 3391 if (sc->sc_dying) 3392 return USBD_IOERROR; 3393 3394 trb.trb_0 = 0; 3395 trb.trb_2 = 0; 3396 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) | 3397 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT); 3398 3399 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 3400 3401 if (!err) { 3402 xs = &sc->sc_slots[slot]; 3403 if (xs->xs_idx != 0) { 3404 xhci_free_slot(sc, xs); 3405 xhci_set_dcba(sc, 0, slot); 3406 memset(xs, 0, sizeof(*xs)); 3407 } 3408 } 3409 3410 return err; 3411 } 3412 3413 /* 3414 * Set address of device and transition slot state from ENABLED to ADDRESSED 3415 * if Block Setaddress Request (BSR) is false. 3416 * If BSR==true, transition slot state from ENABLED to DEFAULT. 3417 * see xHCI 1.1 4.5.3, 3.3.4 3418 * Should be called without sc_lock held. 3419 */ 3420 static usbd_status 3421 xhci_address_device(struct xhci_softc * const sc, 3422 uint64_t icp, uint8_t slot_id, bool bsr) 3423 { 3424 struct xhci_soft_trb trb; 3425 usbd_status err; 3426 3427 XHCIHIST_FUNC(); 3428 if (bsr) { 3429 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr", 3430 icp, slot_id, 0, 0); 3431 } else { 3432 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr", 3433 icp, slot_id, 0, 0); 3434 } 3435 3436 trb.trb_0 = icp; 3437 trb.trb_2 = 0; 3438 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) | 3439 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) | 3440 (bsr ? XHCI_TRB_3_BSR_BIT : 0); 3441 3442 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3443 3444 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS) 3445 err = USBD_NO_ADDR; 3446 3447 return err; 3448 } 3449 3450 static usbd_status 3451 xhci_update_ep0_mps(struct xhci_softc * const sc, 3452 struct xhci_slot * const xs, u_int mps) 3453 { 3454 struct xhci_soft_trb trb; 3455 usbd_status err; 3456 uint32_t * cp; 3457 3458 XHCIHIST_FUNC(); 3459 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0); 3460 3461 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 3462 cp[0] = htole32(0); 3463 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL)); 3464 3465 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL)); 3466 cp[1] &= ~htole32(XHCI_EPCTX_1_MAXP_SIZE_MASK); 3467 cp[1] |= htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps)); 3468 3469 /* sync input contexts before they are read from memory */ 3470 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 3471 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0), 3472 sc->sc_ctxsz * 4); 3473 3474 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 3475 trb.trb_2 = 0; 3476 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 3477 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX); 3478 3479 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3480 return err; 3481 } 3482 3483 static void 3484 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si) 3485 { 3486 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0); 3487 3488 XHCIHIST_FUNC(); 3489 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd", 3490 (uintptr_t)&dcbaa[si], dcba, si, 0); 3491 3492 dcbaa[si] = htole64(dcba); 3493 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t), 3494 BUS_DMASYNC_PREWRITE); 3495 } 3496 3497 /* 3498 * Allocate device and input context DMA buffer, and 3499 * TRB DMA buffer for each endpoint. 
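 * (The per-endpoint transfer rings themselves are allocated later, in
 * xhci_open; here only the xs_xr[] array is cleared.)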
3500 */ 3501 static usbd_status 3502 xhci_init_slot(struct usbd_device *dev, uint32_t slot) 3503 { 3504 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 3505 struct xhci_slot *xs; 3506 3507 XHCIHIST_FUNC(); 3508 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0); 3509 3510 xs = &sc->sc_slots[slot]; 3511 3512 /* allocate contexts */ 3513 int err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz, 3514 USBMALLOC_ZERO, &xs->xs_dc_dma); 3515 if (err) { 3516 DPRINTFN(1, "failed to allocmem output device context %jd", 3517 err, 0, 0, 0); 3518 return USBD_NOMEM; 3519 } 3520 3521 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz, 3522 USBMALLOC_ZERO, &xs->xs_ic_dma); 3523 if (err) { 3524 DPRINTFN(1, "failed to allocmem input device context %jd", 3525 err, 0, 0, 0); 3526 goto bad1; 3527 } 3528 3529 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr)); 3530 xs->xs_idx = slot; 3531 3532 return USBD_NORMAL_COMPLETION; 3533 3534 bad1: 3535 usb_freemem(&xs->xs_dc_dma); 3536 xs->xs_idx = 0; 3537 return USBD_NOMEM; 3538 } 3539 3540 static void 3541 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs) 3542 { 3543 u_int dci; 3544 3545 XHCIHIST_FUNC(); 3546 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0); 3547 3548 /* deallocate all allocated rings in the slot */ 3549 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) { 3550 if (xs->xs_xr[dci] != NULL) 3551 xhci_ring_free(sc, &xs->xs_xr[dci]); 3552 } 3553 usb_freemem(&xs->xs_ic_dma); 3554 usb_freemem(&xs->xs_dc_dma); 3555 xs->xs_idx = 0; 3556 } 3557 3558 /* 3559 * Setup slot context, set Device Context Base Address, and issue 3560 * Set Address Device command. 3561 */ 3562 static usbd_status 3563 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr) 3564 { 3565 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 3566 struct xhci_slot *xs; 3567 usbd_status err; 3568 3569 XHCIHIST_FUNC(); 3570 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0); 3571 3572 xs = &sc->sc_slots[slot]; 3573 3574 xhci_setup_ctx(dev->ud_pipe0); 3575 3576 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0), 3577 sc->sc_ctxsz * 3); 3578 3579 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot); 3580 3581 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr); 3582 3583 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 3584 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0), 3585 sc->sc_ctxsz * 2); 3586 3587 return err; 3588 } 3589 3590 /* 3591 * 4.8.2, 6.2.3.2 3592 * construct slot/endpoint context parameters and do syncmem 3593 */ 3594 static void 3595 xhci_setup_ctx(struct usbd_pipe *pipe) 3596 { 3597 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 3598 struct usbd_device *dev = pipe->up_dev; 3599 struct xhci_slot * const xs = dev->ud_hcpriv; 3600 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3601 const u_int dci = xhci_ep_get_dci(ed); 3602 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 3603 uint32_t *cp; 3604 uint8_t speed = dev->ud_speed; 3605 3606 XHCIHIST_FUNC(); 3607 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju", 3608 (uintptr_t)pipe, xs->xs_idx, dci, speed); 3609 3610 /* set up initial input control context */ 3611 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 3612 cp[0] = htole32(0); 3613 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci)); 3614 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT)); 3615 cp[7] = htole32(0); 3616 3617 /* set up input slot context */ 3618 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT)); 
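	/*
	 * The slot context words are assembled in host order first and
	 * only converted with htole32() once the route and TT fields
	 * have been merged in below.
	 */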
3619 cp[0] = 3620 XHCI_SCTX_0_CTX_NUM_SET(dci) | 3621 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed)); 3622 cp[1] = 0; 3623 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0); 3624 cp[3] = 0; 3625 xhci_setup_route(pipe, cp); 3626 xhci_setup_tthub(pipe, cp); 3627 3628 cp[0] = htole32(cp[0]); 3629 cp[1] = htole32(cp[1]); 3630 cp[2] = htole32(cp[2]); 3631 cp[3] = htole32(cp[3]); 3632 3633 /* set up input endpoint context */ 3634 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci)); 3635 cp[0] = 3636 XHCI_EPCTX_0_EPSTATE_SET(0) | 3637 XHCI_EPCTX_0_MULT_SET(0) | 3638 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) | 3639 XHCI_EPCTX_0_LSA_SET(0) | 3640 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0); 3641 cp[1] = 3642 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) | 3643 XHCI_EPCTX_1_HID_SET(0) | 3644 XHCI_EPCTX_1_MAXB_SET(0); 3645 3646 if (xfertype != UE_ISOCHRONOUS) 3647 cp[1] |= XHCI_EPCTX_1_CERR_SET(3); 3648 3649 xhci_setup_maxburst(pipe, cp); 3650 3651 DPRINTFN(4, "setting on dci %ju ival %ju mult %ju mps %#jx", 3652 dci, XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_0_MULT_GET(cp[0]), 3653 XHCI_EPCTX_1_MAXP_SIZE_GET(cp[1])); 3654 DPRINTFN(4, " maxburst %ju mep %#jx atl %#jx", 3655 XHCI_EPCTX_1_MAXB_GET(cp[1]), 3656 (XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_GET(cp[0]) << 16) + 3657 XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_GET(cp[4]), 3658 XHCI_EPCTX_4_AVG_TRB_LEN_GET(cp[4]), 0); 3659 3660 /* rewind TR dequeue pointer in xHC */ 3661 /* can't use xhci_ep_get_dci() yet? */ 3662 *(uint64_t *)(&cp[2]) = htole64( 3663 xhci_ring_trbp(xs->xs_xr[dci], 0) | 3664 XHCI_EPCTX_2_DCS_SET(1)); 3665 3666 cp[0] = htole32(cp[0]); 3667 cp[1] = htole32(cp[1]); 3668 cp[4] = htole32(cp[4]); 3669 3670 /* rewind TR dequeue pointer in driver */ 3671 struct xhci_ring *xr = xs->xs_xr[dci]; 3672 mutex_enter(&xr->xr_lock); 3673 xhci_host_dequeue(xr); 3674 mutex_exit(&xr->xr_lock); 3675 3676 /* sync input contexts before they are read from memory */ 3677 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 3678 } 3679 3680 /* 3681 * Setup route string and roothub port of given device for slot context 3682 */ 3683 static void 3684 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp) 3685 { 3686 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 3687 struct usbd_device *dev = pipe->up_dev; 3688 struct usbd_port *up = dev->ud_powersrc; 3689 struct usbd_device *hub; 3690 struct usbd_device *adev; 3691 uint8_t rhport = 0; 3692 uint32_t route = 0; 3693 3694 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3695 3696 /* Locate root hub port and Determine route string */ 3697 /* 4.3.3 route string does not include roothub port */ 3698 for (hub = dev; hub != NULL; hub = hub->ud_myhub) { 3699 uint32_t dep; 3700 3701 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd", 3702 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc, 3703 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno : 3704 -1); 3705 3706 if (hub->ud_powersrc == NULL) 3707 break; 3708 dep = hub->ud_depth; 3709 if (dep == 0) 3710 break; 3711 rhport = hub->ud_powersrc->up_portno; 3712 if (dep > USB_HUB_MAX_DEPTH) 3713 continue; 3714 3715 route |= 3716 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport) 3717 << ((dep - 1) * 4); 3718 } 3719 route = route >> 4; 3720 size_t bn = hub == sc->sc_bus.ub_roothub ? 
0 : 1; 3721 3722 /* Locate port on upstream high speed hub */ 3723 for (adev = dev, hub = up->up_parent; 3724 hub != NULL && hub->ud_speed != USB_SPEED_HIGH; 3725 adev = hub, hub = hub->ud_myhub) 3726 ; 3727 if (hub) { 3728 int p; 3729 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) { 3730 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) { 3731 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1]; 3732 goto found; 3733 } 3734 } 3735 panic("%s: cannot find HS port", __func__); 3736 found: 3737 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0); 3738 } else { 3739 dev->ud_myhsport = NULL; 3740 } 3741 3742 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport); 3743 3744 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport, 3745 ctlrport, route, (uintptr_t)hub); 3746 3747 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route); 3748 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport); 3749 } 3750 3751 /* 3752 * Setup whether device is hub, whether device uses MTT, and 3753 * TT informations if it uses MTT. 3754 */ 3755 static void 3756 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp) 3757 { 3758 struct usbd_device *dev = pipe->up_dev; 3759 struct usbd_port *myhsport = dev->ud_myhsport; 3760 usb_device_descriptor_t * const dd = &dev->ud_ddesc; 3761 uint32_t speed = dev->ud_speed; 3762 uint8_t rhaddr = dev->ud_bus->ub_rhaddr; 3763 uint8_t tthubslot, ttportnum; 3764 bool ishub; 3765 bool usemtt; 3766 3767 XHCIHIST_FUNC(); 3768 3769 /* 3770 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2 3771 * tthubslot: 3772 * This is the slot ID of parent HS hub 3773 * if LS/FS device is connected && connected through HS hub. 3774 * This is 0 if device is not LS/FS device || 3775 * parent hub is not HS hub || 3776 * attached to root hub. 3777 * ttportnum: 3778 * This is the downstream facing port of parent HS hub 3779 * if LS/FS device is connected. 3780 * This is 0 if device is not LS/FS device || 3781 * parent hub is not HS hub || 3782 * attached to root hub. 3783 */ 3784 if (myhsport && 3785 myhsport->up_parent->ud_addr != rhaddr && 3786 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) { 3787 ttportnum = myhsport->up_portno; 3788 tthubslot = myhsport->up_parent->ud_addr; 3789 } else { 3790 ttportnum = 0; 3791 tthubslot = 0; 3792 } 3793 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd", 3794 (uintptr_t)myhsport, ttportnum, tthubslot, 0); 3795 3796 /* ishub is valid after reading UDESC_DEVICE */ 3797 ishub = (dd->bDeviceClass == UDCLASS_HUB); 3798 3799 /* dev->ud_hub is valid after reading UDESC_HUB */ 3800 if (ishub && dev->ud_hub) { 3801 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc; 3802 uint8_t ttt = 3803 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK); 3804 3805 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts); 3806 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt); 3807 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0); 3808 } 3809 3810 #define IS_MTTHUB(dd) \ 3811 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT) 3812 3813 /* 3814 * MTT flag is set if 3815 * 1. this is HS hub && MTTs are supported and enabled; or 3816 * 2. this is LS or FS device && there is a parent HS hub where MTTs 3817 * are supported and enabled. 
3818 * 3819 * XXX enabled is not tested yet 3820 */ 3821 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd)) 3822 usemtt = true; 3823 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) && 3824 myhsport && 3825 myhsport->up_parent->ud_addr != rhaddr && 3826 IS_MTTHUB(&myhsport->up_parent->ud_ddesc)) 3827 usemtt = true; 3828 else 3829 usemtt = false; 3830 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd", 3831 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt); 3832 3833 #undef IS_MTTHUB 3834 3835 cp[0] |= 3836 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) | 3837 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0); 3838 cp[2] |= 3839 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) | 3840 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum); 3841 } 3842 3843 static const usb_endpoint_ss_comp_descriptor_t * 3844 xhci_get_essc_desc(struct usbd_pipe *pipe) 3845 { 3846 struct usbd_device *dev = pipe->up_dev; 3847 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3848 const usb_cdc_descriptor_t *cdcd; 3849 usbd_desc_iter_t iter; 3850 uint8_t ep; 3851 3852 /* config desc is NULL when opening ep0 */ 3853 if (dev == NULL || dev->ud_cdesc == NULL) 3854 return NULL; 3855 3856 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev, 3857 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY); 3858 if (cdcd == NULL) 3859 return NULL; 3860 3861 usb_desc_iter_init(dev, &iter); 3862 iter.cur = (const void *)cdcd; 3863 3864 /* find endpoint_ss_comp desc for ep of this pipe */ 3865 for (ep = 0;;) { 3866 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter); 3867 if (cdcd == NULL) 3868 break; 3869 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) { 3870 ep = ((const usb_endpoint_descriptor_t *)cdcd)-> 3871 bEndpointAddress; 3872 if (UE_GET_ADDR(ep) == 3873 UE_GET_ADDR(ed->bEndpointAddress)) { 3874 cdcd = (const usb_cdc_descriptor_t *) 3875 usb_desc_iter_next(&iter); 3876 break; 3877 } 3878 ep = 0; 3879 } 3880 } 3881 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) { 3882 return (const usb_endpoint_ss_comp_descriptor_t *)cdcd; 3883 } 3884 return NULL; 3885 } 3886 3887 /* set up params for periodic endpoint */ 3888 static void 3889 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp) 3890 { 3891 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe; 3892 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 3893 struct usbd_device * const dev = pipe->up_dev; 3894 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3895 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 3896 uint16_t mps = UGETW(ed->wMaxPacketSize); 3897 uint8_t speed = dev->ud_speed; 3898 uint32_t maxb, mep, atl; 3899 uint8_t ival, mult; 3900 3901 const usb_endpoint_ss_comp_descriptor_t * esscd = 3902 xhci_get_essc_desc(pipe); 3903 3904 /* USB 2.0 9.6.6, xHCI 4.8.2.4, 6.2.3.2 - 6.2.3.8 */ 3905 switch (xfertype) { 3906 case UE_ISOCHRONOUS: 3907 case UE_INTERRUPT: 3908 if (USB_IS_SS(speed)) { 3909 maxb = esscd ? esscd->bMaxBurst : UE_GET_TRANS(mps); 3910 mep = esscd ? UGETW(esscd->wBytesPerInterval) : 3911 UE_GET_SIZE(mps) * (maxb + 1); 3912 if (esscd && xfertype == UE_ISOCHRONOUS && 3913 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) { 3914 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes); 3915 mult = (mult > 2) ? 
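				    /*
				     * The endpoint context Mult field is two
				     * bits wide and values above 2 are
				     * reserved, so clamp; the controller
				     * performs Mult + 1 bursts per service
				     * interval.
				     */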
2 : mult; 3916 } else 3917 mult = 0; 3918 3919 } else { 3920 switch (speed) { 3921 case USB_SPEED_HIGH: 3922 maxb = UE_GET_TRANS(mps); 3923 mep = UE_GET_SIZE(mps) * (maxb + 1); 3924 break; 3925 case USB_SPEED_FULL: 3926 maxb = 0; 3927 mep = UE_GET_SIZE(mps); 3928 break; 3929 default: 3930 maxb = 0; 3931 mep = 0; 3932 break; 3933 } 3934 mult = 0; 3935 } 3936 mps = UE_GET_SIZE(mps); 3937 3938 if (pipe->up_interval == USBD_DEFAULT_INTERVAL) 3939 ival = ed->bInterval; 3940 else 3941 ival = pipe->up_interval; 3942 3943 ival = xhci_bival2ival(ival, speed, xfertype); 3944 atl = mep; 3945 break; 3946 case UE_CONTROL: 3947 case UE_BULK: 3948 default: 3949 if (USB_IS_SS(speed)) { 3950 maxb = esscd ? esscd->bMaxBurst : 0; 3951 } else 3952 maxb = 0; 3953 3954 mps = UE_GET_SIZE(mps); 3955 mep = 0; 3956 mult = 0; 3957 ival = 0; 3958 if (xfertype == UE_CONTROL) 3959 atl = 8; /* 6.2.3 */ 3960 else 3961 atl = mps; 3962 break; 3963 } 3964 3965 switch (speed) { 3966 case USB_SPEED_LOW: 3967 break; 3968 case USB_SPEED_FULL: 3969 if (xfertype == UE_INTERRUPT) 3970 if (mep > XHCI_EPCTX_MEP_FS_INTR) 3971 mep = XHCI_EPCTX_MEP_FS_INTR; 3972 if (xfertype == UE_ISOCHRONOUS) 3973 if (mep > XHCI_EPCTX_MEP_FS_ISOC) 3974 mep = XHCI_EPCTX_MEP_FS_ISOC; 3975 break; 3976 case USB_SPEED_HIGH: 3977 if (xfertype == UE_INTERRUPT) 3978 if (mep > XHCI_EPCTX_MEP_HS_INTR) 3979 mep = XHCI_EPCTX_MEP_HS_INTR; 3980 if (xfertype == UE_ISOCHRONOUS) 3981 if (mep > XHCI_EPCTX_MEP_HS_ISOC) 3982 mep = XHCI_EPCTX_MEP_HS_ISOC; 3983 break; 3984 case USB_SPEED_SUPER: 3985 case USB_SPEED_SUPER_PLUS: 3986 default: 3987 if (xfertype == UE_INTERRUPT) 3988 if (mep > XHCI_EPCTX_MEP_SS_INTR) 3989 mep = XHCI_EPCTX_MEP_SS_INTR; 3990 if (xfertype == UE_ISOCHRONOUS) { 3991 if (speed == USB_SPEED_SUPER || 3992 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) { 3993 if (mep > XHCI_EPCTX_MEP_SS_ISOC) 3994 mep = XHCI_EPCTX_MEP_SS_ISOC; 3995 } else { 3996 if (mep > XHCI_EPCTX_MEP_SS_ISOC_LEC) 3997 mep = XHCI_EPCTX_MEP_SS_ISOC_LEC; 3998 } 3999 } 4000 break; 4001 } 4002 4003 xpipe->xp_ival = ival; 4004 xpipe->xp_maxb = maxb + 1; 4005 xpipe->xp_mult = mult + 1; 4006 4007 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(mep >> 16); 4008 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival); 4009 cp[0] |= XHCI_EPCTX_0_MULT_SET(mult); 4010 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps); 4011 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb); 4012 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(mep & 0xffff); 4013 cp[4] |= XHCI_EPCTX_4_AVG_TRB_LEN_SET(atl); 4014 } 4015 4016 /* 4017 * Convert usbdi bInterval value to xhci endpoint context interval value 4018 * for periodic pipe. 4019 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 4020 */ 4021 static uint32_t 4022 xhci_bival2ival(uint32_t ival, uint32_t speed, uint32_t xfertype) 4023 { 4024 if (xfertype != UE_INTERRUPT && xfertype != UE_ISOCHRONOUS) 4025 return 0; 4026 4027 if (xfertype == UE_INTERRUPT && 4028 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) { 4029 u_int i; 4030 4031 /* 4032 * round ival down to "the nearest base 2 multiple of 4033 * bInterval * 8". 4034 * bInterval is at most 255 as its type is uByte. 4035 * 255(ms) = 2040(x 125us) < 2^11, so start with 10. 4036 */ 4037 for (i = 10; i > 0; i--) { 4038 if ((ival * 8) >= (1 << i)) 4039 break; 4040 } 4041 ival = i; 4042 4043 /* 3 - 10 */ 4044 ival = (ival < 3) ? 3 : ival; 4045 } else if (speed == USB_SPEED_FULL) { 4046 /* FS isoc */ 4047 ival += 3; /* 1ms -> 125us */ 4048 ival--; /* Interval = bInterval-1 */ 4049 /* 3 - 18 */ 4050 ival = (ival > 18) ? 18 : ival; 4051 ival = (ival < 3) ? 
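		    /*
		     * xHCI Table 65 maps FS isoc bInterval 1..16 to
		     * Interval 3..18; e.g. bInterval 1 (1 ms) becomes
		     * Interval 3, i.e. 2^3 * 125 us = 1 ms.
		     */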
3 : ival; 4052 } else { 4053 /* SS/HS intr/isoc */ 4054 if (ival > 0) 4055 ival--; /* Interval = bInterval-1 */ 4056 /* 0 - 15 */ 4057 ival = (ival > 15) ? 15 : ival; 4058 } 4059 4060 return ival; 4061 } 4062 4063 /* ----- */ 4064 4065 static void 4066 xhci_noop(struct usbd_pipe *pipe) 4067 { 4068 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4069 } 4070 4071 /* 4072 * Process root hub request. 4073 */ 4074 static int 4075 xhci_roothub_ctrl_locked(struct usbd_bus *bus, usb_device_request_t *req, 4076 void *buf, int buflen) 4077 { 4078 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 4079 usb_port_status_t ps; 4080 int l, totlen = 0; 4081 uint16_t len, value, index; 4082 int port, i; 4083 uint32_t v; 4084 4085 XHCIHIST_FUNC(); 4086 4087 KASSERT(mutex_owned(&sc->sc_rhlock)); 4088 4089 if (sc->sc_dying) 4090 return -1; 4091 4092 size_t bn = bus == &sc->sc_bus ? 0 : 1; 4093 4094 len = UGETW(req->wLength); 4095 value = UGETW(req->wValue); 4096 index = UGETW(req->wIndex); 4097 4098 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx", 4099 req->bmRequestType | (req->bRequest << 8), value, index, len); 4100 4101 #define C(x,y) ((x) | ((y) << 8)) 4102 switch (C(req->bRequest, req->bmRequestType)) { 4103 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): 4104 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0); 4105 if (len == 0) 4106 break; 4107 switch (value) { 4108 #define sd ((usb_string_descriptor_t *)buf) 4109 case C(2, UDESC_STRING): 4110 /* Product */ 4111 totlen = usb_makestrdesc(sd, len, "xHCI root hub"); 4112 break; 4113 #undef sd 4114 default: 4115 /* default from usbroothub */ 4116 return buflen; 4117 } 4118 break; 4119 4120 /* Hub requests */ 4121 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE): 4122 break; 4123 /* Clear Port Feature request */ 4124 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): { 4125 const size_t cp = xhci_rhport2ctlrport(sc, bn, index); 4126 4127 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd", 4128 index, value, bn, cp); 4129 if (index < 1 || index > sc->sc_rhportcount[bn]) { 4130 return -1; 4131 } 4132 port = XHCI_PORTSC(cp); 4133 v = xhci_op_read_4(sc, port); 4134 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0); 4135 v &= ~XHCI_PS_CLEAR; 4136 switch (value) { 4137 case UHF_PORT_ENABLE: 4138 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED); 4139 break; 4140 case UHF_PORT_SUSPEND: 4141 return -1; 4142 case UHF_PORT_POWER: 4143 break; 4144 case UHF_PORT_TEST: 4145 case UHF_PORT_INDICATOR: 4146 return -1; 4147 case UHF_C_PORT_CONNECTION: 4148 xhci_op_write_4(sc, port, v | XHCI_PS_CSC); 4149 break; 4150 case UHF_C_PORT_ENABLE: 4151 case UHF_C_PORT_SUSPEND: 4152 case UHF_C_PORT_OVER_CURRENT: 4153 return -1; 4154 case UHF_C_BH_PORT_RESET: 4155 xhci_op_write_4(sc, port, v | XHCI_PS_WRC); 4156 break; 4157 case UHF_C_PORT_RESET: 4158 xhci_op_write_4(sc, port, v | XHCI_PS_PRC); 4159 break; 4160 case UHF_C_PORT_LINK_STATE: 4161 xhci_op_write_4(sc, port, v | XHCI_PS_PLC); 4162 break; 4163 case UHF_C_PORT_CONFIG_ERROR: 4164 xhci_op_write_4(sc, port, v | XHCI_PS_CEC); 4165 break; 4166 default: 4167 return -1; 4168 } 4169 break; 4170 } 4171 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE): 4172 if (len == 0) 4173 break; 4174 if ((value & 0xff) != 0) { 4175 return -1; 4176 } 4177 usb_hub_descriptor_t hubd; 4178 4179 totlen = uimin(buflen, sizeof(hubd)); 4180 memcpy(&hubd, buf, totlen); 4181 hubd.bNbrPorts = sc->sc_rhportcount[bn]; 4182 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH); 4183 hubd.bPwrOn2PwrGood = 200; 4184 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) { 4185 
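		/*
		 * DeviceRemovable is a bitmap with one bit per port,
		 * packed eight ports per byte; leaving it all zero
		 * reports every device as removable.
		 */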
/* XXX can't find out? */ 4186 hubd.DeviceRemovable[i++] = 0; 4187 } 4188 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i; 4189 totlen = uimin(totlen, hubd.bDescLength); 4190 memcpy(buf, &hubd, totlen); 4191 break; 4192 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE): 4193 if (len != 4) { 4194 return -1; 4195 } 4196 memset(buf, 0, len); /* ? XXX */ 4197 totlen = len; 4198 break; 4199 /* Get Port Status request */ 4200 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): { 4201 const size_t cp = xhci_rhport2ctlrport(sc, bn, index); 4202 4203 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju", 4204 bn, index, cp, 0); 4205 if (index < 1 || index > sc->sc_rhportcount[bn]) { 4206 DPRINTFN(5, "bad get port status: index=%jd bn=%jd " 4207 "portcount=%jd", 4208 index, bn, sc->sc_rhportcount[bn], 0); 4209 return -1; 4210 } 4211 if (len != 4) { 4212 DPRINTFN(5, "bad get port status: len %jd != 4", 4213 len, 0, 0, 0); 4214 return -1; 4215 } 4216 v = xhci_op_read_4(sc, XHCI_PORTSC(cp)); 4217 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0); 4218 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v)); 4219 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS; 4220 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED; 4221 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR; 4222 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND; 4223 if (v & XHCI_PS_PR) i |= UPS_RESET; 4224 if (v & XHCI_PS_PP) { 4225 if (i & UPS_OTHER_SPEED) 4226 i |= UPS_PORT_POWER_SS; 4227 else 4228 i |= UPS_PORT_POWER; 4229 } 4230 if (i & UPS_OTHER_SPEED) 4231 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v)); 4232 if (sc->sc_vendor_port_status) 4233 i = sc->sc_vendor_port_status(sc, v, i); 4234 USETW(ps.wPortStatus, i); 4235 i = 0; 4236 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS; 4237 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED; 4238 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR; 4239 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET; 4240 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET; 4241 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE; 4242 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR; 4243 USETW(ps.wPortChange, i); 4244 totlen = uimin(len, sizeof(ps)); 4245 memcpy(buf, &ps, totlen); 4246 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx" 4247 " totlen %jd", 4248 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0); 4249 break; 4250 } 4251 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE): 4252 return -1; 4253 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE): 4254 break; 4255 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE): 4256 break; 4257 /* Set Port Feature request */ 4258 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): { 4259 int optval = (index >> 8) & 0xff; 4260 index &= 0xff; 4261 if (index < 1 || index > sc->sc_rhportcount[bn]) { 4262 return -1; 4263 } 4264 4265 const size_t cp = xhci_rhport2ctlrport(sc, bn, index); 4266 4267 port = XHCI_PORTSC(cp); 4268 v = xhci_op_read_4(sc, port); 4269 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0); 4270 v &= ~XHCI_PS_CLEAR; 4271 switch (value) { 4272 case UHF_PORT_ENABLE: 4273 xhci_op_write_4(sc, port, v | XHCI_PS_PED); 4274 break; 4275 case UHF_PORT_SUSPEND: 4276 /* XXX suspend */ 4277 break; 4278 case UHF_PORT_RESET: 4279 xhci_op_write_4(sc, port, v | XHCI_PS_PR); 4280 /* Wait for reset to complete. 
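			 * Poll PORTSC every 10 ms until XHCI_PS_PR
			 * clears, for at most USB_PORT_ROOT_RESET_DELAY ms.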
*/ 4281 for (i = 0; i < USB_PORT_ROOT_RESET_DELAY / 10; i++) { 4282 if (sc->sc_dying) { 4283 return -1; 4284 } 4285 v = xhci_op_read_4(sc, port); 4286 if ((v & XHCI_PS_PR) == 0) { 4287 break; 4288 } 4289 usb_delay_ms(&sc->sc_bus, 10); 4290 } 4291 break; 4292 case UHF_PORT_POWER: 4293 /* XXX power control */ 4294 break; 4295 /* XXX more */ 4296 case UHF_C_PORT_RESET: 4297 xhci_op_write_4(sc, port, v | XHCI_PS_PRC); 4298 break; 4299 case UHF_PORT_U1_TIMEOUT: 4300 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) { 4301 return -1; 4302 } 4303 port = XHCI_PORTPMSC(cp); 4304 v = xhci_op_read_4(sc, port); 4305 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx", 4306 index, cp, v, 0); 4307 v &= ~XHCI_PM3_U1TO_SET(0xff); 4308 v |= XHCI_PM3_U1TO_SET(optval); 4309 xhci_op_write_4(sc, port, v); 4310 break; 4311 case UHF_PORT_U2_TIMEOUT: 4312 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) { 4313 return -1; 4314 } 4315 port = XHCI_PORTPMSC(cp); 4316 v = xhci_op_read_4(sc, port); 4317 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx", 4318 index, cp, v, 0); 4319 v &= ~XHCI_PM3_U2TO_SET(0xff); 4320 v |= XHCI_PM3_U2TO_SET(optval); 4321 xhci_op_write_4(sc, port, v); 4322 break; 4323 default: 4324 return -1; 4325 } 4326 } 4327 break; 4328 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER): 4329 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER): 4330 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER): 4331 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER): 4332 break; 4333 default: 4334 /* default from usbroothub */ 4335 return buflen; 4336 } 4337 4338 return totlen; 4339 } 4340 4341 static int 4342 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req, 4343 void *buf, int buflen) 4344 { 4345 struct xhci_softc *sc = XHCI_BUS2SC(bus); 4346 int actlen; 4347 4348 mutex_enter(&sc->sc_rhlock); 4349 actlen = xhci_roothub_ctrl_locked(bus, req, buf, buflen); 4350 mutex_exit(&sc->sc_rhlock); 4351 4352 return actlen; 4353 } 4354 4355 /* root hub interrupt */ 4356 4357 static usbd_status 4358 xhci_root_intr_transfer(struct usbd_xfer *xfer) 4359 { 4360 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4361 4362 /* Pipe isn't running, start first */ 4363 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4364 } 4365 4366 /* Wait for roothub port status/change */ 4367 static usbd_status 4368 xhci_root_intr_start(struct usbd_xfer *xfer) 4369 { 4370 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4371 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4372 4373 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4374 4375 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 4376 4377 if (sc->sc_dying) 4378 return USBD_IOERROR; 4379 4380 KASSERT(sc->sc_intrxfer[bn] == NULL); 4381 sc->sc_intrxfer[bn] = xfer; 4382 xfer->ux_status = USBD_IN_PROGRESS; 4383 4384 return USBD_IN_PROGRESS; 4385 } 4386 4387 static void 4388 xhci_root_intr_abort(struct usbd_xfer *xfer) 4389 { 4390 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4391 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4392 4393 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4394 4395 KASSERT(mutex_owned(&sc->sc_lock)); 4396 KASSERT(xfer->ux_pipe->up_intrxfer == xfer); 4397 4398 /* If xfer has already completed, nothing to do here. */ 4399 if (sc->sc_intrxfer[bn] == NULL) 4400 return; 4401 4402 /* 4403 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer. 4404 * Cancel it. 
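	 * usb_transfer_complete() will call xhci_root_intr_done(), which
	 * clears sc_intrxfer[bn] so the xfer cannot be completed twice.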
4405 */ 4406 KASSERT(sc->sc_intrxfer[bn] == xfer); 4407 xfer->ux_status = USBD_CANCELLED; 4408 usb_transfer_complete(xfer); 4409 } 4410 4411 static void 4412 xhci_root_intr_close(struct usbd_pipe *pipe) 4413 { 4414 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe); 4415 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer; 4416 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4417 4418 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4419 4420 KASSERT(mutex_owned(&sc->sc_lock)); 4421 4422 /* 4423 * Caller must guarantee the xfer has completed first, by 4424 * closing the pipe only after normal completion or an abort. 4425 */ 4426 KASSERT(sc->sc_intrxfer[bn] == NULL); 4427 } 4428 4429 static void 4430 xhci_root_intr_done(struct usbd_xfer *xfer) 4431 { 4432 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4433 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4434 4435 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4436 4437 KASSERT(mutex_owned(&sc->sc_lock)); 4438 4439 /* Claim the xfer so it doesn't get completed again. */ 4440 KASSERT(sc->sc_intrxfer[bn] == xfer); 4441 KASSERT(xfer->ux_status != USBD_IN_PROGRESS); 4442 sc->sc_intrxfer[bn] = NULL; 4443 } 4444 4445 /* -------------- */ 4446 /* device control */ 4447 4448 static usbd_status 4449 xhci_device_ctrl_transfer(struct usbd_xfer *xfer) 4450 { 4451 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4452 4453 /* Pipe isn't running, start first */ 4454 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4455 } 4456 4457 static usbd_status 4458 xhci_device_ctrl_start(struct usbd_xfer *xfer) 4459 { 4460 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4461 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4462 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4463 struct xhci_ring * const tr = xs->xs_xr[dci]; 4464 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4465 usb_device_request_t * const req = &xfer->ux_request; 4466 const bool isread = usbd_xfer_isread(xfer); 4467 const uint32_t len = UGETW(req->wLength); 4468 usb_dma_t * const dma = &xfer->ux_dmabuf; 4469 uint64_t parameter; 4470 uint32_t status; 4471 uint32_t control; 4472 u_int i; 4473 const bool polling = xhci_polling_p(sc); 4474 4475 XHCIHIST_FUNC(); 4476 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx", 4477 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue), 4478 UGETW(req->wIndex), UGETW(req->wLength)); 4479 4480 KASSERT(polling || mutex_owned(&sc->sc_lock)); 4481 4482 /* we rely on the bottom bits for extra info */ 4483 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %p", xfer); 4484 4485 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0); 4486 4487 if (tr->is_halted) 4488 goto out; 4489 4490 i = 0; 4491 4492 /* setup phase */ 4493 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */ 4494 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req)); 4495 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE : 4496 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) | 4497 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) | 4498 XHCI_TRB_3_IDT_BIT; 4499 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4500 4501 if (len != 0) { 4502 /* data phase */ 4503 parameter = DMAADDR(dma, 0); 4504 KASSERTMSG(len <= 0x10000, "len %d", len); 4505 status = XHCI_TRB_2_IRQ_SET(0) | 4506 XHCI_TRB_2_TDSZ_SET(0) | 4507 XHCI_TRB_2_BYTES_SET(len); 4508 control = (isread ? XHCI_TRB_3_DIR_IN : 0) | 4509 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) | 4510 (isread ? 
XHCI_TRB_3_ISP_BIT : 0) | 4511 XHCI_TRB_3_IOC_BIT; 4512 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4513 4514 usb_syncmem(dma, 0, len, 4515 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4516 } 4517 4518 parameter = 0; 4519 status = XHCI_TRB_2_IRQ_SET(0); 4520 /* the status stage has inverted direction */ 4521 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) | 4522 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) | 4523 XHCI_TRB_3_IOC_BIT; 4524 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4525 4526 if (!polling) 4527 mutex_enter(&tr->xr_lock); 4528 xhci_ring_put_xfer(sc, tr, xx, i); 4529 if (!polling) 4530 mutex_exit(&tr->xr_lock); 4531 4532 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4533 4534 out: if (xfer->ux_status == USBD_NOT_STARTED) { 4535 xfer->ux_status = USBD_IN_PROGRESS; 4536 usbd_xfer_schedule_timeout(xfer); 4537 } else { 4538 /* 4539 * We must be coming from xhci_pipe_restart -- timeout 4540 * already set up, nothing to do. 4541 */ 4542 } 4543 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 4544 4545 return USBD_IN_PROGRESS; 4546 } 4547 4548 static void 4549 xhci_device_ctrl_done(struct usbd_xfer *xfer) 4550 { 4551 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4552 usb_device_request_t *req = &xfer->ux_request; 4553 int len = UGETW(req->wLength); 4554 int rd = req->bmRequestType & UT_READ; 4555 4556 if (len) 4557 usb_syncmem(&xfer->ux_dmabuf, 0, len, 4558 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 4559 } 4560 4561 static void 4562 xhci_device_ctrl_abort(struct usbd_xfer *xfer) 4563 { 4564 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4565 4566 usbd_xfer_abort(xfer); 4567 } 4568 4569 static void 4570 xhci_device_ctrl_close(struct usbd_pipe *pipe) 4571 { 4572 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4573 4574 xhci_close_pipe(pipe); 4575 } 4576 4577 /* ------------------ */ 4578 /* device isochronous */ 4579 4580 static usbd_status 4581 xhci_device_isoc_transfer(struct usbd_xfer *xfer) 4582 { 4583 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4584 4585 return xhci_device_isoc_enter(xfer); 4586 } 4587 4588 static usbd_status 4589 xhci_device_isoc_enter(struct usbd_xfer *xfer) 4590 { 4591 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4592 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4593 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4594 struct xhci_ring * const tr = xs->xs_xr[dci]; 4595 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4596 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe; 4597 usb_dma_t * const dma = &xfer->ux_dmabuf; 4598 uint64_t parameter; 4599 uint32_t status; 4600 uint32_t control; 4601 uint32_t offs; 4602 int i, ival; 4603 const bool polling = xhci_polling_p(sc); 4604 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize); 4605 const uint16_t mps = UE_GET_SIZE(MPS); 4606 const uint8_t maxb = xpipe->xp_maxb; 4607 4608 XHCIHIST_FUNC(); 4609 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4610 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4611 4612 KASSERT(polling || mutex_owned(&sc->sc_lock)); 4613 4614 if (sc->sc_dying) 4615 return USBD_IOERROR; 4616 4617 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths); 4618 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0); 4619 4620 const bool isread = usbd_xfer_isread(xfer); 4621 if (xfer->ux_length) 4622 usb_syncmem(dma, 0, xfer->ux_length, 4623 isread ? 
BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4624 4625 ival = xpipe->xp_ival; 4626 if (ival >= 0 && ival <= 15) 4627 ival = 1 << ival; 4628 else 4629 ival = 1; /* fake something up */ 4630 4631 const unsigned mfmask = XHCI_MFINDEX_GET(~(uint32_t)0); 4632 4633 if (xpipe->xp_isoc_next == -1) { 4634 uint32_t mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX); 4635 4636 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0); 4637 mfindex = XHCI_MFINDEX_GET(mfindex); 4638 4639 /* Start Frame = MFINDEX + IST + 1 */ 4640 mfindex += sc->sc_isthresh + 1; 4641 xpipe->xp_isoc_next = roundup2(mfindex, ival) & mfmask; 4642 } 4643 4644 offs = 0; 4645 for (i = 0; i < xfer->ux_nframes; i++) { 4646 const uint32_t len = xfer->ux_frlengths[i]; 4647 const unsigned tdpc = howmany(len, mps); 4648 const unsigned tbc = howmany(tdpc, maxb) - 1; 4649 const unsigned tlbpc1 = tdpc % maxb; 4650 const unsigned tlbpc = tlbpc1 ? tlbpc1 - 1 : maxb - 1; 4651 const unsigned frid = xpipe->xp_isoc_next / 4652 USB_UFRAMES_PER_FRAME; 4653 4654 KASSERTMSG(len <= 0x10000, "len %d", len); 4655 parameter = DMAADDR(dma, offs); 4656 status = XHCI_TRB_2_IRQ_SET(0) | 4657 XHCI_TRB_2_TDSZ_SET(0) | 4658 XHCI_TRB_2_BYTES_SET(len); 4659 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) | 4660 (isread ? XHCI_TRB_3_ISP_BIT : 0) | 4661 XHCI_TRB_3_TBC_SET(tbc) | 4662 XHCI_TRB_3_TLBPC_SET(tlbpc) | 4663 XHCI_TRB_3_IOC_BIT; 4664 if (XHCI_HCC_CFC(sc->sc_hcc)) { 4665 control |= XHCI_TRB_3_FRID_SET(frid); 4666 #if 0 4667 } else if (xpipe->xp_isoc_next == -1) { 4668 control |= XHCI_TRB_3_FRID_SET(frid); 4669 #endif 4670 } else { 4671 control |= XHCI_TRB_3_ISO_SIA_BIT; 4672 } 4673 #if 0 4674 if (i != xfer->ux_nframes - 1) 4675 control |= XHCI_TRB_3_BEI_BIT; 4676 #endif 4677 xhci_xfer_put_trb(xx, i, parameter, status, control); 4678 4679 xpipe->xp_isoc_next = (xpipe->xp_isoc_next + ival) & mfmask; 4680 offs += len; 4681 } 4682 4683 xx->xx_isoc_done = 0; 4684 4685 if (!polling) 4686 mutex_enter(&tr->xr_lock); 4687 xhci_ring_put_xfer(sc, tr, xx, i); 4688 if (!polling) 4689 mutex_exit(&tr->xr_lock); 4690 4691 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4692 xfer->ux_status = USBD_IN_PROGRESS; 4693 usbd_xfer_schedule_timeout(xfer); 4694 4695 return USBD_IN_PROGRESS; 4696 } 4697 4698 static void 4699 xhci_device_isoc_abort(struct usbd_xfer *xfer) 4700 { 4701 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4702 4703 usbd_xfer_abort(xfer); 4704 } 4705 4706 static void 4707 xhci_device_isoc_close(struct usbd_pipe *pipe) 4708 { 4709 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4710 4711 xhci_close_pipe(pipe); 4712 } 4713 4714 static void 4715 xhci_device_isoc_done(struct usbd_xfer *xfer) 4716 { 4717 #ifdef USB_DEBUG 4718 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4719 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4720 #endif 4721 const bool isread = usbd_xfer_isread(xfer); 4722 4723 XHCIHIST_FUNC(); 4724 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4725 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4726 4727 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 4728 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 4729 } 4730 4731 /* ----------- */ 4732 /* device bulk */ 4733 4734 static usbd_status 4735 xhci_device_bulk_transfer(struct usbd_xfer *xfer) 4736 { 4737 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4738 4739 /* Pipe isn't running, so start it first. 
*/ 4740 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4741 } 4742 4743 static usbd_status 4744 xhci_device_bulk_start(struct usbd_xfer *xfer) 4745 { 4746 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4747 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4748 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4749 struct xhci_ring * const tr = xs->xs_xr[dci]; 4750 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4751 const uint32_t len = xfer->ux_length; 4752 usb_dma_t * const dma = &xfer->ux_dmabuf; 4753 uint64_t parameter; 4754 uint32_t status; 4755 uint32_t control; 4756 u_int i = 0; 4757 const bool polling = xhci_polling_p(sc); 4758 4759 XHCIHIST_FUNC(); 4760 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4761 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4762 4763 KASSERT(polling || mutex_owned(&sc->sc_lock)); 4764 4765 if (sc->sc_dying) 4766 return USBD_IOERROR; 4767 4768 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0); 4769 4770 if (tr->is_halted) 4771 goto out; 4772 4773 parameter = DMAADDR(dma, 0); 4774 const bool isread = usbd_xfer_isread(xfer); 4775 if (len) 4776 usb_syncmem(dma, 0, len, 4777 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4778 4779 /* 4780 * XXX: (dsl) The physical buffer must not cross a 64k boundary. 4781 * If the user supplied buffer crosses such a boundary then 2 4782 * (or more) TRB should be used. 4783 * If multiple TRB are used the td_size field must be set correctly. 4784 * For v1.0 devices (like ivy bridge) this is the number of usb data 4785 * blocks needed to complete the transfer. 4786 * Setting it to 1 in the last TRB causes an extra zero-length 4787 * data block be sent. 4788 * The earlier documentation differs, I don't know how it behaves. 4789 */ 4790 KASSERTMSG(len <= 0x10000, "len %d", len); 4791 status = XHCI_TRB_2_IRQ_SET(0) | 4792 XHCI_TRB_2_TDSZ_SET(0) | 4793 XHCI_TRB_2_BYTES_SET(len); 4794 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) | 4795 (isread ? XHCI_TRB_3_ISP_BIT : 0) | 4796 XHCI_TRB_3_IOC_BIT; 4797 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4798 4799 if (!isread && (xfer->ux_flags & USBD_FORCE_SHORT_XFER)) { 4800 status = XHCI_TRB_2_IRQ_SET(0) | 4801 XHCI_TRB_2_TDSZ_SET(0) | 4802 XHCI_TRB_2_BYTES_SET(0); 4803 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4804 } 4805 4806 if (!polling) 4807 mutex_enter(&tr->xr_lock); 4808 xhci_ring_put_xfer(sc, tr, xx, i); 4809 if (!polling) 4810 mutex_exit(&tr->xr_lock); 4811 4812 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4813 4814 out: if (xfer->ux_status == USBD_NOT_STARTED) { 4815 xfer->ux_status = USBD_IN_PROGRESS; 4816 usbd_xfer_schedule_timeout(xfer); 4817 } else { 4818 /* 4819 * We must be coming from xhci_pipe_restart -- timeout 4820 * already set up, nothing to do. 4821 */ 4822 } 4823 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 4824 4825 return USBD_IN_PROGRESS; 4826 } 4827 4828 static void 4829 xhci_device_bulk_done(struct usbd_xfer *xfer) 4830 { 4831 #ifdef USB_DEBUG 4832 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4833 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4834 #endif 4835 const bool isread = usbd_xfer_isread(xfer); 4836 4837 XHCIHIST_FUNC(); 4838 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4839 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4840 4841 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 4842 isread ? 
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 4843 } 4844 4845 static void 4846 xhci_device_bulk_abort(struct usbd_xfer *xfer) 4847 { 4848 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4849 4850 usbd_xfer_abort(xfer); 4851 } 4852 4853 static void 4854 xhci_device_bulk_close(struct usbd_pipe *pipe) 4855 { 4856 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4857 4858 xhci_close_pipe(pipe); 4859 } 4860 4861 /* ---------------- */ 4862 /* device interrupt */ 4863 4864 static usbd_status 4865 xhci_device_intr_transfer(struct usbd_xfer *xfer) 4866 { 4867 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4868 4869 /* Pipe isn't running, so start it first. */ 4870 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4871 } 4872 4873 static usbd_status 4874 xhci_device_intr_start(struct usbd_xfer *xfer) 4875 { 4876 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4877 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4878 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4879 struct xhci_ring * const tr = xs->xs_xr[dci]; 4880 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4881 const uint32_t len = xfer->ux_length; 4882 const bool polling = xhci_polling_p(sc); 4883 usb_dma_t * const dma = &xfer->ux_dmabuf; 4884 uint64_t parameter; 4885 uint32_t status; 4886 uint32_t control; 4887 u_int i = 0; 4888 4889 XHCIHIST_FUNC(); 4890 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4891 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4892 4893 KASSERT(polling || mutex_owned(&sc->sc_lock)); 4894 4895 if (sc->sc_dying) 4896 return USBD_IOERROR; 4897 4898 if (tr->is_halted) 4899 goto out; 4900 4901 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0); 4902 4903 const bool isread = usbd_xfer_isread(xfer); 4904 if (len) 4905 usb_syncmem(dma, 0, len, 4906 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4907 4908 parameter = DMAADDR(dma, 0); 4909 KASSERTMSG(len <= 0x10000, "len %d", len); 4910 status = XHCI_TRB_2_IRQ_SET(0) | 4911 XHCI_TRB_2_TDSZ_SET(0) | 4912 XHCI_TRB_2_BYTES_SET(len); 4913 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) | 4914 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT; 4915 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4916 4917 if (!polling) 4918 mutex_enter(&tr->xr_lock); 4919 xhci_ring_put_xfer(sc, tr, xx, i); 4920 if (!polling) 4921 mutex_exit(&tr->xr_lock); 4922 4923 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4924 4925 out: if (xfer->ux_status == USBD_NOT_STARTED) { 4926 xfer->ux_status = USBD_IN_PROGRESS; 4927 usbd_xfer_schedule_timeout(xfer); 4928 } else { 4929 /* 4930 * We must be coming from xhci_pipe_restart -- timeout 4931 * already set up, nothing to do. 4932 */ 4933 } 4934 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 4935 4936 return USBD_IN_PROGRESS; 4937 } 4938 4939 static void 4940 xhci_device_intr_done(struct usbd_xfer *xfer) 4941 { 4942 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer); 4943 #ifdef USB_DEBUG 4944 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4945 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4946 #endif 4947 const bool isread = usbd_xfer_isread(xfer); 4948 4949 XHCIHIST_FUNC(); 4950 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4951 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4952 4953 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 4954 4955 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 4956 isread ? 
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 4957 } 4958 4959 static void 4960 xhci_device_intr_abort(struct usbd_xfer *xfer) 4961 { 4962 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer); 4963 4964 XHCIHIST_FUNC(); 4965 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0); 4966 4967 KASSERT(mutex_owned(&sc->sc_lock)); 4968 usbd_xfer_abort(xfer); 4969 } 4970 4971 static void 4972 xhci_device_intr_close(struct usbd_pipe *pipe) 4973 { 4974 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 4975 4976 XHCIHIST_FUNC(); 4977 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0); 4978 4979 xhci_close_pipe(pipe); 4980 } 4981