     1 /* $NetBSD: xhci.c,v 1.188 2025/01/30 10:51:39 jmcneill Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * USB rev 2.0 and rev 3.1 specification
31 * http://www.usb.org/developers/docs/
32 * xHCI rev 1.1 specification
33 * http://www.intel.com/technology/usb/spec.htm
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.188 2025/01/30 10:51:39 jmcneill Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_usb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/kmem.h>
47 #include <sys/device.h>
48 #include <sys/select.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/sysctl.h>
56
57 #include <machine/endian.h>
58
59 #include <dev/usb/usb.h>
60 #include <dev/usb/usbdi.h>
61 #include <dev/usb/usbdivar.h>
62 #include <dev/usb/usbdi_util.h>
63 #include <dev/usb/usbhist.h>
64 #include <dev/usb/usb_mem.h>
65 #include <dev/usb/usb_quirks.h>
66
67 #include <dev/usb/xhcireg.h>
68 #include <dev/usb/xhcivar.h>
69 #include <dev/usb/usbroothub.h>
70
71
72 #ifdef USB_DEBUG
73 #ifndef XHCI_DEBUG
74 #define xhcidebug 0
75 #else /* !XHCI_DEBUG */
76 #define HEXDUMP(a, b, c) \
77 do { \
78 if (xhcidebug > 0) \
79 hexdump(printf, a, b, c); \
80 } while (/*CONSTCOND*/0)
81 static int xhcidebug = 0;
82
83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
84 {
85 int err;
86 const struct sysctlnode *rnode;
87 const struct sysctlnode *cnode;
88
89 err = sysctl_createv(clog, 0, NULL, &rnode,
90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
91 SYSCTL_DESCR("xhci global controls"),
92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
93
94 if (err)
95 goto fail;
96
97 /* control debugging printfs */
98 err = sysctl_createv(clog, 0, &rnode, &cnode,
99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
100 "debug", SYSCTL_DESCR("Enable debugging output"),
101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
102 if (err)
103 goto fail;
104
105 return;
106 fail:
107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
108 }
109
110 #endif /* !XHCI_DEBUG */
111 #endif /* USB_DEBUG */
112
113 #ifndef HEXDUMP
114 #define HEXDUMP(a, b, c)
115 #endif
116
117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
119 #define XHCIHIST_FUNC() USBHIST_FUNC()
120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug)
121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)
123
124 #define XHCI_DCI_SLOT 0
125 #define XHCI_DCI_EP_CONTROL 1
126
127 #define XHCI_ICI_INPUT_CONTROL 0
128
129 struct xhci_pipe {
130 struct usbd_pipe xp_pipe;
131 struct usb_task xp_async_task;
132 int16_t xp_isoc_next; /* next frame */
133 uint8_t xp_maxb; /* max burst */
134 uint8_t xp_mult;
135 };
136
137 #define XHCI_COMMAND_RING_TRBS 256
138 #define XHCI_EVENT_RING_TRBS 256
139 #define XHCI_EVENT_RING_SEGMENTS 1
140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT
141
142 static usbd_status xhci_open(struct usbd_pipe *);
143 static void xhci_close_pipe(struct usbd_pipe *);
144 static int xhci_intr1(struct xhci_softc * const);
145 static void xhci_softintr(void *);
146 static void xhci_poll(struct usbd_bus *);
147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
149 static void xhci_abortx(struct usbd_xfer *);
150 static bool xhci_dying(struct usbd_bus *);
151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
153 struct usbd_port *);
154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
155 void *, int);
156
157 static void xhci_pipe_restart(struct usbd_pipe *);
158 static void xhci_pipe_restart_async_task(void *);
159 static void xhci_pipe_restart_async(struct usbd_pipe *);
160
161 static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
162 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
163 static void xhci_reset_endpoint(struct usbd_pipe *);
164 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *,
165 struct xhci_slot *, u_int, uint32_t);
166 static usbd_status xhci_stop_endpoint(struct usbd_pipe *);
167
168 static void xhci_host_dequeue(struct xhci_ring * const);
169 static void xhci_set_dequeue(struct usbd_pipe *);
170
171 static usbd_status xhci_do_command(struct xhci_softc * const,
172 struct xhci_soft_trb * const, int);
173 static usbd_status xhci_do_command_locked(struct xhci_softc * const,
174 struct xhci_soft_trb * const, int);
175 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
176 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *);
177 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
178 static usbd_status xhci_enable_slot(struct xhci_softc * const,
179 uint8_t * const);
180 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
181 static usbd_status xhci_address_device(struct xhci_softc * const,
182 uint64_t, uint8_t, bool);
183 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
184 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
185 struct xhci_slot * const, u_int);
186 static usbd_status xhci_ring_init(struct xhci_softc * const,
187 struct xhci_ring **, size_t, size_t);
188 static void xhci_ring_free(struct xhci_softc * const,
189 struct xhci_ring ** const);
190
191 static void xhci_setup_ctx(struct usbd_pipe *);
192 static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
193 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
194 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
195 static uint32_t xhci_bival2ival(uint32_t, uint32_t, uint32_t);
196
197 static void xhci_noop(struct usbd_pipe *);
198
199 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
200 static usbd_status xhci_root_intr_start(struct usbd_xfer *);
201 static void xhci_root_intr_abort(struct usbd_xfer *);
202 static void xhci_root_intr_close(struct usbd_pipe *);
203 static void xhci_root_intr_done(struct usbd_xfer *);
204
205 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
206 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
207 static void xhci_device_ctrl_abort(struct usbd_xfer *);
208 static void xhci_device_ctrl_close(struct usbd_pipe *);
209 static void xhci_device_ctrl_done(struct usbd_xfer *);
210
211 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
212 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *);
213 static void xhci_device_isoc_abort(struct usbd_xfer *);
214 static void xhci_device_isoc_close(struct usbd_pipe *);
215 static void xhci_device_isoc_done(struct usbd_xfer *);
216
217 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
218 static usbd_status xhci_device_intr_start(struct usbd_xfer *);
219 static void xhci_device_intr_abort(struct usbd_xfer *);
220 static void xhci_device_intr_close(struct usbd_pipe *);
221 static void xhci_device_intr_done(struct usbd_xfer *);
222
223 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
224 static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
225 static void xhci_device_bulk_abort(struct usbd_xfer *);
226 static void xhci_device_bulk_close(struct usbd_pipe *);
227 static void xhci_device_bulk_done(struct usbd_xfer *);
228
229 static const struct usbd_bus_methods xhci_bus_methods = {
230 .ubm_open = xhci_open,
231 .ubm_softint = xhci_softintr,
232 .ubm_dopoll = xhci_poll,
233 .ubm_allocx = xhci_allocx,
234 .ubm_freex = xhci_freex,
235 .ubm_abortx = xhci_abortx,
236 .ubm_dying = xhci_dying,
237 .ubm_getlock = xhci_get_lock,
238 .ubm_newdev = xhci_new_device,
239 .ubm_rhctrl = xhci_roothub_ctrl,
240 };
241
242 static const struct usbd_pipe_methods xhci_root_intr_methods = {
243 .upm_transfer = xhci_root_intr_transfer,
244 .upm_start = xhci_root_intr_start,
245 .upm_abort = xhci_root_intr_abort,
246 .upm_close = xhci_root_intr_close,
247 .upm_cleartoggle = xhci_noop,
248 .upm_done = xhci_root_intr_done,
249 };
250
251
252 static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
253 .upm_transfer = xhci_device_ctrl_transfer,
254 .upm_start = xhci_device_ctrl_start,
255 .upm_abort = xhci_device_ctrl_abort,
256 .upm_close = xhci_device_ctrl_close,
257 .upm_cleartoggle = xhci_noop,
258 .upm_done = xhci_device_ctrl_done,
259 };
260
261 static const struct usbd_pipe_methods xhci_device_isoc_methods = {
262 .upm_transfer = xhci_device_isoc_transfer,
263 .upm_abort = xhci_device_isoc_abort,
264 .upm_close = xhci_device_isoc_close,
265 .upm_cleartoggle = xhci_noop,
266 .upm_done = xhci_device_isoc_done,
267 };
268
269 static const struct usbd_pipe_methods xhci_device_bulk_methods = {
270 .upm_transfer = xhci_device_bulk_transfer,
271 .upm_start = xhci_device_bulk_start,
272 .upm_abort = xhci_device_bulk_abort,
273 .upm_close = xhci_device_bulk_close,
274 .upm_cleartoggle = xhci_noop,
275 .upm_done = xhci_device_bulk_done,
276 };
277
278 static const struct usbd_pipe_methods xhci_device_intr_methods = {
279 .upm_transfer = xhci_device_intr_transfer,
280 .upm_start = xhci_device_intr_start,
281 .upm_abort = xhci_device_intr_abort,
282 .upm_close = xhci_device_intr_close,
283 .upm_cleartoggle = xhci_noop,
284 .upm_done = xhci_device_intr_done,
285 };
286
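/*
 * Register access helpers.  Controllers with the XHCI_32BIT_ACCESS quirk
 * only tolerate aligned 32-bit MMIO, so the 8- and 16-bit accessors below
 * emulate narrow accesses by reading/writing the containing 32-bit word
 * and masking/shifting.
 */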
287 static inline uint32_t
288 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
289 {
290 if (ISSET(sc->sc_quirks, XHCI_32BIT_ACCESS)) {
291 uint32_t val;
292 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset & ~3);
293 return (val >> ((offset & 3) * NBBY)) & 0xff;
294 } else {
295 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
296 }
297 }
298
299 static inline uint32_t
300 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset)
301 {
302 if (ISSET(sc->sc_quirks, XHCI_32BIT_ACCESS)) {
303 uint32_t val;
304 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset & ~3);
305 return (val >> ((offset & 3) * NBBY)) & 0xffff;
306 } else {
307 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
308 }
309 }
310
311 static inline uint32_t
312 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
313 {
314 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
315 }
316
317 static inline void
318 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
319 uint32_t value)
320 {
321 if (ISSET(sc->sc_quirks, XHCI_32BIT_ACCESS)) {
322 const uint32_t mask = 0xffU << ((offset & 3) * NBBY);
323 uint32_t val;
324 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset & ~3);
325 val &= ~mask;
326 val |= __SHIFTIN(value, mask);
327 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset & ~3, val);
328 } else {
329 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
330 }
331 }
332
333 #if 0 /* unused */
334 static inline void
335 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
336 uint32_t value)
337 {
338 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
339 }
340 #endif /* unused */
341
342 static inline uint32_t
343 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
344 {
345 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
346 }
347
348 static inline uint32_t
349 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
350 {
351 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
352 }
353
354 static inline void
355 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
356 uint32_t value)
357 {
358 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
359 }
360
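/*
 * 64-bit operational and runtime registers.  Unless XHCI_USE_BUS_SPACE_8
 * is defined they are accessed as two 32-bit operations, low word first.
 */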
361 static inline uint64_t
362 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
363 {
364 uint64_t value;
365
366 #ifdef XHCI_USE_BUS_SPACE_8
367 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
368 #else
369 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
370 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
371 offset + 4) << 32;
372 #endif
373
374 return value;
375 }
376
377 static inline void
378 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
379 uint64_t value)
380 {
381 #ifdef XHCI_USE_BUS_SPACE_8
382 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
383 #else
384 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
385 (value >> 0) & 0xffffffff);
386 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
387 (value >> 32) & 0xffffffff);
388 #endif
389 }
390
391 static inline uint32_t
392 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
393 {
394 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
395 }
396
397 static inline void
398 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
399 uint32_t value)
400 {
401 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
402 }
403
404 static inline uint64_t
405 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
406 {
407 uint64_t value;
408
409 #ifdef XHCI_USE_BUS_SPACE_8
410 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
411 #else
412 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
413 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
414 offset + 4) << 32;
415 #endif
416
417 return value;
418 }
419
420 static inline void
421 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
422 uint64_t value)
423 {
424 #ifdef XHCI_USE_BUS_SPACE_8
425 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
426 #else
427 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
428 (value >> 0) & 0xffffffff);
429 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
430 (value >> 32) & 0xffffffff);
431 #endif
432 }
433
434 #if 0 /* unused */
435 static inline uint32_t
436 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
437 {
438 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
439 }
440 #endif /* unused */
441
442 static inline void
443 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
444 uint32_t value)
445 {
446 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
447 }
448
449 /* --- */
450
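/*
 * Compute the Endpoint Context "EP Type" field (xHCI 6.2.3) from an
 * endpoint descriptor: 1/2/3 for Isoch/Bulk/Interrupt OUT, 4 for Control,
 * and 5/6/7 for the IN variants (the 0x4 bit marks IN and Control).
 */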
451 static inline uint8_t
452 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
453 {
454 u_int eptype = 0;
455
456 switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
457 case UE_CONTROL:
458 eptype = 0x0;
459 break;
460 case UE_ISOCHRONOUS:
461 eptype = 0x1;
462 break;
463 case UE_BULK:
464 eptype = 0x2;
465 break;
466 case UE_INTERRUPT:
467 eptype = 0x3;
468 break;
469 }
470
471 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
472 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
473 return eptype | 0x4;
474 else
475 return eptype;
476 }
477
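/*
 * Compute the Device Context Index (DCI) for an endpoint: ep# * 2, plus 1
 * for IN and Control endpoints.  For example, the default control endpoint
 * is DCI 1, EP1 OUT is DCI 2 and EP1 IN is DCI 3.
 */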
478 static u_int
479 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
480 {
481 /* xHCI 1.0 section 4.5.1 */
482 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
483 u_int in = 0;
484
485 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
486 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
487 in = 1;
488
489 return epaddr * 2 + in;
490 }
491
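/*
 * Convert a DCI to an Input Context Index: the input context prepends the
 * Input Control Context at index 0, shifting every device context by one.
 */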
492 static inline u_int
493 xhci_dci_to_ici(const u_int i)
494 {
495 return i + 1;
496 }
497
498 static inline void *
499 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
500 const u_int dci)
501 {
502 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
503 }
504
505 #if 0 /* unused */
506 static inline bus_addr_t
507 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
508 const u_int dci)
509 {
510 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
511 }
512 #endif /* unused */
513
514 static inline void *
515 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
516 const u_int ici)
517 {
518 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
519 }
520
521 static inline bus_addr_t
522 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
523 const u_int ici)
524 {
525 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
526 }
527
528 static inline struct xhci_trb *
529 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
530 {
531 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
532 }
533
534 static inline bus_addr_t
535 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
536 {
537 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
538 }
539
540 static inline void
541 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx,
542 uint64_t parameter, uint32_t status, uint32_t control)
543 {
544 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb);
545 xx->xx_trb[idx].trb_0 = parameter;
546 xx->xx_trb[idx].trb_2 = status;
547 xx->xx_trb[idx].trb_3 = control;
548 }
549
550 static inline void
551 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
552 uint32_t control)
553 {
554 trb->trb_0 = htole64(parameter);
555 trb->trb_2 = htole32(status);
556 trb->trb_3 = htole32(control);
557 }
558
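/*
 * Map a TRB bus address (e.g. trb_0 of a completion event) back to an
 * index into the ring.  Returns nonzero if the address is not a TRB
 * belonging to this ring.
 */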
559 static int
560 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
561 {
562 /* base address of TRBs */
563 bus_addr_t trbp = xhci_ring_trbp(xr, 0);
564
565 /* trb_0 range sanity check */
566 if (trb_0 == 0 || trb_0 < trbp ||
567 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
568 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
569 return 1;
570 }
571 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
572 return 0;
573 }
574
575 static unsigned int
576 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
577 u_int dci)
578 {
579 uint32_t *cp;
580
581 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
582 cp = xhci_slot_get_dcv(sc, xs, dci);
583 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
584 }
585
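/*
 * Controller ports are split between two logical busses: bus 0 is the usb3
 * roothub, bus 1 the usb2 roothub.  sc_ctlrportbus is a bitmap recording
 * which bus each controller port belongs to, while sc_ctlrportmap and
 * sc_rhportmap translate between controller port numbers and per-bus
 * roothub port numbers (all numbered 1..n).
 */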
586 static inline unsigned int
587 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
588 {
589 const unsigned int port = ctlrport - 1;
590 const uint8_t bit = __BIT(port % NBBY);
591
592 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
593 }
594
595 /*
596 * Return the roothub port for a controller port. Both are 1..n.
597 */
598 static inline unsigned int
599 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
600 {
601
602 return sc->sc_ctlrportmap[ctrlport - 1];
603 }
604
605 /*
606 * Return the controller port for a bus roothub port. Both are 1..n.
607 */
608 static inline unsigned int
609 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
610 unsigned int rhport)
611 {
612
613 return sc->sc_rhportmap[bn][rhport - 1];
614 }
615
616 /* --- */
617
618 void
619 xhci_childdet(device_t self, device_t child)
620 {
621 struct xhci_softc * const sc = device_private(self);
622
623 mutex_enter(&sc->sc_intr_lock);
624 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
625 if (child == sc->sc_child2)
626 sc->sc_child2 = NULL;
627 else if (child == sc->sc_child)
628 sc->sc_child = NULL;
629 mutex_exit(&sc->sc_intr_lock);
630 }
631
632 int
633 xhci_detach(struct xhci_softc *sc, int flags)
634 {
635 int rv = 0;
636
637 if (sc->sc_child2 != NULL) {
638 rv = config_detach(sc->sc_child2, flags);
639 if (rv != 0)
640 return rv;
641 KASSERT(sc->sc_child2 == NULL);
642 }
643
644 if (sc->sc_child != NULL) {
645 rv = config_detach(sc->sc_child, flags);
646 if (rv != 0)
647 return rv;
648 KASSERT(sc->sc_child == NULL);
649 }
650
651 /* XXX unconfigure/free slots */
652
653 /* verify: */
654 xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
655 xhci_op_write_4(sc, XHCI_USBCMD, 0);
656 /* do we need to wait for stop? */
657
658 xhci_op_write_8(sc, XHCI_CRCR, 0);
659 xhci_ring_free(sc, &sc->sc_cr);
660 cv_destroy(&sc->sc_command_cv);
661 cv_destroy(&sc->sc_cmdbusy_cv);
662
663 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
664 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
665 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY);
666 xhci_ring_free(sc, &sc->sc_er);
667
668 usb_freemem(&sc->sc_eventst_dma);
669
670 xhci_op_write_8(sc, XHCI_DCBAAP, 0);
671 usb_freemem(&sc->sc_dcbaa_dma);
672
673 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);
674
675 kmem_free(sc->sc_ctlrportbus,
676 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
677 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));
678
679 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
680 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
681 }
682
683 mutex_destroy(&sc->sc_rhlock);
684 mutex_destroy(&sc->sc_lock);
685 mutex_destroy(&sc->sc_intr_lock);
686
687 pool_cache_destroy(sc->sc_xferpool);
688
689 return rv;
690 }
691
692 int
693 xhci_activate(device_t self, enum devact act)
694 {
695 struct xhci_softc * const sc = device_private(self);
696
697 switch (act) {
698 case DVACT_DEACTIVATE:
699 sc->sc_dying = true;
700 return 0;
701 default:
702 return EOPNOTSUPP;
703 }
704 }
705
706 bool
707 xhci_suspend(device_t self, const pmf_qual_t *qual)
708 {
709 struct xhci_softc * const sc = device_private(self);
710 size_t i, j, bn, dci;
711 int port;
712 uint32_t v;
713 usbd_status err;
714 bool ok = false;
715
716 XHCIHIST_FUNC(); XHCIHIST_CALLED();
717
718 /*
719 * Block issuance of new commands, and wait for all pending
720 * commands to complete.
721 */
722 mutex_enter(&sc->sc_lock);
723 KASSERT(sc->sc_suspender == NULL);
724 sc->sc_suspender = curlwp;
725 while (sc->sc_command_addr != 0)
726 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
727 mutex_exit(&sc->sc_lock);
728
729 /*
730 * Block roothub xfers which might touch portsc registers until
731 * we're done suspending.
732 */
733 mutex_enter(&sc->sc_rhlock);
734
735 /*
736 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
737 * xHCI Power Management, p. 342
738 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342
739 */
740
741 /*
742 * `1. Stop all USB activity by issuing Stop Endpoint Commands
743 * for Busy endpoints in the Running state. If the Force
744 * Save Context Capability (FSC = ``0'') is not supported,
745 * then Stop Endpoint Commands shall be issued for all idle
746 * endpoints in the Running state as well. The Stop
747 * Endpoint Command causes the xHC to update the respective
748 * Endpoint or Stream Contexts in system memory, e.g. the
749 * TR Dequeue Pointer, DCS, etc. fields. Refer to
750 * Implementation Note "0".'
751 */
752 for (i = 0; i < sc->sc_maxslots; i++) {
753 struct xhci_slot *xs = &sc->sc_slots[i];
754
755 /* Skip if the slot is not in use. */
756 if (xs->xs_idx == 0)
757 continue;
758
759 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
760 /* Skip if the endpoint is not Running. */
761 /* XXX What about Busy? */
762 if (xhci_get_epstate(sc, xs, dci) !=
763 XHCI_EPSTATE_RUNNING)
764 continue;
765
766 /* Stop endpoint. */
767 mutex_enter(&sc->sc_lock);
768 err = xhci_stop_endpoint_cmd(sc, xs, dci,
769 XHCI_TRB_3_SUSP_EP_BIT);
770 mutex_exit(&sc->sc_lock);
771 if (err) {
772 device_printf(self, "failed to stop endpoint"
773 " slot %zu dci %zu err %d\n",
774 i, dci, err);
775 goto out;
776 }
777 }
778 }
779
780 /*
781 * Next, suspend all the ports:
782 *
783 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15:
784 * Suspend-Resume, pp. 276-283
785 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276
786 */
787 for (bn = 0; bn < 2; bn++) {
788 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
789 /* 4.15.1: Port Suspend. */
790 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
791
792 /*
793 * `System software places individual ports
794 * into suspend mode by writing a ``3'' into
795 * the appropriate PORTSC register Port Link
796 * State (PLS) field (refer to Section 5.4.8).
797 * Software should only set the PLS field to
798 * ``3'' when the port is in the Enabled
799 * state.'
800 *
801 * `Software should not attempt to suspend a
802 * port unless the port reports that it is in
803 * the enabled (PED = ``1''; PLS < ``3'')
804 * state (refer to Section 5.4.8 for more
805 * information about PED and PLS).'
806 */
807 v = xhci_op_read_4(sc, port);
808 if (((v & XHCI_PS_PED) == 0) ||
809 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3)
810 continue;
811 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
812 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3);
813 xhci_op_write_4(sc, port, v);
814
815 /*
816 * `When the PLS field is written with U3
817 * (``3''), the status of the PLS bit will not
818 * change to the target U state U3 until the
819 * suspend signaling has completed to the
820 * attached device (which may be as long as
821 * 10ms.).'
822 *
823 * `Software is required to wait for U3
824 * transitions to complete before it puts the
825 * xHC into a low power state, and before
826 * resuming the port.'
827 *
828 * XXX Take advantage of the technique to
829 * reduce polling on host controllers that
830 * support the U3C capability.
831 */
832 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) {
833 v = xhci_op_read_4(sc, port);
834 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3)
835 break;
836 usb_delay_ms(&sc->sc_bus, 1);
837 }
838 if (j == XHCI_WAIT_PLS_U3) {
839 device_printf(self,
840 "suspend timeout on bus %zu port %zu\n",
841 bn, i);
842 goto out;
843 }
844 }
845 }
846
847 /*
848 * `2. Ensure that the Command Ring is in the Stopped state
849 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is
850 * empty), and all Command Completion Events associated
851 * with them have been received.'
852 *
853 * XXX
854 */
855
856 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */
857 xhci_op_write_4(sc, XHCI_USBCMD,
858 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS);
859
860 /*
861 * `4. Read the Operational Runtime, and VTIO registers in the
862 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ,
863 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their
864 * state.'
865 *
866 * (We don't use VTIO here (XXX for now?).)
867 */
868 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
869 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL);
870 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP);
871 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG);
872 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0));
873 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0));
874 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0));
875 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0));
876 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0));
877
878 /*
879 * `5. Set the Controller Save State (CSS) flag in the USBCMD
880 * register (5.4.1)...'
881 */
882 xhci_op_write_4(sc, XHCI_USBCMD,
883 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS);
884
885 /*
886 * `...and wait for the Save State Status (SSS) flag in the
887 * USBSTS register (5.4.2) to transition to ``0''.'
888 */
889 for (i = 0; i < XHCI_WAIT_SSS; i++) {
890 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0)
891 break;
892 usb_delay_ms(&sc->sc_bus, 1);
893 }
894 if (i >= XHCI_WAIT_SSS) {
895 device_printf(self, "suspend timeout, USBSTS.SSS\n");
896 /*
897 * Just optimistically go on and check SRE anyway --
898 * what's the worst that could happen?
899 */
900 }
901
902 /*
903 * `Note: After a Save or Restore operation completes, the
904 * Save/Restore Error (SRE) flag in the USBSTS register should
905 * be checked to ensure that the operation completed
906 * successfully.'
907 */
908 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
909 device_printf(self, "suspend error, USBSTS.SRE\n");
910 goto out;
911 }
912
913 /* Success! */
914 ok = true;
915
916 out: mutex_exit(&sc->sc_rhlock);
917 if (!ok) {
918 /*
919 * If suspend failed, stop holding up command issuance
920 * and make it fail instead.
921 */
922 mutex_enter(&sc->sc_lock);
923 KASSERT(sc->sc_suspender == curlwp);
924 sc->sc_suspender = NULL;
925 sc->sc_suspendresume_failed = true;
926 cv_broadcast(&sc->sc_cmdbusy_cv);
927 mutex_exit(&sc->sc_lock);
928 }
929 return ok;
930 }
931
932 bool
933 xhci_resume(device_t self, const pmf_qual_t *qual)
934 {
935 struct xhci_softc * const sc = device_private(self);
936 size_t i, j, bn, dci;
937 int port;
938 uint32_t v;
939 bool ok = false;
940
941 XHCIHIST_FUNC(); XHCIHIST_CALLED();
942
943 /*
944 * If resume had previously failed, just try again. Can't make
945 * things worse, probably.
946 */
947 mutex_enter(&sc->sc_lock);
948 if (sc->sc_suspendresume_failed) {
949 KASSERT(sc->sc_suspender == NULL);
950 sc->sc_suspender = curlwp;
951 sc->sc_suspendresume_failed = false;
952 }
953 KASSERT(sc->sc_suspender);
954 mutex_exit(&sc->sc_lock);
955
956 /*
957 * Block roothub xfers which might touch portsc registers until
958 * we're done resuming.
959 */
960 mutex_enter(&sc->sc_rhlock);
961
962 /*
963 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
964 * xHCI Power Management, p. 343
965 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343
966 */
967
968 /*
969 * `4. Restore the Operational Runtime, and VTIO registers with
970 * their previously saved state in the following order:
971 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN,
972 * IMOD, and VTIO.'
973 *
974 * (We don't use VTIO here (for now?).)
975 */
976 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd);
977 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl);
978 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap);
979 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config);
980 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0);
981 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0);
982 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0);
983 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0);
984 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0);
985
986 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */
987
988 /*
989 * `5. Set the Controller Restore State (CRS) flag in the
990 * USBCMD register (5.4.1) to ``1''...'
991 */
992 xhci_op_write_4(sc, XHCI_USBCMD,
993 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS);
994
995 /*
996 * `...and wait for the Restore State Status (RSS) in the
997 * USBSTS register (5.4.2) to transition to ``0''.'
998 */
999 for (i = 0; i < XHCI_WAIT_RSS; i++) {
1000 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0)
1001 break;
1002 usb_delay_ms(&sc->sc_bus, 1);
1003 }
1004 if (i >= XHCI_WAIT_RSS) {
1005 device_printf(self, "resume timeout, USBSTS.RSS\n");
1006 goto out;
1007 }
1008
1009 /*
1010 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits
1011 * are consistent with the RCS values to be written to the
1012 * CRCR.'
1013 *
1014 * XXX Hope just zeroing it is good enough!
1015 */
1016 xhci_host_dequeue(sc->sc_cr);
1017
1018 /*
1019 * `7. Write the CRCR with the address and RCS value of the
1020 * reinitialized Command Ring. Note that this write will
1021 * cause the Command Ring to restart at the address
1022 * specified by the CRCR.'
1023 */
1024 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1025 sc->sc_cr->xr_cs);
1026
1027 /*
1028 * `8. Enable the controller by setting Run/Stop (R/S) =
1029 * ``1''.'
1030 */
1031 xhci_op_write_4(sc, XHCI_USBCMD,
1032 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS);
1033
1034 /*
1035 * `9. Software shall walk the USB topology and initialize each
1036 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and
1037 * external hub ports attached to USB devices.'
1038 *
1039 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2
1040 * `Port Resume', 4.15.2.2 `Host Initiated'.
1041 *
1042 * XXX We should maybe batch up initiating the state
1043 * transitions, and then wait for them to complete all at once.
1044 */
1045 for (bn = 0; bn < 2; bn++) {
1046 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
1047 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
1048
1049 /* `When a port is in the U3 state: ...' */
1050 v = xhci_op_read_4(sc, port);
1051 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3)
1052 continue;
1053
1054 /*
1055 * `For a USB2 protocol port, software shall
1056 * write a ``15'' (Resume) to the PLS field to
1057 * initiate resume signaling. The port shall
1058 * transition to the Resume substate and the
1059 * xHC shall transmit the resume signaling
1060 * within 1ms (T_URSM). Software shall ensure
1061 * that resume is signaled for at least 20ms
1062 * (T_DRSMDN). Software shall start timing
1063 * T_DRSMDN from the write of ``15'' (Resume)
1064 * to PLS.'
1065 */
1066 if (bn == 1) {
1067 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0);
1068 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1069 v |= XHCI_PS_LWS;
1070 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME);
1071 xhci_op_write_4(sc, port, v);
1072 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
1073 } else {
1074 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0);
1075 }
1076
1077 /*
1078 * `For a USB3 protocol port [and a USB2
1079 * protocol port after transitioning to
1080 * Resume], software shall write a ``0'' (U0)
1081 * to the PLS field...'
1082 */
1083 v = xhci_op_read_4(sc, port);
1084 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1085 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0);
1086 xhci_op_write_4(sc, port, v);
1087
1088 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) {
1089 v = xhci_op_read_4(sc, port);
1090 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0)
1091 break;
1092 usb_delay_ms(&sc->sc_bus, 1);
1093 }
1094 if (j == XHCI_WAIT_PLS_U0) {
1095 device_printf(self,
1096 "resume timeout on bus %zu port %zu\n",
1097 bn, i);
1098 goto out;
1099 }
1100 }
1101 }
1102
1103 /*
1104 * `10. Restart each of the previously Running endpoints by
1105 * ringing their doorbells.'
1106 */
1107 for (i = 0; i < sc->sc_maxslots; i++) {
1108 struct xhci_slot *xs = &sc->sc_slots[i];
1109
1110 /* Skip if the slot is not in use. */
1111 if (xs->xs_idx == 0)
1112 continue;
1113
1114 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
1115 /* Skip if the endpoint is not Running. */
1116 if (xhci_get_epstate(sc, xs, dci) !=
1117 XHCI_EPSTATE_RUNNING)
1118 continue;
1119
1120 /* Ring the doorbell. */
1121 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
1122 }
1123 }
1124
1125 /*
1126 * `Note: After a Save or Restore operation completes, the
1127 * Save/Restore Error (SRE) flag in the USBSTS register should
1128 * be checked to ensure that the operation completed
1129 * successfully.'
1130 */
1131 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
1132 device_printf(self, "resume error, USBSTS.SRE\n");
1133 goto out;
1134 }
1135
1136 /* Success! */
1137 ok = true;
1138
1139 out: /*
1140 * Resume command issuance. If the hardware failed to resume,
1141 * well, tough -- deadlocking because everything is held up on
1142 * the suspension, with no opportunity to detach, isn't better
1143 * than timing out waiting for dead hardware.
1144 */
1145 mutex_enter(&sc->sc_lock);
1146 KASSERT(sc->sc_suspender);
1147 sc->sc_suspender = NULL;
1148 sc->sc_suspendresume_failed = !ok;
1149 cv_broadcast(&sc->sc_cmdbusy_cv);
1150 mutex_exit(&sc->sc_lock);
1151
1152 mutex_exit(&sc->sc_rhlock);
1153 return ok;
1154 }
1155
1156 bool
1157 xhci_shutdown(device_t self, int flags)
1158 {
1159 return false;
1160 }
1161
1162 static int
1163 xhci_hc_reset(struct xhci_softc * const sc)
1164 {
1165 uint32_t usbcmd, usbsts;
1166 int i;
1167
1168 /* Check controller not ready */
1169 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1170 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1171 if ((usbsts & XHCI_STS_CNR) == 0)
1172 break;
1173 usb_delay_ms(&sc->sc_bus, 1);
1174 }
1175 if (i >= XHCI_WAIT_CNR) {
1176 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
1177 return EIO;
1178 }
1179
1180 /* Halt controller */
1181 usbcmd = 0;
1182 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1183 usb_delay_ms(&sc->sc_bus, 1);
1184
1185 /* Reset controller */
1186 usbcmd = XHCI_CMD_HCRST;
1187 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1188 for (i = 0; i < XHCI_WAIT_HCRST; i++) {
1189 /*
  1190 		 * Wait 1ms first. Existing Intel xHCI controllers require a
  1191 		 * 1ms delay to prevent a system hang (errata).
1192 */
1193 usb_delay_ms(&sc->sc_bus, 1);
1194 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
1195 if ((usbcmd & XHCI_CMD_HCRST) == 0)
1196 break;
1197 }
1198 if (i >= XHCI_WAIT_HCRST) {
1199 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
1200 return EIO;
1201 }
1202
1203 /* Check controller not ready */
1204 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1205 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1206 if ((usbsts & XHCI_STS_CNR) == 0)
1207 break;
1208 usb_delay_ms(&sc->sc_bus, 1);
1209 }
1210 if (i >= XHCI_WAIT_CNR) {
1211 aprint_error_dev(sc->sc_dev,
1212 "controller not ready timeout after reset\n");
1213 return EIO;
1214 }
1215
1216 return 0;
1217 }
1218
1219 /* 7.2 xHCI Support Protocol Capability */
1220 static void
1221 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
1222 {
1223 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1224
1225 /* XXX Cache this lot */
1226
1227 const uint32_t w0 = xhci_read_4(sc, ecp);
1228 const uint32_t w4 = xhci_read_4(sc, ecp + 4);
1229 const uint32_t w8 = xhci_read_4(sc, ecp + 8);
1230 const uint32_t wc = xhci_read_4(sc, ecp + 0xc);
1231
1232 aprint_debug_dev(sc->sc_dev,
1233 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc);
1234
1235 if (w4 != XHCI_XECP_USBID)
1236 return;
1237
1238 const int major = XHCI_XECP_SP_W0_MAJOR(w0);
1239 const int minor = XHCI_XECP_SP_W0_MINOR(w0);
1240 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
1241 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);
1242
1243 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
1244 switch (mm) {
1245 case 0x0200:
1246 case 0x0300:
1247 case 0x0301:
1248 case 0x0310:
1249 case 0x0320:
1250 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
1251 major == 3 ? "ss" : "hs", cpo, cpo + cpc - 1);
1252 if (major == 3)
1253 sc->sc_usb3nports += cpo + cpc - 1;
1254 else
1255 sc->sc_usb2nports += cpo + cpc - 1;
1256 break;
1257 default:
1258 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
1259 major, minor);
1260 return;
1261 }
1262
1263 const size_t bus = (major == 3) ? 0 : 1;
1264
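	/*
	 * For example, a controller advertising USB3 on controller ports
	 * 1-4 (cpo=1, cpc=4) and USB2 on ports 5-8 (cpo=5, cpc=4) ends up
	 * with roothub ports 1-4 on each bus, mapping back to controller
	 * ports 1-4 and 5-8 respectively.
	 */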
1265 /* Index arrays with 0..n-1 where ports are numbered 1..n */
1266 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
1267 if (sc->sc_ctlrportmap[cp] != 0) {
1268 aprint_error_dev(sc->sc_dev, "controller port %zu "
1269 "already assigned", cp);
1270 continue;
1271 }
1272
1273 sc->sc_ctlrportbus[cp / NBBY] |=
1274 bus == 0 ? 0 : __BIT(cp % NBBY);
1275
1276 const size_t rhp = sc->sc_rhportcount[bus]++;
1277
1278 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
1279 "bus %zu rhp %zu is %d", bus, rhp,
1280 sc->sc_rhportmap[bus][rhp]);
1281
1282 sc->sc_rhportmap[bus][rhp] = cp + 1;
1283 sc->sc_ctlrportmap[cp] = rhp + 1;
1284 }
1285 }
1286
1287 /* Process extended capabilities */
1288 static void
1289 xhci_ecp(struct xhci_softc *sc)
1290 {
1291 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1292
1293 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4;
1294 while (ecp != 0) {
1295 uint32_t ecr = xhci_read_4(sc, ecp);
1296 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
1297 switch (XHCI_XECP_ID(ecr)) {
1298 case XHCI_ID_PROTOCOLS: {
1299 xhci_id_protocols(sc, ecp);
1300 break;
1301 }
1302 case XHCI_ID_USB_LEGACY: {
1303 uint8_t bios_sem;
1304
1305 /* Take host controller ownership from BIOS */
1306 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
1307 if (bios_sem) {
1308 /* sets xHCI to be owned by OS */
1309 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
1310 aprint_debug_dev(sc->sc_dev,
1311 "waiting for BIOS to give up control\n");
1312 for (int i = 0; i < 5000; i++) {
1313 bios_sem = xhci_read_1(sc, ecp +
1314 XHCI_XECP_BIOS_SEM);
1315 if (bios_sem == 0)
1316 break;
1317 DELAY(1000);
1318 }
1319 if (bios_sem) {
1320 aprint_error_dev(sc->sc_dev,
1321 "timed out waiting for BIOS\n");
1322 }
1323 }
1324 break;
1325 }
1326 default:
1327 break;
1328 }
1329 ecr = xhci_read_4(sc, ecp);
1330 if (XHCI_XECP_NEXT(ecr) == 0) {
1331 ecp = 0;
1332 } else {
1333 ecp += XHCI_XECP_NEXT(ecr) * 4;
1334 }
1335 }
1336 }
1337
1338 #define XHCI_HCCPREV1_BITS \
1339 "\177\020" /* New bitmask */ \
1340 "f\020\020XECP\0" \
1341 "f\014\4MAXPSA\0" \
1342 "b\013CFC\0" \
1343 "b\012SEC\0" \
1344 "b\011SBD\0" \
1345 "b\010FSE\0" \
1346 "b\7NSS\0" \
1347 "b\6LTC\0" \
1348 "b\5LHRC\0" \
1349 "b\4PIND\0" \
1350 "b\3PPC\0" \
1351 "b\2CZC\0" \
1352 "b\1BNC\0" \
1353 "b\0AC64\0" \
1354 "\0"
1355 #define XHCI_HCCV1_x_BITS \
1356 "\177\020" /* New bitmask */ \
1357 "f\020\020XECP\0" \
1358 "f\014\4MAXPSA\0" \
1359 "b\013CFC\0" \
1360 "b\012SEC\0" \
1361 "b\011SPC\0" \
1362 "b\010PAE\0" \
1363 "b\7NSS\0" \
1364 "b\6LTC\0" \
1365 "b\5LHRC\0" \
1366 "b\4PIND\0" \
1367 "b\3PPC\0" \
1368 "b\2CSZ\0" \
1369 "b\1BNC\0" \
1370 "b\0AC64\0" \
1371 "\0"
1372
1373 #define XHCI_HCC2_BITS \
1374 "\177\020" /* New bitmask */ \
1375 "b\7ETC_TSC\0" \
1376 "b\6ETC\0" \
1377 "b\5CIC\0" \
1378 "b\4LEC\0" \
1379 "b\3CTC\0" \
1380 "b\2FSC\0" \
1381 "b\1CMC\0" \
1382 "b\0U3C\0" \
1383 "\0"
1384
1385 void
1386 xhci_start(struct xhci_softc *sc)
1387 {
1388 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
1389 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
1390 /* Intel xhci needs interrupt rate moderated. */
1391 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
1392 else
1393 xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
1394 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
1395 xhci_rt_read_4(sc, XHCI_IMOD(0)));
1396
1397 /* Go! */
1398 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
1399 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n",
1400 xhci_op_read_4(sc, XHCI_USBCMD));
1401 }
1402
1403 int
1404 xhci_init(struct xhci_softc *sc)
1405 {
1406 bus_size_t bsz;
1407 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff;
1408 uint32_t pagesize, config;
1409 int i = 0;
1410 uint16_t hciversion;
1411 uint8_t caplength;
1412
1413 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1414
1415 /* Set up the bus struct for the usb 3 and usb 2 buses */
1416 sc->sc_bus.ub_methods = &xhci_bus_methods;
1417 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
1418 sc->sc_bus.ub_usedma = true;
1419 sc->sc_bus.ub_hcpriv = sc;
1420
1421 sc->sc_bus2.ub_methods = &xhci_bus_methods;
1422 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
1423 sc->sc_bus2.ub_revision = USBREV_2_0;
1424 sc->sc_bus2.ub_usedma = true;
1425 sc->sc_bus2.ub_hcpriv = sc;
1426 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;
1427
1428 caplength = xhci_read_1(sc, XHCI_CAPLENGTH);
1429 hciversion = xhci_read_2(sc, XHCI_HCIVERSION);
1430
1431 if (hciversion < XHCI_HCIVERSION_0_96 ||
1432 hciversion >= 0x0200) {
1433 aprint_normal_dev(sc->sc_dev,
1434 "xHCI version %x.%x not known to be supported\n",
1435 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1436 } else {
1437 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
1438 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1439 }
1440
1441 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
1442 &sc->sc_cbh) != 0) {
1443 aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
1444 return ENOMEM;
1445 }
1446
1447 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
1448 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
1449 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
1450 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
1451 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
1452 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
1453 aprint_debug_dev(sc->sc_dev,
1454 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);
1455
1456 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
1457 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32;
1458
1459 char sbuf[128];
1460 if (hciversion < XHCI_HCIVERSION_1_0)
1461 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc);
1462 else
1463 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc);
1464 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
1465 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n",
1466 XHCI_HCC_XECP(sc->sc_hcc) * 4);
1467 if (hciversion >= XHCI_HCIVERSION_1_1) {
1468 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
1469 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2);
1470 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
1471 }
1472
1473 /* default all ports to bus 0, i.e. usb 3 */
1474 sc->sc_ctlrportbus = kmem_zalloc(
1475 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
1476 sc->sc_ctlrportmap =
1477 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1478
1479 /* controller port to bus roothub port map */
1480 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
1481 sc->sc_rhportmap[j] =
1482 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1483 }
1484
1485 /*
1486 * Process all Extended Capabilities
1487 */
1488 xhci_ecp(sc);
1489
1490 bsz = XHCI_PORTSC(sc->sc_maxports);
1491 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
1492 &sc->sc_obh) != 0) {
1493 aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
1494 return ENOMEM;
1495 }
1496
1497 dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
1498 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
1499 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
1500 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
1501 return ENOMEM;
1502 }
1503
1504 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
1505 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
1506 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
1507 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
1508 return ENOMEM;
1509 }
1510
1511 int rv;
1512 rv = xhci_hc_reset(sc);
1513 if (rv != 0) {
1514 return rv;
1515 }
1516
1517 if (sc->sc_vendor_init)
1518 sc->sc_vendor_init(sc);
1519
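	/*
	 * XHCI_PAGESIZE is a bitmap of supported page sizes: bit n set
	 * means 2^(n+12) bytes is supported (bit 0 = 4KiB).  Use the
	 * smallest supported size.
	 */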
1520 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
1521 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
1522 pagesize = ffs(pagesize);
1523 if (pagesize == 0) {
1524 aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
1525 return EIO;
1526 }
1527 sc->sc_pgsz = 1 << (12 + (pagesize - 1));
1528 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
1529 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
1530 (uint32_t)sc->sc_maxslots);
1531 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);
1532
1533 int err;
1534 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
1535 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
1536 if (sc->sc_maxspbuf != 0) {
1537 err = usb_allocmem(sc->sc_bus.ub_dmatag,
1538 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
1539 USBMALLOC_ZERO, &sc->sc_spbufarray_dma);
1540 if (err) {
1541 aprint_error_dev(sc->sc_dev,
1542 "spbufarray init fail, err %d\n", err);
1543 return ENOMEM;
1544 }
1545
1546 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
1547 sc->sc_maxspbuf, KM_SLEEP);
1548 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
1549 for (i = 0; i < sc->sc_maxspbuf; i++) {
1550 usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
1551 /* allocate contexts */
1552 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz,
1553 sc->sc_pgsz, USBMALLOC_ZERO, dma);
1554 if (err) {
1555 aprint_error_dev(sc->sc_dev,
1556 "spbufarray_dma init fail, err %d\n", err);
1557 rv = ENOMEM;
1558 goto bad1;
1559 }
1560 spbufarray[i] = htole64(DMAADDR(dma, 0));
1561 usb_syncmem(dma, 0, sc->sc_pgsz,
1562 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1563 }
1564
1565 usb_syncmem(&sc->sc_spbufarray_dma, 0,
1566 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
1567 }
1568
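	/*
	 * Program CONFIG.MaxSlotsEn (low 8 bits) with the number of
	 * device slots to enable.
	 */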
1569 config = xhci_op_read_4(sc, XHCI_CONFIG);
1570 config &= ~0xFF;
1571 config |= sc->sc_maxslots & 0xFF;
1572 xhci_op_write_4(sc, XHCI_CONFIG, config);
1573
1574 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
1575 XHCI_COMMAND_RING_SEGMENTS_ALIGN);
1576 if (err) {
1577 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n",
1578 err);
1579 rv = ENOMEM;
1580 goto bad1;
1581 }
1582
1583 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
1584 XHCI_EVENT_RING_SEGMENTS_ALIGN);
1585 if (err) {
1586 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
1587 err);
1588 rv = ENOMEM;
1589 goto bad2;
1590 }
1591
1592 usb_dma_t *dma;
1593 size_t size;
1594 size_t align;
1595
1596 dma = &sc->sc_eventst_dma;
1597 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
1598 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
1599 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
1600 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
1601 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1602 USBMALLOC_ZERO, dma);
1603 if (err) {
1604 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
1605 err);
1606 rv = ENOMEM;
1607 goto bad3;
1608 }
1609
1610 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
1611 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
1612 KERNADDR(&sc->sc_eventst_dma, 0),
1613 sc->sc_eventst_dma.udma_block->size);
1614
1615 dma = &sc->sc_dcbaa_dma;
1616 size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
1617 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
1618 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
1619 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1620 USBMALLOC_ZERO, dma);
1621 if (err) {
1622 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
1623 rv = ENOMEM;
1624 goto bad4;
1625 }
1626 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
1627 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
1628 KERNADDR(&sc->sc_dcbaa_dma, 0),
1629 sc->sc_dcbaa_dma.udma_block->size);
1630
1631 if (sc->sc_maxspbuf != 0) {
1632 /*
  1633 		 * DCBA entry 0 holds the scratchbuf array pointer.
1634 */
1635 *(uint64_t *)KERNADDR(dma, 0) =
1636 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
1637 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1638 }
1639
1640 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
1641 KM_SLEEP);
1642 if (sc->sc_slots == NULL) {
1643 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
1644 rv = ENOMEM;
1645 goto bad;
1646 }
1647
1648 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
1649 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
1650 if (sc->sc_xferpool == NULL) {
1651 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
1652 err);
1653 rv = ENOMEM;
1654 goto bad;
1655 }
1656
1657 cv_init(&sc->sc_command_cv, "xhcicmd");
1658 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
1659 mutex_init(&sc->sc_rhlock, MUTEX_DEFAULT, IPL_NONE);
1660 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
1661 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
1662
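	/*
	 * Single-segment event ring: one ERST entry holding the ring's
	 * base address (erste_0) and size in TRBs (erste_2), handed to
	 * interrupter 0 along with the initial dequeue pointer.
	 */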
1663 struct xhci_erste *erst;
1664 erst = KERNADDR(&sc->sc_eventst_dma, 0);
1665 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0));
1666 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb);
1667 erst[0].erste_3 = htole32(0);
1668 usb_syncmem(&sc->sc_eventst_dma, 0,
1669 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);
1670
1671 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
1672 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
1673 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) |
1674 XHCI_ERDP_BUSY);
1675
1676 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
1677 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1678 sc->sc_cr->xr_cs);
1679
1680 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
1681 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);
1682
1683 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
1684 xhci_start(sc);
1685
1686 return 0;
1687
1688 bad:
1689 if (sc->sc_xferpool) {
1690 pool_cache_destroy(sc->sc_xferpool);
1691 sc->sc_xferpool = NULL;
1692 }
1693
1694 if (sc->sc_slots) {
1695 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
1696 sc->sc_maxslots);
1697 sc->sc_slots = NULL;
1698 }
1699
1700 usb_freemem(&sc->sc_dcbaa_dma);
1701 bad4:
1702 usb_freemem(&sc->sc_eventst_dma);
1703 bad3:
1704 xhci_ring_free(sc, &sc->sc_er);
1705 bad2:
1706 xhci_ring_free(sc, &sc->sc_cr);
1707 i = sc->sc_maxspbuf;
1708 bad1:
1709 for (int j = 0; j < i; j++)
1710 usb_freemem(&sc->sc_spbuf_dma[j]);
1711 usb_freemem(&sc->sc_spbufarray_dma);
1712
1713 return rv;
1714 }
1715
1716 static inline bool
1717 xhci_polling_p(struct xhci_softc * const sc)
1718 {
1719 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
1720 }
1721
1722 int
1723 xhci_intr(void *v)
1724 {
1725 struct xhci_softc * const sc = v;
1726 int ret = 0;
1727
1728 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1729
1730 if (sc == NULL)
1731 return 0;
1732
1733 mutex_spin_enter(&sc->sc_intr_lock);
1734
1735 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1736 goto done;
1737
1738 /* If we get an interrupt while polling, then just ignore it. */
1739 if (xhci_polling_p(sc)) {
1740 #ifdef DIAGNOSTIC
1741 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1742 #endif
1743 goto done;
1744 }
1745
1746 ret = xhci_intr1(sc);
1747 if (ret) {
1748 KASSERT(sc->sc_child || sc->sc_child2);
1749
1750 /*
  1751 		 * One of the child busses could already be detached. It
  1752 		 * doesn't matter on which of the two the softintr is scheduled.
1753 */
1754 if (sc->sc_child)
1755 usb_schedsoftintr(&sc->sc_bus);
1756 else
1757 usb_schedsoftintr(&sc->sc_bus2);
1758 }
1759 done:
1760 mutex_spin_exit(&sc->sc_intr_lock);
1761 return ret;
1762 }
1763
1764 int
1765 xhci_intr1(struct xhci_softc * const sc)
1766 {
1767 uint32_t usbsts;
1768 uint32_t iman;
1769
1770 XHCIHIST_FUNC();
1771
1772 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1773 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
1774 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
1775 XHCI_STS_HCE)) == 0) {
1776 DPRINTFN(16, "ignored intr not for %jd",
1777 device_unit(sc->sc_dev), 0, 0, 0);
1778 return 0;
1779 }
1780
1781 /*
  1782 	 * Clear EINT and other transient flags, so as not to misinterpret
  1783 	 * the next shared interrupt. Also, to avoid a race, EINT must be
  1784 	 * cleared before XHCI_IMAN_INTR_PEND is cleared.
1785 */
1786 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & ~XHCI_STS_RSVDP0);
1787
1788 #ifdef XHCI_DEBUG
1789 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1790 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1791 #endif
1792
1793 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1794 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1795 iman |= XHCI_IMAN_INTR_PEND;
1796 xhci_rt_write_4(sc, XHCI_IMAN(0), iman);
1797
1798 #ifdef XHCI_DEBUG
1799 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1800 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1801 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1802 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1803 #endif
1804
1805 return 1;
1806 }
1807
1808 /*
1809 * 3 port speed types used in USB stack
1810 *
1811 * usbdi speed
1812 * definition: USB_SPEED_* in usb.h
1813 * They are used in struct usbd_device in USB stack.
1814 * ioctl interface uses these values too.
1815 * port_status speed
1816 * definition: UPS_*_SPEED in usb.h
1817 * They are used in usb_port_status_t and valid only for USB 2.0.
1818 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus
1819 * of usb_port_status_ext_t indicates port speed.
1820 * Note that some 3.0 values overlap with 2.0 values.
  1821 	 *   (e.g. 0x200 means UPS_PORT_POWER_SS in SS and
1822 * means UPS_LOW_SPEED in HS.)
1823 * port status returned from hub also uses these values.
1824 * On NetBSD UPS_OTHER_SPEED indicates port speed is super speed
1825 * or more.
1826 * xspeed:
1827 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
1828 * They are used in only slot context and PORTSC reg of xhci.
1829 * The difference between usbdi speed and xspeed is
1830 * that FS and LS values are swapped.
1831 */
1832
1833 /* convert usbdi speed to xspeed */
1834 static int
1835 xhci_speed2xspeed(int speed)
1836 {
1837 switch (speed) {
1838 case USB_SPEED_LOW: return 2;
1839 case USB_SPEED_FULL: return 1;
1840 default: return speed;
1841 }
1842 }
1843
1844 #if 0
1845 /* convert xspeed to usbdi speed */
1846 static int
1847 xhci_xspeed2speed(int xspeed)
1848 {
1849 switch (xspeed) {
1850 case 1: return USB_SPEED_FULL;
1851 case 2: return USB_SPEED_LOW;
1852 default: return xspeed;
1853 }
1854 }
1855 #endif
1856
1857 /* convert xspeed to port status speed */
1858 static int
1859 xhci_xspeed2psspeed(int xspeed)
1860 {
1861 switch (xspeed) {
1862 case 0: return 0;
1863 case 1: return UPS_FULL_SPEED;
1864 case 2: return UPS_LOW_SPEED;
1865 case 3: return UPS_HIGH_SPEED;
1866 default: return UPS_OTHER_SPEED;
1867 }
1868 }
1869
1870 /*
1871 * Construct input contexts and issue TRB to open pipe.
1872 */
1873 static usbd_status
1874 xhci_configure_endpoint(struct usbd_pipe *pipe)
1875 {
1876 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1877 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1878 #ifdef USB_DEBUG
1879 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1880 #endif
1881 struct xhci_soft_trb trb;
1882 usbd_status err;
1883
1884 XHCIHIST_FUNC();
1885 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
1886 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
1887 pipe->up_endpoint->ue_edesc->bmAttributes);
1888
1889 /* XXX ensure input context is available? */
1890
1891 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz);
1892
1893 /* set up context */
1894 xhci_setup_ctx(pipe);
1895
1896 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0),
1897 sc->sc_ctxsz * 1);
1898 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs,
1899 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1);
1900
1901 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1902 trb.trb_2 = 0;
1903 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1904 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1905
1906 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
1907
1908 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1909 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci),
1910 sc->sc_ctxsz * 1);
1911
1912 return err;
1913 }
1914
1915 #if 0
1916 static usbd_status
1917 xhci_unconfigure_endpoint(struct usbd_pipe *pipe)
1918 {
1919 #ifdef USB_DEBUG
1920 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1921 #endif
1922
1923 XHCIHIST_FUNC();
1924 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
1925
1926 return USBD_NORMAL_COMPLETION;
1927 }
1928 #endif
1929
1930 /* 4.6.8, 6.4.3.7 */
1931 static void
1932 xhci_reset_endpoint(struct usbd_pipe *pipe)
1933 {
1934 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1935 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1936 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1937 struct xhci_soft_trb trb;
1938
1939 XHCIHIST_FUNC();
1940 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1941
1942 KASSERT(mutex_owned(&sc->sc_lock));
1943
1944 trb.trb_0 = 0;
1945 trb.trb_2 = 0;
1946 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1947 XHCI_TRB_3_EP_SET(dci) |
1948 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP);
1949
1950 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
1951 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
1952 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
1953 }
1954 }
1955
1956 /*
1957 * 4.6.9, 6.4.3.8
1958 * Stop execution of TDs on xfer ring.
1959 * Should be called with sc_lock held.
1960 */
1961 static usbd_status
1962 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci,
1963 uint32_t trb3flags)
1964 {
1965 struct xhci_soft_trb trb;
1966 usbd_status err;
1967
1968 XHCIHIST_FUNC();
1969 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1970
1971 KASSERT(mutex_owned(&sc->sc_lock));
1972
1973 trb.trb_0 = 0;
1974 trb.trb_2 = 0;
1975 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1976 XHCI_TRB_3_EP_SET(dci) |
1977 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) |
1978 trb3flags;
1979
1980 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1981
1982 return err;
1983 }
1984
1985 static usbd_status
1986 xhci_stop_endpoint(struct usbd_pipe *pipe)
1987 {
1988 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1989 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1990 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1991
1992 XHCIHIST_FUNC();
1993 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1994
1995 KASSERT(mutex_owned(&sc->sc_lock));
1996
1997 return xhci_stop_endpoint_cmd(sc, xs, dci, 0);
1998 }
1999
2000 /*
2001 * Set TR Dequeue Pointer.
2002 * xHCI 1.1 4.6.10 6.4.3.9
2003  * Purge all of the TRBs on the ring and reinitialize the ring.
2004  * Set the TR Dequeue Pointer to 0 and the Cycle State to 1.
2005  * The EPSTATE of the endpoint must be ERROR or STOPPED, otherwise a
2006  * CONTEXT_STATE error will be generated.
2007 */
2008 static void
2009 xhci_set_dequeue(struct usbd_pipe *pipe)
2010 {
2011 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2012 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2013 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2014 struct xhci_ring * const xr = xs->xs_xr[dci];
2015 struct xhci_soft_trb trb;
2016
2017 XHCIHIST_FUNC();
2018 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
2019
2020 KASSERT(mutex_owned(&sc->sc_lock));
2021 KASSERT(xr != NULL);
2022
2023 xhci_host_dequeue(xr);
2024
2025 /* set DCS */
2026 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
2027 trb.trb_2 = 0;
2028 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2029 XHCI_TRB_3_EP_SET(dci) |
2030 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
2031
2032 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
2033 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
2034 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
2035 }
2036 }
2037
2038 /*
2039  * Open a new pipe: called from usbd_setup_pipe_flags.
2040  * Fills in the pipe's methods.
2041  * If the pipe is not for ep0, calls xhci_configure_endpoint.
2042 */
2043 static usbd_status
2044 xhci_open(struct usbd_pipe *pipe)
2045 {
2046 struct usbd_device * const dev = pipe->up_dev;
2047 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
2048 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
2049 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2050 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2051 const u_int dci = xhci_ep_get_dci(ed);
2052 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
2053 usbd_status err;
2054
2055 XHCIHIST_FUNC();
2056 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr,
2057 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed);
2058 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx",
2059 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress,
2060 ed->bmAttributes);
2061 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize),
2062 ed->bInterval, 0, 0);
2063
2064 if (sc->sc_dying)
2065 return USBD_IOERROR;
2066
2067 /* Root Hub */
2068 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) {
2069 switch (ed->bEndpointAddress) {
2070 case USB_CONTROL_ENDPOINT:
2071 pipe->up_methods = &roothub_ctrl_methods;
2072 break;
2073 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
2074 pipe->up_methods = &xhci_root_intr_methods;
2075 break;
2076 default:
2077 pipe->up_methods = NULL;
2078 DPRINTFN(0, "bad bEndpointAddress 0x%02jx",
2079 ed->bEndpointAddress, 0, 0, 0);
2080 return USBD_INVAL;
2081 }
2082 return USBD_NORMAL_COMPLETION;
2083 }
2084
2085 usb_init_task(&xpipe->xp_async_task, xhci_pipe_restart_async_task,
2086 pipe, USB_TASKQ_MPSAFE);
2087
2088 switch (xfertype) {
2089 case UE_CONTROL:
2090 pipe->up_methods = &xhci_device_ctrl_methods;
2091 break;
2092 case UE_ISOCHRONOUS:
2093 pipe->up_methods = &xhci_device_isoc_methods;
2094 pipe->up_serialise = false;
2095 xpipe->xp_isoc_next = -1;
2096 break;
2097 case UE_BULK:
2098 pipe->up_methods = &xhci_device_bulk_methods;
2099 break;
2100 case UE_INTERRUPT:
2101 pipe->up_methods = &xhci_device_intr_methods;
2102 break;
2103 default:
2104 return USBD_IOERROR;
2105 break;
2106 }
2107
2108 KASSERT(xs != NULL);
2109 KASSERT(xs->xs_xr[dci] == NULL);
2110
2111 /* allocate transfer ring */
2112 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS,
2113 XHCI_TRB_ALIGN);
2114 if (err) {
2115 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0);
2116 return err;
2117 }
2118
2119 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT)
2120 return xhci_configure_endpoint(pipe);
2121
2122 return USBD_NORMAL_COMPLETION;
2123 }
2124
2125 /*
2126  * Close a pipe: called from usbd_kill_pipe via the close methods.
2127  * If the endpoint being closed is ep0, the whole slot is disabled.
2128  * Should be called with sc_lock held.
2129 */
2130 static void
2131 xhci_close_pipe(struct usbd_pipe *pipe)
2132 {
2133 struct xhci_pipe * const xp =
2134 container_of(pipe, struct xhci_pipe, xp_pipe);
2135 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2136 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2137 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2138 const u_int dci = xhci_ep_get_dci(ed);
2139 struct xhci_soft_trb trb;
2140 uint32_t *cp;
2141
2142 XHCIHIST_FUNC();
2143
2144 usb_rem_task_wait(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC,
2145 &sc->sc_lock);
2146
2147 if (sc->sc_dying)
2148 return;
2149
2150 /* xs is uninitialized before xhci_init_slot */
2151 if (xs == NULL || xs->xs_idx == 0)
2152 return;
2153
2154 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2155 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2156
2157 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
2158 KASSERT(mutex_owned(&sc->sc_lock));
2159
2160 if (pipe->up_dev->ud_depth == 0)
2161 return;
2162
2163 if (dci == XHCI_DCI_EP_CONTROL) {
2164 DPRINTFN(4, "closing ep0", 0, 0, 0, 0);
2165 /* This frees all rings */
2166 xhci_disable_slot(sc, xs->xs_idx);
2167 return;
2168 }
2169
2170 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED)
2171 (void)xhci_stop_endpoint(pipe);
2172
2173 /*
2174 	 * Set the drop-context bit for the endpoint being closed.
2175 	 * Don't set the DC bit to 1, otherwise all endpoints
2176 	 * would be deconfigured.
2177 */
2178 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
2179 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));
2180 cp[1] = htole32(0);
2181
2182 /* XXX should be most significant one, not dci? */
2183 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
2184 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci));
2185
2186 /* configure ep context performs an implicit dequeue */
2187 xhci_host_dequeue(xs->xs_xr[dci]);
2188
2189 /* sync input contexts before they are read from memory */
2190 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
2191
2192 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
2193 trb.trb_2 = 0;
2194 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2195 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
2196
2197 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
2198 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2199
2200 xhci_ring_free(sc, &xs->xs_xr[dci]);
2201 xs->xs_xr[dci] = NULL;
2202 }
2203
2204 /*
2205 * Abort transfer. Must be called with sc_lock held. Releases and
2206 * reacquires sc_lock to sleep until hardware acknowledges abort.
2207 */
2208 static void
2209 xhci_abortx(struct usbd_xfer *xfer)
2210 {
2211 XHCIHIST_FUNC();
2212 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2213
2214 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
2215 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
2216
2217 KASSERT(mutex_owned(&sc->sc_lock));
2218 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2219 xfer->ux_status == USBD_TIMEOUT),
2220 "bad abort status: %d", xfer->ux_status);
2221
2222 xhci_pipe_restart(xfer->ux_pipe);
2223
2224 DPRINTFN(14, "end", 0, 0, 0, 0);
2225 }
2226
2227 static void
2228 xhci_host_dequeue(struct xhci_ring * const xr)
2229 {
2230 /* When dequeueing the controller, update our struct copy too */
2231 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE);
2232 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE,
2233 BUS_DMASYNC_PREWRITE);
2234 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies));
2235
2236 xr->xr_ep = 0;
2237 xr->xr_cs = 1;
2238 }
2239
2240 /*
2241 * Recover STALLed endpoint, or stop endpoint to abort a pipe.
2242 * xHCI 1.1 sect 4.10.2.1
2243 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove
2244 * all transfers on transfer ring.
2245 */
2246 static void
2247 xhci_pipe_restart(struct usbd_pipe *pipe)
2248 {
2249 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2250 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2251 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2252
2253 XHCIHIST_FUNC();
2254 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2255 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2256
2257 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2258
2259 /*
2260 * - If the endpoint is halted, indicating a stall, reset it.
2261 * - If the endpoint is stopped, we're already good.
2262 * - Otherwise, someone wanted to abort the pipe, so stop the
2263 * endpoint.
2264 *
2265 * In any case, clear the ring.
2266 */
2267 switch (xhci_get_epstate(sc, xs, dci)) {
2268 case XHCI_EPSTATE_HALTED:
2269 xhci_reset_endpoint(pipe);
2270 break;
2271 case XHCI_EPSTATE_STOPPED:
2272 break;
2273 default:
2274 xhci_stop_endpoint(pipe);
2275 break;
2276 }
2277
2278 switch (xhci_get_epstate(sc, xs, dci)) {
2279 case XHCI_EPSTATE_STOPPED:
2280 break;
2281 case XHCI_EPSTATE_ERROR:
2282 device_printf(sc->sc_dev, "endpoint 0x%x error\n",
2283 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2284 break;
2285 default:
2286 device_printf(sc->sc_dev, "endpoint 0x%x failed to stop\n",
2287 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2288 }
2289
2290 xhci_set_dequeue(pipe);
2291
2292 DPRINTFN(4, "ends", 0, 0, 0, 0);
2293 }
2294
2295 static void
2296 xhci_pipe_restart_async_task(void *cookie)
2297 {
2298 struct usbd_pipe * const pipe = cookie;
2299 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2300 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2301 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2302 struct xhci_ring * const tr = xs->xs_xr[dci];
2303 struct usbd_xfer *xfer;
2304
2305 XHCIHIST_FUNC();
2306 XHCIHIST_CALLARGS("sc=%#jx pipe=%#jx",
2307 (uintptr_t)sc, (uintptr_t)pipe, 0, 0);
2308
2309 mutex_enter(&sc->sc_lock);
2310
2311 xhci_pipe_restart(pipe);
2312
2313 /*
2314 * We halted our own queue because it stalled. Mark it no
2315 * longer halted and start issuing queued transfers again.
2316 */
2317 tr->is_halted = false;
2318 xfer = SIMPLEQ_FIRST(&pipe->up_queue);
2319 if (xfer) {
2320 /*
2321 * If the first xfer of the queue is not in progress,
2322 * though, there may be a concurrent software abort
2323 * that has already cancelled it and is now in the
2324 * middle of a concurrent xhci_pipe_restart waiting to
2325 * reacquire the pipe (bus) lock. So only restart the
2326 * xfer if it's still USBD_IN_PROGRESS.
2327 *
2328 * Either way, xfers on the queue can't be in
2329 * USBD_NOT_STARTED.
2330 */
2331 KASSERT(xfer->ux_status != USBD_NOT_STARTED);
2332 if (xfer->ux_status == USBD_IN_PROGRESS) {
2333 (*pipe->up_methods->upm_start)(xfer);
2334 } else {
2335 DPRINTF("pipe restart race xfer=%#jx status=%jd",
2336 (uintptr_t)xfer, xfer->ux_status, 0, 0);
2337 }
2338 }
2339
2340 mutex_exit(&sc->sc_lock);
2341 }
2342
2343 static void
2344 xhci_pipe_restart_async(struct usbd_pipe *pipe)
2345 {
2346 struct xhci_pipe * const xp =
2347 container_of(pipe, struct xhci_pipe, xp_pipe);
2348 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2349 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2350 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2351 struct xhci_ring * const tr = xs->xs_xr[dci];
2352
2353 XHCIHIST_FUNC();
2354 XHCIHIST_CALLARGS("pipe %#jx", (uintptr_t)pipe, 0, 0, 0);
2355
2356 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2357
2358 tr->is_halted = true;
2359 usb_add_task(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC);
2360
2361 DPRINTFN(4, "ends", 0, 0, 0, 0);
2362 }
2363
2364 /* Process root hub port status/change events and notify uhub_intr. */
2365 static void
2366 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport)
2367 {
2368 XHCIHIST_FUNC();
2369 XHCIHIST_CALLARGS("xhci%jd: port %ju status change",
2370 device_unit(sc->sc_dev), ctlrport, 0, 0);
2371
2372 if (ctlrport > sc->sc_maxports)
2373 return;
2374
2375 const size_t bn = xhci_ctlrport2bus(sc, ctlrport);
2376 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport);
2377 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn];
2378
2379 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change",
2380 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer);
2381
2382 if (xfer == NULL)
2383 return;
2384 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
2385
2386 uint8_t *p = xfer->ux_buf;
2387 if (!xhci_polling_p(sc) || !sc->sc_intrxfer_deferred[bn])
2388 memset(p, 0, xfer->ux_length);
2389 p[rhp / NBBY] |= 1 << (rhp % NBBY);
2390 xfer->ux_actlen = xfer->ux_length;
2391 xfer->ux_status = USBD_NORMAL_COMPLETION;
2392 if (xhci_polling_p(sc))
2393 sc->sc_intrxfer_deferred[bn] = true;
2394 else
2395 usb_transfer_complete(xfer);
2396 }
2397
2398 /* Process Transfer Events */
2399 static void
2400 xhci_event_transfer(struct xhci_softc * const sc,
2401 const struct xhci_trb * const trb)
2402 {
2403 uint64_t trb_0;
2404 uint32_t trb_2, trb_3;
2405 uint8_t trbcode;
2406 u_int slot, dci;
2407 struct xhci_slot *xs;
2408 struct xhci_ring *xr;
2409 struct xhci_xfer *xx;
2410 struct usbd_xfer *xfer;
2411 usbd_status err;
2412
2413 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2414
2415 trb_0 = le64toh(trb->trb_0);
2416 trb_2 = le32toh(trb->trb_2);
2417 trb_3 = le32toh(trb->trb_3);
2418 trbcode = XHCI_TRB_2_ERROR_GET(trb_2);
2419 slot = XHCI_TRB_3_SLOT_GET(trb_3);
2420 dci = XHCI_TRB_3_EP_GET(trb_3);
2421 xs = &sc->sc_slots[slot];
2422 xr = xs->xs_xr[dci];
2423
2424 /* sanity check */
2425 KASSERT(xr != NULL);
2426 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots,
2427 "invalid xs_idx %u slot %u", xs->xs_idx, slot);
2428
2429 int idx = 0;
2430 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2431 if (xhci_trb_get_idx(xr, trb_0, &idx)) {
2432 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0);
2433 return;
2434 }
2435 xx = xr->xr_cookies[idx];
2436
2437 /* clear cookie of consumed TRB */
2438 xr->xr_cookies[idx] = NULL;
2439
2440 /*
2441 		 * xx is NULL if the pipe is open but no xfer has been started yet.
2442 		 * This happens when stopping an idle pipe.
2443 */
2444 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) {
2445 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju",
2446 idx, (uintptr_t)xx, trbcode, dci);
2447 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0,
2448 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)),
2449 0, 0);
2450 return;
2451 }
2452 } else {
2453 		/* When ED != 0, trb_0 is the virtual address of a struct xhci_xfer. */
2454 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
2455 }
2456 /* XXX this may not happen */
2457 if (xx == NULL) {
2458 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
2459 return;
2460 }
2461 xfer = &xx->xx_xfer;
2462 /* XXX this may happen when detaching */
2463 if (xfer == NULL) {
2464 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2465 (uintptr_t)xx, trb_0, 0, 0);
2466 return;
2467 }
2468 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2469 /* XXX I dunno why this happens */
2470 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2471
2472 if (!xfer->ux_pipe->up_repeat &&
2473 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2474 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2475 0, 0, 0);
2476 return;
2477 }
2478
2479 const uint8_t xfertype =
2480 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2481
2482 /* 4.11.5.2 Event Data TRB */
2483 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2484 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2485 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2486 if ((trb_0 & 0x3) == 0x3) {
2487 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2488 }
2489 }
2490
2491 switch (trbcode) {
2492 case XHCI_TRB_ERROR_SHORT_PKT:
2493 case XHCI_TRB_ERROR_SUCCESS:
2494 /*
2495 * A ctrl transfer can generate two events if it has a Data
2496 * stage. A short data stage can be OK and should not
2497 * complete the transfer as the status stage needs to be
2498 * performed.
2499 *
2500 		 * Note: the Data and Status stage events point at the same xfer.
2501 		 * ux_actlen and ux_dmabuf will be passed to
2502 		 * usb_transfer_complete after the Status stage event.
2503 		 *
2504 		 * Which stage generated the event can be distinguished:
2505 		 * + by checking the least significant 3 bits of trb_0 if ED==1
2506 		 *   (see xhci_device_ctrl_start), or
2507 		 * + by checking the type of the original TRB if ED==0.
2508 		 *
2509 		 * In addition, intr, bulk, and isoc transfers currently consist
2510 		 * of a single TD, so the "skip" is not needed.
2511 		 * Ctrl xfers use EVENT_DATA TRBs and the others do not, so the
2512 		 * driver can switch the flow by checking the ED bit.
2513 */
2514 if (xfertype == UE_ISOCHRONOUS) {
2515 xfer->ux_frlengths[xx->xx_isoc_done] -=
2516 XHCI_TRB_2_REM_GET(trb_2);
2517 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2518 } else if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2519 if (xfer->ux_actlen == 0)
2520 xfer->ux_actlen = xfer->ux_length -
2521 XHCI_TRB_2_REM_GET(trb_2);
2522 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2523 == XHCI_TRB_TYPE_DATA_STAGE) {
2524 return;
2525 }
2526 } else if ((trb_0 & 0x3) == 0x3) {
2527 return;
2528 }
2529 err = USBD_NORMAL_COMPLETION;
2530 break;
2531 case XHCI_TRB_ERROR_STOPPED:
2532 case XHCI_TRB_ERROR_LENGTH:
2533 case XHCI_TRB_ERROR_STOPPED_SHORT:
2534 err = USBD_IOERROR;
2535 break;
2536 case XHCI_TRB_ERROR_STALL:
2537 case XHCI_TRB_ERROR_BABBLE:
2538 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2539 xhci_pipe_restart_async(xfer->ux_pipe);
2540 err = USBD_STALLED;
2541 break;
2542 default:
2543 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2544 err = USBD_IOERROR;
2545 break;
2546 }
2547
2548 if (xfertype == UE_ISOCHRONOUS) {
2549 switch (trbcode) {
2550 case XHCI_TRB_ERROR_SHORT_PKT:
2551 case XHCI_TRB_ERROR_SUCCESS:
2552 break;
2553 case XHCI_TRB_ERROR_MISSED_SERVICE:
2554 case XHCI_TRB_ERROR_RING_UNDERRUN:
2555 case XHCI_TRB_ERROR_RING_OVERRUN:
2556 default:
2557 xfer->ux_frlengths[xx->xx_isoc_done] = 0;
2558 break;
2559 }
2560 if (++xx->xx_isoc_done < xfer->ux_nframes)
2561 return;
2562 }
2563
2564 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
2565 (trb_0 & 0x3) == 0x0) {
2566 /*
2567 * Try to claim this xfer for completion. If it has
2568 * already completed or aborted, drop it on the floor.
2569 */
2570 if (!usbd_xfer_trycomplete(xfer))
2571 return;
2572
2573 /* Set the status. */
2574 xfer->ux_status = err;
2575
2576 usb_transfer_complete(xfer);
2577 }
2578 }
2579
2580 /* Process Command complete events */
2581 static void
2582 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb)
2583 {
2584 uint64_t trb_0;
2585 uint32_t trb_2, trb_3;
2586
2587 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2588
2589 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2590
2591 trb_0 = le64toh(trb->trb_0);
2592 trb_2 = le32toh(trb->trb_2);
2593 trb_3 = le32toh(trb->trb_3);
2594
2595 if (trb_0 == sc->sc_command_addr) {
2596 sc->sc_resultpending = false;
2597
2598 sc->sc_result_trb.trb_0 = trb_0;
2599 sc->sc_result_trb.trb_2 = trb_2;
2600 sc->sc_result_trb.trb_3 = trb_3;
2601 if (XHCI_TRB_2_ERROR_GET(trb_2) !=
2602 XHCI_TRB_ERROR_SUCCESS) {
2603 DPRINTFN(1, "command completion "
2604 "failure: 0x%016jx 0x%08jx 0x%08jx",
2605 trb_0, trb_2, trb_3, 0);
2606 }
2607 cv_signal(&sc->sc_command_cv);
2608 } else {
2609 DPRINTFN(1, "spurious event: %#jx 0x%016jx "
2610 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3);
2611 }
2612 }
2613
2614 /*
2615  * Process one event TRB.
2616  * Called from xhci_softintr.
2617 */
2618 static void
2619 xhci_handle_event(struct xhci_softc * const sc,
2620 const struct xhci_trb * const trb)
2621 {
2622 uint64_t trb_0;
2623 uint32_t trb_2, trb_3;
2624
2625 XHCIHIST_FUNC();
2626
2627 trb_0 = le64toh(trb->trb_0);
2628 trb_2 = le32toh(trb->trb_2);
2629 trb_3 = le32toh(trb->trb_3);
2630
2631 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx",
2632 (uintptr_t)trb, trb_0, trb_2, trb_3);
2633
2634 /*
2635 * 4.11.3.1, 6.4.2.1
2636 * TRB Pointer is invalid for these completion codes.
2637 */
2638 switch (XHCI_TRB_2_ERROR_GET(trb_2)) {
2639 case XHCI_TRB_ERROR_RING_UNDERRUN:
2640 case XHCI_TRB_ERROR_RING_OVERRUN:
2641 case XHCI_TRB_ERROR_VF_RING_FULL:
2642 return;
2643 default:
2644 if (trb_0 == 0) {
2645 return;
2646 }
2647 break;
2648 }
2649
2650 switch (XHCI_TRB_3_TYPE_GET(trb_3)) {
2651 case XHCI_TRB_EVENT_TRANSFER:
2652 xhci_event_transfer(sc, trb);
2653 break;
2654 case XHCI_TRB_EVENT_CMD_COMPLETE:
2655 xhci_event_cmd(sc, trb);
2656 break;
2657 case XHCI_TRB_EVENT_PORT_STS_CHANGE:
2658 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff));
2659 break;
2660 default:
2661 break;
2662 }
2663 }
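/*
 * Soft interrupt handler.  Consume event TRBs from the event ring until
 * the cycle bit no longer matches, dispatch each one to xhci_handle_event,
 * then update the Event Ring Dequeue Pointer (ERDP).
 */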
2664
2665 static void
2666 xhci_softintr(void *v)
2667 {
2668 struct usbd_bus * const bus = v;
2669 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2670 struct xhci_ring * const er = sc->sc_er;
2671 struct xhci_trb *trb;
2672 int i, j, k, bn;
2673
2674 XHCIHIST_FUNC();
2675
2676 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2677
2678 i = er->xr_ep;
2679 j = er->xr_cs;
2680
2681 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0);
2682
2683 /*
2684 * Handle deferred root intr xfer, in case we just switched off
2685 * polling. It's not safe to complete root intr xfers while
2686 * polling -- too much kernel machinery gets involved.
2687 */
2688 if (!xhci_polling_p(sc)) {
2689 for (bn = 0; bn < 2; bn++) {
2690 if (__predict_false(sc->sc_intrxfer_deferred[bn])) {
2691 sc->sc_intrxfer_deferred[bn] = false;
2692 usb_transfer_complete(sc->sc_intrxfer[bn]);
2693 }
2694 }
2695 }
2696
2697 while (1) {
2698 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE,
2699 BUS_DMASYNC_POSTREAD);
2700 trb = &er->xr_trb[i];
2701 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0;
2702
2703 if (j != k)
2704 break;
2705
2706 xhci_handle_event(sc, trb);
2707
2708 i++;
2709 if (i == er->xr_ntrb) {
2710 i = 0;
2711 j ^= 1;
2712 }
2713 }
2714
2715 er->xr_ep = i;
2716 er->xr_cs = j;
2717
2718 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) |
2719 XHCI_ERDP_BUSY);
2720
2721 DPRINTFN(16, "ends", 0, 0, 0, 0);
2722
2723 return;
2724 }
2725
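/*
 * Poll the controller once: acknowledge any pending interrupt and run the
 * soft interrupt handler inline, under the interrupt lock.
 */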
2726 static void
2727 xhci_poll(struct usbd_bus *bus)
2728 {
2729 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2730
2731 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2732
2733 mutex_enter(&sc->sc_intr_lock);
2734 int ret = xhci_intr1(sc);
2735 if (ret) {
2736 xhci_softintr(bus);
2737 }
2738 mutex_exit(&sc->sc_intr_lock);
2739
2740 return;
2741 }
2742
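/*
 * Allocate an xfer from the per-controller pool, together with a private
 * array of TRBs (at least 3, or one per isochronous frame).
 */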
2743 static struct usbd_xfer *
2744 xhci_allocx(struct usbd_bus *bus, unsigned int nframes)
2745 {
2746 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2747 struct xhci_xfer *xx;
2748 u_int ntrbs;
2749
2750 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2751
2752 ntrbs = uimax(3, nframes);
2753 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs;
2754
2755 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
2756 if (xx != NULL) {
2757 memset(xx, 0, sizeof(*xx));
2758 if (ntrbs > 0) {
2759 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP);
2760 xx->xx_ntrb = ntrbs;
2761 }
2762 #ifdef DIAGNOSTIC
2763 xx->xx_xfer.ux_state = XFER_BUSY;
2764 #endif
2765 }
2766
2767 return &xx->xx_xfer;
2768 }
2769
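/* Free an xfer's private TRB array and return the xfer to the pool. */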
2770 static void
2771 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
2772 {
2773 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2774 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
2775
2776 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2777
2778 #ifdef DIAGNOSTIC
2779 if (xfer->ux_state != XFER_BUSY &&
2780 xfer->ux_status != USBD_NOT_STARTED) {
2781 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx",
2782 (uintptr_t)xfer, xfer->ux_state, 0, 0);
2783 }
2784 xfer->ux_state = XFER_FREE;
2785 #endif
2786 if (xx->xx_ntrb > 0) {
2787 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb));
2788 xx->xx_trb = NULL;
2789 xx->xx_ntrb = 0;
2790 }
2791 pool_cache_put(sc->sc_xferpool, xx);
2792 }
2793
2794 static bool
2795 xhci_dying(struct usbd_bus *bus)
2796 {
2797 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2798
2799 return sc->sc_dying;
2800 }
2801
2802 static void
2803 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2804 {
2805 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2806
2807 *lock = &sc->sc_lock;
2808 }
2809
2810 extern uint32_t usb_cookie_no;
2811
2812 /*
2813 * xHCI 4.3
2814 * Called when uhub_explore finds a new device (via usbd_new_device).
2815 * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2816 * This function does:
2817  *	Allocate and construct the device structure and its default endpoint (ep0).
2818  *	Allocate and open the pipe for ep0.
2819  *	Enable the slot and initialize the slot context.
2820  *	Set Address.
2821  *	Read the initial device descriptor.
2822  *	Determine the initial MaxPacketSize (mps) from the speed.
2823  *	Read the full device descriptor.
2824  *	Register this device.
2825  * Finally, the device state transitions to ADDRESSED.
2826 */
2827 static usbd_status
2828 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2829 int speed, int port, struct usbd_port *up)
2830 {
2831 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2832 struct usbd_device *dev;
2833 usbd_status err;
2834 usb_device_descriptor_t *dd;
2835 struct xhci_slot *xs;
2836 uint32_t *cp;
2837
2838 XHCIHIST_FUNC();
2839 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2840 port, depth, speed, (uintptr_t)up);
2841
2842 KASSERT(KERNEL_LOCKED_P());
2843
2844 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2845 dev->ud_bus = bus;
2846 dev->ud_quirks = &usbd_no_quirk;
2847 dev->ud_addr = 0;
2848 dev->ud_ddesc.bMaxPacketSize = 0;
2849 dev->ud_depth = depth;
2850 dev->ud_powersrc = up;
2851 dev->ud_myhub = up->up_parent;
2852 dev->ud_speed = speed;
2853 dev->ud_langid = USBD_NOLANG;
2854 dev->ud_cookie.cookie = ++usb_cookie_no;
2855
2856 /* Set up default endpoint handle. */
2857 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2858 	/* doesn't matter, just don't leave it uninitialized */
2859 dev->ud_ep0.ue_toggle = 0;
2860
2861 /* Set up default endpoint descriptor. */
2862 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2863 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2864 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2865 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2866 dev->ud_ep0desc.bInterval = 0;
2867
2868 /* 4.3, 4.8.2.1 */
2869 switch (speed) {
2870 case USB_SPEED_SUPER:
2871 case USB_SPEED_SUPER_PLUS:
2872 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2873 break;
2874 case USB_SPEED_FULL:
2875 /* XXX using 64 as initial mps of ep0 in FS */
2876 case USB_SPEED_HIGH:
2877 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2878 break;
2879 case USB_SPEED_LOW:
2880 default:
2881 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2882 break;
2883 }
2884
2885 up->up_dev = dev;
2886
2887 dd = &dev->ud_ddesc;
2888
2889 if (depth == 0 && port == 0) {
2890 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2891 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2892
2893 /* Establish the default pipe. */
2894 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2895 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2896 if (err) {
2897 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0);
2898 goto bad;
2899 }
2900 err = usbd_get_initial_ddesc(dev, dd);
2901 if (err) {
2902 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2903 goto bad;
2904 }
2905 } else {
2906 uint8_t slot = 0;
2907
2908 /* 4.3.2 */
2909 err = xhci_enable_slot(sc, &slot);
2910 if (err) {
2911 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0);
2912 goto bad;
2913 }
2914
2915 xs = &sc->sc_slots[slot];
2916 dev->ud_hcpriv = xs;
2917
2918 /* 4.3.3 initialize slot structure */
2919 err = xhci_init_slot(dev, slot);
2920 if (err) {
2921 DPRINTFN(1, "init slot %ju", err, 0, 0, 0);
2922 dev->ud_hcpriv = NULL;
2923 /*
2924 * We have to disable_slot here because
2925 * xs->xs_idx == 0 when xhci_init_slot fails,
2926 			 * in which case usbd_remove_device won't work.
2927 */
2928 mutex_enter(&sc->sc_lock);
2929 xhci_disable_slot(sc, slot);
2930 mutex_exit(&sc->sc_lock);
2931 goto bad;
2932 }
2933
2934 /*
2935 		 * We have to establish the default pipe _after_ the slot
2936 		 * structure has been prepared.
2937 */
2938 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2939 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2940 if (err) {
2941 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0,
2942 0);
2943 goto bad;
2944 }
2945
2946 /* 4.3.4 Address Assignment */
2947 err = xhci_set_address(dev, slot, false);
2948 if (err) {
2949 			DPRINTFN(1, "failed to set address: %ju", err, 0, 0, 0);
2950 goto bad;
2951 }
2952
2953 /* Allow device time to set new address */
2954 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE);
2955
2956 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2957 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT);
2958 HEXDUMP("slot context", cp, sc->sc_ctxsz);
2959 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3]));
2960 DPRINTFN(4, "device address %ju", addr, 0, 0, 0);
2961 /*
2962 * XXX ensure we know when the hardware does something
2963 * we can't yet cope with
2964 */
2965 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr);
2966 dev->ud_addr = addr;
2967
2968 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL,
2969 "addr %d already allocated", dev->ud_addr);
2970 /*
2971 * The root hub is given its own slot
2972 */
2973 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev;
2974
2975 err = usbd_get_initial_ddesc(dev, dd);
2976 if (err) {
2977 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2978 goto bad;
2979 }
2980
2981 /* 4.8.2.1 */
2982 if (USB_IS_SS(speed)) {
2983 if (dd->bMaxPacketSize != 9) {
2984 printf("%s: invalid mps 2^%u for SS ep0,"
2985 " using 512\n",
2986 device_xname(sc->sc_dev),
2987 dd->bMaxPacketSize);
2988 dd->bMaxPacketSize = 9;
2989 }
2990 USETW(dev->ud_ep0desc.wMaxPacketSize,
2991 (1 << dd->bMaxPacketSize));
2992 } else
2993 USETW(dev->ud_ep0desc.wMaxPacketSize,
2994 dd->bMaxPacketSize);
2995 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0);
2996 err = xhci_update_ep0_mps(sc, xs,
2997 UGETW(dev->ud_ep0desc.wMaxPacketSize));
2998 if (err) {
2999 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0);
3000 goto bad;
3001 }
3002 }
3003
3004 err = usbd_reload_device_desc(dev);
3005 if (err) {
3006 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
3007 goto bad;
3008 }
3009
3010 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,",
3011 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0);
3012 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,",
3013 dd->bDeviceClass, dd->bDeviceSubClass,
3014 dd->bDeviceProtocol, 0);
3015 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd",
3016 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations,
3017 dev->ud_speed);
3018
3019 usbd_get_device_strings(dev);
3020
3021 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev);
3022
3023 if (depth == 0 && port == 0) {
3024 usbd_attach_roothub(parent, dev);
3025 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0);
3026 return USBD_NORMAL_COMPLETION;
3027 }
3028
3029 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr);
3030 bad:
3031 if (err != USBD_NORMAL_COMPLETION) {
3032 if (depth == 0 && port == 0 && dev->ud_pipe0)
3033 usbd_kill_pipe(dev->ud_pipe0);
3034 usbd_remove_device(dev, up);
3035 }
3036
3037 return err;
3038 }
3039
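/*
 * Allocate and initialize a TRB ring: the DMA buffer holding the TRBs,
 * a parallel array of per-TRB cookies, and the ring lock.
 */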
3040 static usbd_status
3041 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp,
3042 size_t ntrb, size_t align)
3043 {
3044 size_t size = ntrb * XHCI_TRB_SIZE;
3045 struct xhci_ring *xr;
3046
3047 XHCIHIST_FUNC();
3048 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx",
3049 (uintptr_t)*xrp, ntrb, align, 0);
3050
3051 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP);
3052 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0);
3053
3054 int err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
3055 USBMALLOC_ZERO, &xr->xr_dma);
3056 if (err) {
3057 kmem_free(xr, sizeof(struct xhci_ring));
3058 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0);
3059 return err;
3060 }
3061 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
3062 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP);
3063 xr->xr_trb = xhci_ring_trbv(xr, 0);
3064 xr->xr_ntrb = ntrb;
3065 xr->is_halted = false;
3066 xhci_host_dequeue(xr);
3067 *xrp = xr;
3068
3069 return USBD_NORMAL_COMPLETION;
3070 }
3071
3072 static void
3073 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr)
3074 {
3075 if (*xr == NULL)
3076 return;
3077
3078 usb_freemem(&(*xr)->xr_dma);
3079 mutex_destroy(&(*xr)->xr_lock);
3080 kmem_free((*xr)->xr_cookies,
3081 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb);
3082 kmem_free(*xr, sizeof(struct xhci_ring));
3083 *xr = NULL;
3084 }
3085
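/*
 * Copy ntrbs TRBs onto the ring at the enqueue pointer, inserting a Link
 * TRB when the end of the ring is reached.  The cycle bit of the first
 * TRB is written last, so the xHC cannot start executing a partially
 * written chain.
 */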
3086 static void
3087 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr,
3088 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs)
3089 {
3090 size_t i;
3091 u_int ri;
3092 u_int cs;
3093 uint64_t parameter;
3094 uint32_t status;
3095 uint32_t control;
3096
3097 XHCIHIST_FUNC();
3098 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju",
3099 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0);
3100
3101 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u",
3102 ntrbs, xr->xr_ntrb);
3103 for (i = 0; i < ntrbs; i++) {
3104 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr,
3105 (uintptr_t)trbs, i, 0);
3106 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx",
3107 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0);
3108 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) !=
3109 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3);
3110 }
3111
3112 ri = xr->xr_ep;
3113 cs = xr->xr_cs;
3114
3115 /*
3116 * Although the xhci hardware can do scatter/gather dma from
3117 	 * arbitrarily sized buffers, there is a non-obvious restriction
3118 	 * that a LINK trb is only allowed at the end of a burst of
3119 	 * transfers - which might be 16kB.
3120 	 * Arbitrarily aligned LINK trbs definitely fail on Ivy Bridge.
3121 * The simple solution is not to allow a LINK trb in the middle
3122 * of anything - as here.
3123 * XXX: (dsl) There are xhci controllers out there (eg some made by
3124 * ASMedia) that seem to lock up if they process a LINK trb but
3125 * cannot process the linked-to trb yet.
3126 * The code should write the 'cycle' bit on the link trb AFTER
3127 * adding the other trb.
3128 */
3129 u_int firstep = xr->xr_ep;
3130 u_int firstcs = xr->xr_cs;
3131
3132 for (i = 0; i < ntrbs; ) {
3133 u_int oldri = ri;
3134 u_int oldcs = cs;
3135
3136 if (ri >= (xr->xr_ntrb - 1)) {
3137 			/* Put a Link TRB at the end of the ring */
3138 parameter = xhci_ring_trbp(xr, 0);
3139 status = 0;
3140 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) |
3141 XHCI_TRB_3_TC_BIT;
3142 xr->xr_cookies[ri] = NULL;
3143 xr->xr_ep = 0;
3144 xr->xr_cs ^= 1;
3145 ri = xr->xr_ep;
3146 cs = xr->xr_cs;
3147 } else {
3148 parameter = trbs[i].trb_0;
3149 status = trbs[i].trb_2;
3150 control = trbs[i].trb_3;
3151
3152 xr->xr_cookies[ri] = cookie;
3153 ri++;
3154 i++;
3155 }
3156 /*
3157 		 * If this is the first TRB, mark it invalid to prevent
3158 		 * the xHC from running it immediately.
3159 */
3160 if (oldri == firstep) {
3161 if (oldcs) {
3162 control &= ~XHCI_TRB_3_CYCLE_BIT;
3163 } else {
3164 control |= XHCI_TRB_3_CYCLE_BIT;
3165 }
3166 } else {
3167 if (oldcs) {
3168 control |= XHCI_TRB_3_CYCLE_BIT;
3169 } else {
3170 control &= ~XHCI_TRB_3_CYCLE_BIT;
3171 }
3172 }
3173 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control);
3174 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri,
3175 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3176 }
3177
3178 /* Now invert cycle bit of first TRB */
3179 if (firstcs) {
3180 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT);
3181 } else {
3182 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT);
3183 }
3184 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep,
3185 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3186
3187 xr->xr_ep = ri;
3188 xr->xr_cs = cs;
3189
3190 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep,
3191 xr->xr_cs, 0);
3192 }
3193
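/* Convenience wrapper: put an xfer's private TRBs onto a transfer ring. */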
3194 static inline void
3195 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr,
3196 struct xhci_xfer *xx, u_int ntrb)
3197 {
3198 KASSERT(ntrb <= xx->xx_ntrb);
3199 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb);
3200 }
3201
3202 /*
3203  * Stop command execution, purge all commands on the command ring, and
3204  * rewind the dequeue pointer.
3205 */
3206 static void
3207 xhci_abort_command(struct xhci_softc *sc)
3208 {
3209 struct xhci_ring * const cr = sc->sc_cr;
3210 uint64_t crcr;
3211 int i;
3212
3213 XHCIHIST_FUNC();
3214 XHCIHIST_CALLARGS("command %#jx timeout, aborting",
3215 sc->sc_command_addr, 0, 0, 0);
3216
3217 mutex_enter(&cr->xr_lock);
3218
3219 /* 4.6.1.2 Aborting a Command */
3220 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3221 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA);
3222
3223 for (i = 0; i < 500; i++) {
3224 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3225 if ((crcr & XHCI_CRCR_LO_CRR) == 0)
3226 break;
3227 usb_delay_ms(&sc->sc_bus, 1);
3228 }
3229 if ((crcr & XHCI_CRCR_LO_CRR) != 0) {
3230 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0);
3231 /* reset HC here? */
3232 }
3233
3234 /* reset command ring dequeue pointer */
3235 cr->xr_ep = 0;
3236 cr->xr_cs = 1;
3237 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs);
3238
3239 mutex_exit(&cr->xr_lock);
3240 }
3241
3242 /*
3243  * Put a command on the command ring, ring the doorbell, and cv_timedwait
3244  * for completion.  Completion is signalled by cv_signal from
3245  * xhci_event_cmd() (called from xhci_softintr), or the wait times out.
3246  * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(),
3247  * then do_command examines it.
3248 */
3249 static usbd_status
3250 xhci_do_command_locked(struct xhci_softc * const sc,
3251 struct xhci_soft_trb * const trb, int timeout)
3252 {
3253 struct xhci_ring * const cr = sc->sc_cr;
3254 usbd_status err;
3255
3256 XHCIHIST_FUNC();
3257 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx",
3258 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3259
3260 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
3261 KASSERT(mutex_owned(&sc->sc_lock));
3262
3263 while (sc->sc_command_addr != 0 ||
3264 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp))
3265 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
3266 if (sc->sc_suspendresume_failed)
3267 return USBD_IOERROR;
3268
3269 /*
3270 	 * If the enqueue pointer points at the last entry of the ring, that
3271 	 * entry is the Link TRB and the command TRB will be stored in the 0th TRB.
3272 */
3273 if (cr->xr_ep == cr->xr_ntrb - 1)
3274 sc->sc_command_addr = xhci_ring_trbp(cr, 0);
3275 else
3276 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep);
3277
3278 sc->sc_resultpending = true;
3279
3280 mutex_enter(&cr->xr_lock);
3281 xhci_ring_put(sc, cr, NULL, trb, 1);
3282 mutex_exit(&cr->xr_lock);
3283
3284 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0);
3285
3286 while (sc->sc_resultpending) {
3287 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock,
3288 MAX(1, mstohz(timeout))) == EWOULDBLOCK) {
3289 xhci_abort_command(sc);
3290 err = USBD_TIMEOUT;
3291 goto timedout;
3292 }
3293 }
3294
3295 trb->trb_0 = sc->sc_result_trb.trb_0;
3296 trb->trb_2 = sc->sc_result_trb.trb_2;
3297 trb->trb_3 = sc->sc_result_trb.trb_3;
3298
3299 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx",
3300 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3301
3302 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) {
3303 case XHCI_TRB_ERROR_SUCCESS:
3304 err = USBD_NORMAL_COMPLETION;
3305 break;
3306 default:
3307 case 192 ... 223:
3308 DPRINTFN(5, "error %#jx",
3309 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0);
3310 err = USBD_IOERROR;
3311 break;
3312 case 224 ... 255:
3313 err = USBD_NORMAL_COMPLETION;
3314 break;
3315 }
3316
3317 timedout:
3318 sc->sc_resultpending = false;
3319 sc->sc_command_addr = 0;
3320 cv_broadcast(&sc->sc_cmdbusy_cv);
3321
3322 return err;
3323 }
3324
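/* As xhci_do_command_locked, but takes and releases sc_lock itself. */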
3325 static usbd_status
3326 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb,
3327 int timeout)
3328 {
3329
3330 mutex_enter(&sc->sc_lock);
3331 usbd_status ret = xhci_do_command_locked(sc, trb, timeout);
3332 mutex_exit(&sc->sc_lock);
3333
3334 return ret;
3335 }
3336
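/*
 * xHCI 4.3.2: issue an Enable Slot command and return the allocated
 * slot ID in *slotp.
 */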
3337 static usbd_status
3338 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp)
3339 {
3340 struct xhci_soft_trb trb;
3341 usbd_status err;
3342
3343 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3344
3345 trb.trb_0 = 0;
3346 trb.trb_2 = 0;
3347 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
3348
3349 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3350 if (err != USBD_NORMAL_COMPLETION) {
3351 return err;
3352 }
3353
3354 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3);
3355
3356 return err;
3357 }
3358
3359 /*
3360 * xHCI 4.6.4
3361  * Issue a Disable Slot command and deallocate the slot's rings and
3362  * device/input context DMA buffers.  All endpoints in the slot should be stopped beforehand.
3363 * Should be called with sc_lock held.
3364 */
3365 static usbd_status
3366 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot)
3367 {
3368 struct xhci_soft_trb trb;
3369 struct xhci_slot *xs;
3370 usbd_status err;
3371
3372 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3373
3374 if (sc->sc_dying)
3375 return USBD_IOERROR;
3376
3377 trb.trb_0 = 0;
3378 trb.trb_2 = 0;
3379 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) |
3380 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT);
3381
3382 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
3383
3384 if (!err) {
3385 xs = &sc->sc_slots[slot];
3386 if (xs->xs_idx != 0) {
3387 xhci_free_slot(sc, xs);
3388 xhci_set_dcba(sc, 0, slot);
3389 memset(xs, 0, sizeof(*xs));
3390 }
3391 }
3392
3393 return err;
3394 }
3395
3396 /*
3397 * Set address of device and transition slot state from ENABLED to ADDRESSED
3398  * if the Block Set Address Request (BSR) flag is false.
3399 * If BSR==true, transition slot state from ENABLED to DEFAULT.
3400 * see xHCI 1.1 4.5.3, 3.3.4
3401 * Should be called without sc_lock held.
3402 */
3403 static usbd_status
3404 xhci_address_device(struct xhci_softc * const sc,
3405 uint64_t icp, uint8_t slot_id, bool bsr)
3406 {
3407 struct xhci_soft_trb trb;
3408 usbd_status err;
3409
3410 XHCIHIST_FUNC();
3411 if (bsr) {
3412 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr",
3413 icp, slot_id, 0, 0);
3414 } else {
3415 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr",
3416 icp, slot_id, 0, 0);
3417 }
3418
3419 trb.trb_0 = icp;
3420 trb.trb_2 = 0;
3421 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) |
3422 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) |
3423 (bsr ? XHCI_TRB_3_BSR_BIT : 0);
3424
3425 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3426
3427 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS)
3428 err = USBD_NO_ADDR;
3429
3430 return err;
3431 }
3432
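/*
 * Update the max packet size of ep0 in the endpoint context with an
 * Evaluate Context command, once the device's real bMaxPacketSize
 * is known.
 */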
3433 static usbd_status
3434 xhci_update_ep0_mps(struct xhci_softc * const sc,
3435 struct xhci_slot * const xs, u_int mps)
3436 {
3437 struct xhci_soft_trb trb;
3438 usbd_status err;
3439 uint32_t * cp;
3440
3441 XHCIHIST_FUNC();
3442 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0);
3443
3444 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3445 cp[0] = htole32(0);
3446 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL));
3447
3448 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL));
3449 cp[1] &= ~htole32(XHCI_EPCTX_1_MAXP_SIZE_MASK);
3450 cp[1] |= htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps));
3451
3452 /* sync input contexts before they are read from memory */
3453 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3454 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3455 sc->sc_ctxsz * 4);
3456
3457 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
3458 trb.trb_2 = 0;
3459 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
3460 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX);
3461
3462 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3463 return err;
3464 }
3465
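/*
 * Write the Device Context Base Address for slot 'si' into the DCBAA
 * and sync it for the controller.
 */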
3466 static void
3467 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si)
3468 {
3469 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0);
3470
3471 XHCIHIST_FUNC();
3472 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd",
3473 (uintptr_t)&dcbaa[si], dcba, si, 0);
3474
3475 dcbaa[si] = htole64(dcba);
3476 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t),
3477 BUS_DMASYNC_PREWRITE);
3478 }
3479
3480 /*
3481  * Allocate the slot's device and input context DMA buffers; per-endpoint
3482  * transfer rings are allocated later, when pipes are opened.
3483 */
3484 static usbd_status
3485 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3486 {
3487 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3488 struct xhci_slot *xs;
3489
3490 XHCIHIST_FUNC();
3491 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3492
3493 xs = &sc->sc_slots[slot];
3494
3495 /* allocate contexts */
3496 int err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3497 USBMALLOC_ZERO, &xs->xs_dc_dma);
3498 if (err) {
3499 DPRINTFN(1, "failed to allocmem output device context %jd",
3500 err, 0, 0, 0);
3501 return USBD_NOMEM;
3502 }
3503
3504 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3505 USBMALLOC_ZERO, &xs->xs_ic_dma);
3506 if (err) {
3507 DPRINTFN(1, "failed to allocmem input device context %jd",
3508 err, 0, 0, 0);
3509 goto bad1;
3510 }
3511
3512 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr));
3513 xs->xs_idx = slot;
3514
3515 return USBD_NORMAL_COMPLETION;
3516
3517 bad1:
3518 usb_freemem(&xs->xs_dc_dma);
3519 xs->xs_idx = 0;
3520 return USBD_NOMEM;
3521 }
3522
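/*
 * Free all transfer rings still allocated in the slot along with its
 * device and input context DMA buffers, and mark the slot unused.
 */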
3523 static void
3524 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs)
3525 {
3526 u_int dci;
3527
3528 XHCIHIST_FUNC();
3529 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
3530
3531 /* deallocate all allocated rings in the slot */
3532 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
3533 if (xs->xs_xr[dci] != NULL)
3534 xhci_ring_free(sc, &xs->xs_xr[dci]);
3535 }
3536 usb_freemem(&xs->xs_ic_dma);
3537 usb_freemem(&xs->xs_dc_dma);
3538 xs->xs_idx = 0;
3539 }
3540
3541 /*
3542  * Set up the slot context, set the Device Context Base Address, and issue
3543  * an Address Device command.
3544 */
3545 static usbd_status
3546 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3547 {
3548 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3549 struct xhci_slot *xs;
3550 usbd_status err;
3551
3552 XHCIHIST_FUNC();
3553 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3554
3555 xs = &sc->sc_slots[slot];
3556
3557 xhci_setup_ctx(dev->ud_pipe0);
3558
3559 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3560 sc->sc_ctxsz * 3);
3561
3562 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3563
3564 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3565
3566 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3567 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3568 sc->sc_ctxsz * 2);
3569
3570 return err;
3571 }
3572
3573 /*
3574 * 4.8.2, 6.2.3.2
3575  * Construct the slot and endpoint context parameters and sync the memory.
3576 */
3577 static void
3578 xhci_setup_ctx(struct usbd_pipe *pipe)
3579 {
3580 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3581 struct usbd_device *dev = pipe->up_dev;
3582 struct xhci_slot * const xs = dev->ud_hcpriv;
3583 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3584 const u_int dci = xhci_ep_get_dci(ed);
3585 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3586 uint32_t *cp;
3587 uint8_t speed = dev->ud_speed;
3588
3589 XHCIHIST_FUNC();
3590 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3591 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3592
3593 /* set up initial input control context */
3594 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3595 cp[0] = htole32(0);
3596 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3597 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT));
3598 cp[7] = htole32(0);
3599
3600 /* set up input slot context */
3601 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
3602 cp[0] =
3603 XHCI_SCTX_0_CTX_NUM_SET(dci) |
3604 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed));
3605 cp[1] = 0;
3606 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0);
3607 cp[3] = 0;
3608 xhci_setup_route(pipe, cp);
3609 xhci_setup_tthub(pipe, cp);
3610
3611 cp[0] = htole32(cp[0]);
3612 cp[1] = htole32(cp[1]);
3613 cp[2] = htole32(cp[2]);
3614 cp[3] = htole32(cp[3]);
3615
3616 /* set up input endpoint context */
3617 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci));
3618 cp[0] =
3619 XHCI_EPCTX_0_EPSTATE_SET(0) |
3620 XHCI_EPCTX_0_MULT_SET(0) |
3621 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) |
3622 XHCI_EPCTX_0_LSA_SET(0) |
3623 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0);
3624 cp[1] =
3625 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) |
3626 XHCI_EPCTX_1_HID_SET(0) |
3627 XHCI_EPCTX_1_MAXB_SET(0);
3628
3629 if (xfertype != UE_ISOCHRONOUS)
3630 cp[1] |= XHCI_EPCTX_1_CERR_SET(3);
3631
3632 xhci_setup_maxburst(pipe, cp);
3633
3634 DPRINTFN(4, "setting on dci %ju ival %ju mult %ju mps %#jx",
3635 dci, XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_0_MULT_GET(cp[0]),
3636 XHCI_EPCTX_1_MAXP_SIZE_GET(cp[1]));
3637 DPRINTFN(4, " maxburst %ju mep %#jx atl %#jx",
3638 XHCI_EPCTX_1_MAXB_GET(cp[1]),
3639 (XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_GET(cp[0]) << 16) +
3640 XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_GET(cp[4]),
3641 XHCI_EPCTX_4_AVG_TRB_LEN_GET(cp[4]), 0);
3642
3643 /* rewind TR dequeue pointer in xHC */
3644 /* can't use xhci_ep_get_dci() yet? */
3645 *(uint64_t *)(&cp[2]) = htole64(
3646 xhci_ring_trbp(xs->xs_xr[dci], 0) |
3647 XHCI_EPCTX_2_DCS_SET(1));
3648
3649 cp[0] = htole32(cp[0]);
3650 cp[1] = htole32(cp[1]);
3651 cp[4] = htole32(cp[4]);
3652
3653 /* rewind TR dequeue pointer in driver */
3654 struct xhci_ring *xr = xs->xs_xr[dci];
3655 mutex_enter(&xr->xr_lock);
3656 xhci_host_dequeue(xr);
3657 mutex_exit(&xr->xr_lock);
3658
3659 /* sync input contexts before they are read from memory */
3660 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3661 }
3662
3663 /*
3664  * Set up the route string and root hub port of the given device for the slot context
3665 */
3666 static void
3667 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3668 {
3669 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3670 struct usbd_device *dev = pipe->up_dev;
3671 struct usbd_port *up = dev->ud_powersrc;
3672 struct usbd_device *hub;
3673 struct usbd_device *adev;
3674 uint8_t rhport = 0;
3675 uint32_t route = 0;
3676
3677 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3678
3679 	/* Locate the root hub port and determine the route string */
3680 /* 4.3.3 route string does not include roothub port */
3681 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3682 uint32_t dep;
3683
3684 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd",
3685 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3686 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3687 -1);
3688
3689 if (hub->ud_powersrc == NULL)
3690 break;
3691 dep = hub->ud_depth;
3692 if (dep == 0)
3693 break;
3694 rhport = hub->ud_powersrc->up_portno;
3695 if (dep > USB_HUB_MAX_DEPTH)
3696 continue;
3697
3698 route |=
3699 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3700 << ((dep - 1) * 4);
3701 }
3702 route = route >> 4;
3703 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
3704
3705 /* Locate port on upstream high speed hub */
3706 for (adev = dev, hub = up->up_parent;
3707 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3708 adev = hub, hub = hub->ud_myhub)
3709 ;
3710 if (hub) {
3711 int p;
3712 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3713 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) {
3714 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1];
3715 goto found;
3716 }
3717 }
3718 panic("%s: cannot find HS port", __func__);
3719 found:
3720 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3721 } else {
3722 dev->ud_myhsport = NULL;
3723 }
3724
3725 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3726
3727 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3728 ctlrport, route, (uintptr_t)hub);
3729
3730 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3731 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3732 }
3733
3734 /*
3735  * Set up whether the device is a hub, whether it uses MTT, and the
3736  * TT information if it does.
3737 */
3738 static void
3739 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3740 {
3741 struct usbd_device *dev = pipe->up_dev;
3742 struct usbd_port *myhsport = dev->ud_myhsport;
3743 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3744 uint32_t speed = dev->ud_speed;
3745 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3746 uint8_t tthubslot, ttportnum;
3747 bool ishub;
3748 bool usemtt;
3749
3750 XHCIHIST_FUNC();
3751
3752 	/*
3753 	 * xHCI 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3754 	 * tthubslot:
3755 	 *   the slot ID of the parent HS hub, if an LS/FS device is
3756 	 *   connected through an HS hub.
3757 	 *   This is 0 if the device is not an LS/FS device, or
3758 	 *           the parent hub is not an HS hub, or
3759 	 *           the device is attached to the root hub.
3760 	 * ttportnum:
3761 	 *   the downstream-facing port of the parent HS hub, if an
3762 	 *   LS/FS device is connected through it.
3763 	 *   This is 0 if the device is not an LS/FS device, or
3764 	 *           the parent hub is not an HS hub, or
3765 	 *           the device is attached to the root hub.
3766 	 */
3767 if (myhsport &&
3768 myhsport->up_parent->ud_addr != rhaddr &&
3769 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3770 ttportnum = myhsport->up_portno;
3771 tthubslot = myhsport->up_parent->ud_addr;
3772 } else {
3773 ttportnum = 0;
3774 tthubslot = 0;
3775 }
3776 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd",
3777 (uintptr_t)myhsport, ttportnum, tthubslot, 0);
3778
3779 /* ishub is valid after reading UDESC_DEVICE */
3780 ishub = (dd->bDeviceClass == UDCLASS_HUB);
3781
3782 /* dev->ud_hub is valid after reading UDESC_HUB */
3783 if (ishub && dev->ud_hub) {
3784 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc;
3785 uint8_t ttt =
3786 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK);
3787
3788 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts);
3789 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt);
3790 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0);
3791 }
3792
3793 #define IS_MTTHUB(dd) \
3794 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT)
3795
3796 	/*
3797 	 * The MTT flag is set if
3798 	 * 1. this is an HS hub and MTTs are supported and enabled; or
3799 	 * 2. this is an LS or FS device and there is a parent HS hub where
3800 	 *    MTTs are supported and enabled.
3801 	 *
3802 	 * XXX enabled is not tested yet
3803 	 */
3804 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd))
3805 usemtt = true;
3806 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) &&
3807 myhsport &&
3808 myhsport->up_parent->ud_addr != rhaddr &&
3809 IS_MTTHUB(&myhsport->up_parent->ud_ddesc))
3810 usemtt = true;
3811 else
3812 usemtt = false;
3813 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd",
3814 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt);
3815
3816 #undef IS_MTTHUB
3817
3818 cp[0] |=
3819 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) |
3820 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0);
3821 cp[2] |=
3822 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) |
3823 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum);
3824 }
3825
3826 static const usb_endpoint_ss_comp_descriptor_t *
3827 xhci_get_essc_desc(struct usbd_pipe *pipe)
3828 {
3829 struct usbd_device *dev = pipe->up_dev;
3830 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3831 const usb_cdc_descriptor_t *cdcd;
3832 usbd_desc_iter_t iter;
3833 uint8_t ep;
3834
3835 /* config desc is NULL when opening ep0 */
3836 if (dev == NULL || dev->ud_cdesc == NULL)
3837 return NULL;
3838
3839 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev,
3840 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY);
3841 if (cdcd == NULL)
3842 return NULL;
3843
3844 usb_desc_iter_init(dev, &iter);
3845 iter.cur = (const void *)cdcd;
3846
3847 /* find endpoint_ss_comp desc for ep of this pipe */
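	/*
	 * The SS endpoint companion descriptor immediately follows the
	 * endpoint descriptor it belongs to (USB 3.x 9.6.7), so return the
	 * descriptor found right after the matching endpoint descriptor.
	 */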
3848 for (ep = 0;;) {
3849 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter);
3850 if (cdcd == NULL)
3851 break;
3852 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) {
3853 ep = ((const usb_endpoint_descriptor_t *)cdcd)->
3854 bEndpointAddress;
3855 if (UE_GET_ADDR(ep) ==
3856 UE_GET_ADDR(ed->bEndpointAddress)) {
3857 cdcd = (const usb_cdc_descriptor_t *)
3858 usb_desc_iter_next(&iter);
3859 break;
3860 }
3861 ep = 0;
3862 }
3863 }
3864 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
3865 return (const usb_endpoint_ss_comp_descriptor_t *)cdcd;
3866 }
3867 return NULL;
3868 }
3869
3870 /* Set up maxburst, mult, interval, and related endpoint context parameters */
3871 static void
3872 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp)
3873 {
3874 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
3875 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3876 struct usbd_device * const dev = pipe->up_dev;
3877 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3878 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3879 uint16_t mps = UGETW(ed->wMaxPacketSize);
3880 uint8_t speed = dev->ud_speed;
3881 uint32_t maxb, mep, atl;
3882 uint8_t ival, mult;
3883
3884 const usb_endpoint_ss_comp_descriptor_t * esscd =
3885 xhci_get_essc_desc(pipe);
3886
3887 /* USB 2.0 9.6.6, xHCI 4.8.2.4, 6.2.3.2 - 6.2.3.8 */
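	/*
	 * e.g. an HS high-bandwidth isoc endpoint with wMaxPacketSize 0x1400
	 * (1024 bytes, 2 additional transactions per microframe) yields
	 * maxb = 2 and mep = 3 * 1024 = 3072 below.
	 */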
3888 switch (xfertype) {
3889 case UE_ISOCHRONOUS:
3890 case UE_INTERRUPT:
3891 if (USB_IS_SS(speed)) {
3892 maxb = esscd ? esscd->bMaxBurst : UE_GET_TRANS(mps);
3893 mep = esscd ? UGETW(esscd->wBytesPerInterval) :
3894 UE_GET_SIZE(mps) * (maxb + 1);
3895 if (esscd && xfertype == UE_ISOCHRONOUS &&
3896 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) {
3897 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes);
3898 mult = (mult > 2) ? 2 : mult;
3899 } else
3900 mult = 0;
3901
3902 } else {
3903 switch (speed) {
3904 case USB_SPEED_HIGH:
3905 maxb = UE_GET_TRANS(mps);
3906 mep = UE_GET_SIZE(mps) * (maxb + 1);
3907 break;
3908 case USB_SPEED_FULL:
3909 maxb = 0;
3910 mep = UE_GET_SIZE(mps);
3911 break;
3912 default:
3913 maxb = 0;
3914 mep = 0;
3915 break;
3916 }
3917 mult = 0;
3918 }
3919 mps = UE_GET_SIZE(mps);
3920
3921 if (pipe->up_interval == USBD_DEFAULT_INTERVAL)
3922 ival = ed->bInterval;
3923 else
3924 ival = pipe->up_interval;
3925
3926 ival = xhci_bival2ival(ival, speed, xfertype);
3927 atl = mep;
3928 break;
3929 case UE_CONTROL:
3930 case UE_BULK:
3931 default:
3932 if (USB_IS_SS(speed)) {
3933 maxb = esscd ? esscd->bMaxBurst : 0;
3934 } else
3935 maxb = 0;
3936
3937 mps = UE_GET_SIZE(mps);
3938 mep = 0;
3939 mult = 0;
3940 ival = 0;
3941 if (xfertype == UE_CONTROL)
3942 atl = 8; /* 6.2.3 */
3943 else
3944 atl = mps;
3945 break;
3946 }
3947
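	/* Clamp the Max ESIT Payload to the per-speed maxima. */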
3948 switch (speed) {
3949 case USB_SPEED_LOW:
3950 break;
3951 case USB_SPEED_FULL:
3952 if (xfertype == UE_INTERRUPT)
3953 if (mep > XHCI_EPCTX_MEP_FS_INTR)
3954 mep = XHCI_EPCTX_MEP_FS_INTR;
3955 if (xfertype == UE_ISOCHRONOUS)
3956 if (mep > XHCI_EPCTX_MEP_FS_ISOC)
3957 mep = XHCI_EPCTX_MEP_FS_ISOC;
3958 break;
3959 case USB_SPEED_HIGH:
3960 if (xfertype == UE_INTERRUPT)
3961 if (mep > XHCI_EPCTX_MEP_HS_INTR)
3962 mep = XHCI_EPCTX_MEP_HS_INTR;
3963 if (xfertype == UE_ISOCHRONOUS)
3964 if (mep > XHCI_EPCTX_MEP_HS_ISOC)
3965 mep = XHCI_EPCTX_MEP_HS_ISOC;
3966 break;
3967 case USB_SPEED_SUPER:
3968 case USB_SPEED_SUPER_PLUS:
3969 default:
3970 if (xfertype == UE_INTERRUPT)
3971 if (mep > XHCI_EPCTX_MEP_SS_INTR)
3972 mep = XHCI_EPCTX_MEP_SS_INTR;
3973 if (xfertype == UE_ISOCHRONOUS) {
3974 if (speed == USB_SPEED_SUPER ||
3975 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) {
3976 if (mep > XHCI_EPCTX_MEP_SS_ISOC)
3977 mep = XHCI_EPCTX_MEP_SS_ISOC;
3978 } else {
3979 if (mep > XHCI_EPCTX_MEP_SS_ISOC_LEC)
3980 mep = XHCI_EPCTX_MEP_SS_ISOC_LEC;
3981 }
3982 }
3983 break;
3984 }
3985
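	/* Cache the one-based burst and mult values for later isoc scheduling. */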
3986 xpipe->xp_maxb = maxb + 1;
3987 xpipe->xp_mult = mult + 1;
3988
3989 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(mep >> 16);
3990 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3991 cp[0] |= XHCI_EPCTX_0_MULT_SET(mult);
3992 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps);
3993 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3994 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(mep & 0xffff);
3995 cp[4] |= XHCI_EPCTX_4_AVG_TRB_LEN_SET(atl);
3996 }
3997
3998 /*
3999  * Convert a usbdi bInterval value to the xHCI endpoint context interval
4000  * value for a periodic pipe.
4001  * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
4002  */
4003 static uint32_t
4004 xhci_bival2ival(uint32_t ival, uint32_t speed, uint32_t xfertype)
4005 {
4006 if (xfertype != UE_INTERRUPT && xfertype != UE_ISOCHRONOUS)
4007 return 0;
4008
4009 if (xfertype == UE_INTERRUPT &&
4010 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
4011 u_int i;
4012
4013 /*
4014 * round ival down to "the nearest base 2 multiple of
4015 * bInterval * 8".
4016 * bInterval is at most 255 as its type is uByte.
4017 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
4018 */
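		/*
		 * e.g. bInterval = 10 (10 ms) is 80 microframes; the largest
		 * power of two not above 80 is 2^6, so ival becomes 6
		 * (an 8 ms service interval).
		 */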
4019 for (i = 10; i > 0; i--) {
4020 if ((ival * 8) >= (1 << i))
4021 break;
4022 }
4023 ival = i;
4024
4025 /* 3 - 10 */
4026 ival = (ival < 3) ? 3 : ival;
4027 } else if (speed == USB_SPEED_FULL) {
4028 /* FS isoc */
4029 ival += 3; /* 1ms -> 125us */
4030 ival--; /* Interval = bInterval-1 */
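		/* e.g. bInterval = 1 (2^0 frames = 1 ms) becomes ival 3 (2^3 * 125 us) */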
4031 /* 3 - 18 */
4032 ival = (ival > 18) ? 18 : ival;
4033 ival = (ival < 3) ? 3 : ival;
4034 } else {
4035 /* SS/HS intr/isoc */
4036 if (ival > 0)
4037 ival--; /* Interval = bInterval-1 */
4038 /* 0 - 15 */
4039 ival = (ival > 15) ? 15 : ival;
4040 }
4041
4042 return ival;
4043 }
4044
4045 /* ----- */
4046
4047 static void
4048 xhci_noop(struct usbd_pipe *pipe)
4049 {
4050 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4051 }
4052
4053 /*
4054 * Process root hub request.
4055 */
4056 static int
4057 xhci_roothub_ctrl_locked(struct usbd_bus *bus, usb_device_request_t *req,
4058 void *buf, int buflen)
4059 {
4060 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
4061 usb_port_status_t ps;
4062 int l, totlen = 0;
4063 uint16_t len, value, index;
4064 int port, i;
4065 uint32_t v;
4066
4067 XHCIHIST_FUNC();
4068
4069 KASSERT(mutex_owned(&sc->sc_rhlock));
4070
4071 if (sc->sc_dying)
4072 return -1;
4073
4074 size_t bn = bus == &sc->sc_bus ? 0 : 1;
4075
4076 len = UGETW(req->wLength);
4077 value = UGETW(req->wValue);
4078 index = UGETW(req->wIndex);
4079
4080 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
4081 req->bmRequestType | (req->bRequest << 8), value, index, len);
4082
4083 #define C(x,y) ((x) | ((y) << 8))
4084 switch (C(req->bRequest, req->bmRequestType)) {
4085 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
4086 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
4087 if (len == 0)
4088 break;
4089 switch (value) {
4090 #define sd ((usb_string_descriptor_t *)buf)
4091 case C(2, UDESC_STRING):
4092 /* Product */
4093 totlen = usb_makestrdesc(sd, len, "xHCI root hub");
4094 break;
4095 #undef sd
4096 default:
4097 /* default from usbroothub */
4098 return buflen;
4099 }
4100 break;
4101
4102 /* Hub requests */
4103 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
4104 break;
4105 /* Clear Port Feature request */
4106 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
4107 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4108
4109 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
4110 index, value, bn, cp);
4111 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4112 return -1;
4113 }
4114 port = XHCI_PORTSC(cp);
4115 v = xhci_op_read_4(sc, port);
4116 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
4117 v &= ~XHCI_PS_CLEAR;
4118 switch (value) {
4119 case UHF_PORT_ENABLE:
4120 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
4121 break;
4122 case UHF_PORT_SUSPEND:
4123 return -1;
4124 case UHF_PORT_POWER:
4125 break;
4126 case UHF_PORT_TEST:
4127 case UHF_PORT_INDICATOR:
4128 return -1;
4129 case UHF_C_PORT_CONNECTION:
4130 xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
4131 break;
4132 case UHF_C_PORT_ENABLE:
4133 case UHF_C_PORT_SUSPEND:
4134 case UHF_C_PORT_OVER_CURRENT:
4135 return -1;
4136 case UHF_C_BH_PORT_RESET:
4137 xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
4138 break;
4139 case UHF_C_PORT_RESET:
4140 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4141 break;
4142 case UHF_C_PORT_LINK_STATE:
4143 xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
4144 break;
4145 case UHF_C_PORT_CONFIG_ERROR:
4146 xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
4147 break;
4148 default:
4149 return -1;
4150 }
4151 break;
4152 }
4153 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
4154 if (len == 0)
4155 break;
4156 if ((value & 0xff) != 0) {
4157 return -1;
4158 }
4159 usb_hub_descriptor_t hubd;
4160
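		/*
		 * buf already holds the default hub descriptor from usbroothub;
		 * copy it and override the controller-specific fields.
		 */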
4161 totlen = uimin(buflen, sizeof(hubd));
4162 memcpy(&hubd, buf, totlen);
4163 hubd.bNbrPorts = sc->sc_rhportcount[bn];
4164 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
4165 hubd.bPwrOn2PwrGood = 200;
4166 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
4167 /* XXX can't find out? */
4168 			hubd.DeviceRemovable[i] = 0;
4169 }
4170 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
4171 totlen = uimin(totlen, hubd.bDescLength);
4172 memcpy(buf, &hubd, totlen);
4173 break;
4174 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
4175 if (len != 4) {
4176 return -1;
4177 }
4178 memset(buf, 0, len); /* ? XXX */
4179 totlen = len;
4180 break;
4181 /* Get Port Status request */
4182 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
4183 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4184
4185 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
4186 bn, index, cp, 0);
4187 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4188 DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
4189 "portcount=%jd",
4190 index, bn, sc->sc_rhportcount[bn], 0);
4191 return -1;
4192 }
4193 if (len != 4) {
4194 DPRINTFN(5, "bad get port status: len %jd != 4",
4195 len, 0, 0, 0);
4196 return -1;
4197 }
4198 v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
4199 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
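		/* Translate PORTSC bits into USB hub wPortStatus/wPortChange bits. */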
4200 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
4201 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
4202 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
4203 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
4204 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
4205 if (v & XHCI_PS_PR) i |= UPS_RESET;
4206 if (v & XHCI_PS_PP) {
4207 if (i & UPS_OTHER_SPEED)
4208 i |= UPS_PORT_POWER_SS;
4209 else
4210 i |= UPS_PORT_POWER;
4211 }
4212 if (i & UPS_OTHER_SPEED)
4213 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
4214 if (sc->sc_vendor_port_status)
4215 i = sc->sc_vendor_port_status(sc, v, i);
4216 USETW(ps.wPortStatus, i);
4217 i = 0;
4218 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
4219 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
4220 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
4221 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
4222 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
4223 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
4224 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
4225 USETW(ps.wPortChange, i);
4226 totlen = uimin(len, sizeof(ps));
4227 memcpy(buf, &ps, totlen);
4228 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
4229 " totlen %jd",
4230 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
4231 break;
4232 }
4233 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
4234 return -1;
4235 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
4236 break;
4237 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
4238 break;
4239 /* Set Port Feature request */
4240 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
4241 int optval = (index >> 8) & 0xff;
4242 index &= 0xff;
4243 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4244 return -1;
4245 }
4246
4247 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4248
4249 port = XHCI_PORTSC(cp);
4250 v = xhci_op_read_4(sc, port);
4251 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
4252 v &= ~XHCI_PS_CLEAR;
4253 switch (value) {
4254 case UHF_PORT_ENABLE:
4255 xhci_op_write_4(sc, port, v | XHCI_PS_PED);
4256 break;
4257 case UHF_PORT_SUSPEND:
4258 /* XXX suspend */
4259 break;
4260 case UHF_PORT_RESET:
4261 xhci_op_write_4(sc, port, v | XHCI_PS_PR);
4262 /* Wait for reset to complete. */
4263 for (i = 0; i < USB_PORT_ROOT_RESET_DELAY / 10; i++) {
4264 if (sc->sc_dying) {
4265 return -1;
4266 }
4267 v = xhci_op_read_4(sc, port);
4268 if ((v & XHCI_PS_PR) == 0) {
4269 break;
4270 }
4271 usb_delay_ms(&sc->sc_bus, 10);
4272 }
4273 break;
4274 case UHF_PORT_POWER:
4275 /* XXX power control */
4276 break;
4277 /* XXX more */
4278 case UHF_C_PORT_RESET:
4279 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4280 break;
4281 case UHF_PORT_U1_TIMEOUT:
4282 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4283 return -1;
4284 }
4285 port = XHCI_PORTPMSC(cp);
4286 v = xhci_op_read_4(sc, port);
4287 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4288 index, cp, v, 0);
4289 v &= ~XHCI_PM3_U1TO_SET(0xff);
4290 v |= XHCI_PM3_U1TO_SET(optval);
4291 xhci_op_write_4(sc, port, v);
4292 break;
4293 case UHF_PORT_U2_TIMEOUT:
4294 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4295 return -1;
4296 }
4297 port = XHCI_PORTPMSC(cp);
4298 v = xhci_op_read_4(sc, port);
4299 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4300 index, cp, v, 0);
4301 v &= ~XHCI_PM3_U2TO_SET(0xff);
4302 v |= XHCI_PM3_U2TO_SET(optval);
4303 xhci_op_write_4(sc, port, v);
4304 break;
4305 default:
4306 return -1;
4307 }
4308 }
4309 break;
4310 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
4311 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
4312 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
4313 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
4314 break;
4315 default:
4316 /* default from usbroothub */
4317 return buflen;
4318 }
4319
4320 return totlen;
4321 }
4322
4323 static int
4324 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
4325 void *buf, int buflen)
4326 {
4327 struct xhci_softc *sc = XHCI_BUS2SC(bus);
4328 int actlen;
4329
4330 mutex_enter(&sc->sc_rhlock);
4331 actlen = xhci_roothub_ctrl_locked(bus, req, buf, buflen);
4332 mutex_exit(&sc->sc_rhlock);
4333
4334 return actlen;
4335 }
4336
4337 /* root hub interrupt */
4338
4339 static usbd_status
4340 xhci_root_intr_transfer(struct usbd_xfer *xfer)
4341 {
4342 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4343
4344 /* Pipe isn't running, start first */
4345 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4346 }
4347
4348 /* Wait for roothub port status/change */
4349 static usbd_status
4350 xhci_root_intr_start(struct usbd_xfer *xfer)
4351 {
4352 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4353 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4354
4355 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4356
4357 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4358
4359 if (sc->sc_dying)
4360 return USBD_IOERROR;
4361
4362 KASSERT(sc->sc_intrxfer[bn] == NULL);
4363 sc->sc_intrxfer[bn] = xfer;
4364 xfer->ux_status = USBD_IN_PROGRESS;
4365
4366 return USBD_IN_PROGRESS;
4367 }
4368
4369 static void
4370 xhci_root_intr_abort(struct usbd_xfer *xfer)
4371 {
4372 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4373 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4374
4375 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4376
4377 KASSERT(mutex_owned(&sc->sc_lock));
4378 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
4379
4380 /* If xfer has already completed, nothing to do here. */
4381 if (sc->sc_intrxfer[bn] == NULL)
4382 return;
4383
4384 /*
4385 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
4386 * Cancel it.
4387 */
4388 KASSERT(sc->sc_intrxfer[bn] == xfer);
4389 xfer->ux_status = USBD_CANCELLED;
4390 usb_transfer_complete(xfer);
4391 }
4392
4393 static void
4394 xhci_root_intr_close(struct usbd_pipe *pipe)
4395 {
4396 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
4397 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
4398 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4399
4400 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4401
4402 KASSERT(mutex_owned(&sc->sc_lock));
4403
4404 /*
4405 * Caller must guarantee the xfer has completed first, by
4406 * closing the pipe only after normal completion or an abort.
4407 */
4408 KASSERT(sc->sc_intrxfer[bn] == NULL);
4409 }
4410
4411 static void
4412 xhci_root_intr_done(struct usbd_xfer *xfer)
4413 {
4414 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4415 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4416
4417 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4418
4419 KASSERT(mutex_owned(&sc->sc_lock));
4420
4421 /* Claim the xfer so it doesn't get completed again. */
4422 KASSERT(sc->sc_intrxfer[bn] == xfer);
4423 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
4424 sc->sc_intrxfer[bn] = NULL;
4425 }
4426
4427 /* -------------- */
4428 /* device control */
4429
4430 static usbd_status
4431 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
4432 {
4433 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4434
4435 /* Pipe isn't running, start first */
4436 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4437 }
4438
4439 static usbd_status
4440 xhci_device_ctrl_start(struct usbd_xfer *xfer)
4441 {
4442 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4443 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4444 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4445 struct xhci_ring * const tr = xs->xs_xr[dci];
4446 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4447 usb_device_request_t * const req = &xfer->ux_request;
4448 const bool isread = usbd_xfer_isread(xfer);
4449 const uint32_t len = UGETW(req->wLength);
4450 usb_dma_t * const dma = &xfer->ux_dmabuf;
4451 uint64_t parameter;
4452 uint32_t status;
4453 uint32_t control;
4454 u_int i;
4455 const bool polling = xhci_polling_p(sc);
4456
4457 XHCIHIST_FUNC();
4458 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
4459 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
4460 UGETW(req->wIndex), UGETW(req->wLength));
4461
4462 KASSERT(polling || mutex_owned(&sc->sc_lock));
4463
4464 /* we rely on the bottom bits for extra info */
4465 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %p", xfer);
4466
4467 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);
4468
4469 if (tr->is_halted)
4470 goto out;
4471
4472 i = 0;
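	/*
	 * A control TD consists of a Setup Stage TRB, an optional Data Stage
	 * TRB, and a Status Stage TRB (xHCI 4.11.2.2).
	 */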
4473
4474 /* setup phase */
4475 	parameter = le64dec(req); /* le64dec so the later htole64 in the TRB write restores USB byte order */
4476 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
4477 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
4478 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
4479 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
4480 XHCI_TRB_3_IDT_BIT;
4481 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4482
4483 if (len != 0) {
4484 /* data phase */
4485 parameter = DMAADDR(dma, 0);
4486 KASSERTMSG(len <= 0x10000, "len %d", len);
4487 status = XHCI_TRB_2_IRQ_SET(0) |
4488 XHCI_TRB_2_TDSZ_SET(0) |
4489 XHCI_TRB_2_BYTES_SET(len);
4490 control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
4491 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
4492 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4493 XHCI_TRB_3_IOC_BIT;
4494 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4495
4496 usb_syncmem(dma, 0, len,
4497 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4498 }
4499
4500 parameter = 0;
4501 status = XHCI_TRB_2_IRQ_SET(0);
4502 	/* the status stage direction is opposite to the data stage; IN when there is no data stage */
4503 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
4504 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
4505 XHCI_TRB_3_IOC_BIT;
4506 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4507
4508 if (!polling)
4509 mutex_enter(&tr->xr_lock);
4510 xhci_ring_put_xfer(sc, tr, xx, i);
4511 if (!polling)
4512 mutex_exit(&tr->xr_lock);
4513
4514 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4515
4516 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4517 xfer->ux_status = USBD_IN_PROGRESS;
4518 usbd_xfer_schedule_timeout(xfer);
4519 } else {
4520 /*
4521 * We must be coming from xhci_pipe_restart -- timeout
4522 * already set up, nothing to do.
4523 */
4524 }
4525 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4526
4527 return USBD_IN_PROGRESS;
4528 }
4529
4530 static void
4531 xhci_device_ctrl_done(struct usbd_xfer *xfer)
4532 {
4533 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4534 usb_device_request_t *req = &xfer->ux_request;
4535 int len = UGETW(req->wLength);
4536 int rd = req->bmRequestType & UT_READ;
4537
4538 if (len)
4539 usb_syncmem(&xfer->ux_dmabuf, 0, len,
4540 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4541 }
4542
4543 static void
4544 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
4545 {
4546 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4547
4548 usbd_xfer_abort(xfer);
4549 }
4550
4551 static void
4552 xhci_device_ctrl_close(struct usbd_pipe *pipe)
4553 {
4554 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4555
4556 xhci_close_pipe(pipe);
4557 }
4558
4559 /* ------------------ */
4560 /* device isochronous */
4561
4562 static usbd_status
4563 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
4564 {
4565 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4566
4567 return xhci_device_isoc_enter(xfer);
4568 }
4569
4570 static usbd_status
4571 xhci_device_isoc_enter(struct usbd_xfer *xfer)
4572 {
4573 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4574 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4575 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4576 struct xhci_ring * const tr = xs->xs_xr[dci];
4577 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4578 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
4579 usb_dma_t * const dma = &xfer->ux_dmabuf;
4580 uint64_t parameter;
4581 uint32_t status;
4582 uint32_t control;
4583 uint32_t offs;
4584 int i, ival;
4585 const bool polling = xhci_polling_p(sc);
4586 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
4587 const uint16_t mps = UE_GET_SIZE(MPS);
4588 const uint8_t maxb = xpipe->xp_maxb;
4589
4590 XHCIHIST_FUNC();
4591 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4592 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4593
4594 KASSERT(polling || mutex_owned(&sc->sc_lock));
4595
4596 if (sc->sc_dying)
4597 return USBD_IOERROR;
4598
4599 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
4600 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4601
4602 const bool isread = usbd_xfer_isread(xfer);
4603 if (xfer->ux_length)
4604 usb_syncmem(dma, 0, xfer->ux_length,
4605 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4606
4607 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
4608 if (ival >= 1 && ival <= 16)
4609 ival = 1 << (ival - 1);
4610 else
4611 ival = 1; /* fake something up */
4612
4613 if (xpipe->xp_isoc_next == -1) {
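		/*
		 * First isoc TD on this pipe: pick a start frame safely in the
		 * future.  MFINDEX counts 125us microframes; convert to 1 ms
		 * frames, add the worst-case isochronous scheduling threshold,
		 * and align to the endpoint interval.
		 */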
4614 uint32_t mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
4615
4616 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
4617 mfindex = XHCI_MFINDEX_GET(mfindex + 1);
4618 mfindex /= USB_UFRAMES_PER_FRAME;
4619 mfindex += 7; /* 7 frames is max possible IST */
4620 xpipe->xp_isoc_next = roundup2(mfindex, ival);
4621 }
4622
4623 offs = 0;
4624 for (i = 0; i < xfer->ux_nframes; i++) {
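		/*
		 * Packet/burst bookkeeping per xHCI 4.11.2.3: tdpc is the number
		 * of max-packet-sized packets in this frame's TD, tbc the number
		 * of bursts minus one, and tlbpc the number of packets in the
		 * last burst minus one.
		 */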
4625 const uint32_t len = xfer->ux_frlengths[i];
4626 const unsigned tdpc = howmany(len, mps);
4627 const unsigned tbc = howmany(tdpc, maxb) - 1;
4628 const unsigned tlbpc1 = tdpc % maxb;
4629 const unsigned tlbpc = tlbpc1 ? tlbpc1 - 1 : maxb - 1;
4630
4631 KASSERTMSG(len <= 0x10000, "len %d", len);
4632 parameter = DMAADDR(dma, offs);
4633 status = XHCI_TRB_2_IRQ_SET(0) |
4634 XHCI_TRB_2_TDSZ_SET(0) |
4635 XHCI_TRB_2_BYTES_SET(len);
4636 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
4637 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4638 XHCI_TRB_3_TBC_SET(tbc) |
4639 XHCI_TRB_3_TLBPC_SET(tlbpc) |
4640 XHCI_TRB_3_IOC_BIT;
4641 if (XHCI_HCC_CFC(sc->sc_hcc)) {
4642 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4643 #if 0
4644 } else if (xpipe->xp_isoc_next == -1) {
4645 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4646 #endif
4647 } else {
4648 control |= XHCI_TRB_3_ISO_SIA_BIT;
4649 }
4650 #if 0
4651 if (i != xfer->ux_nframes - 1)
4652 control |= XHCI_TRB_3_BEI_BIT;
4653 #endif
4654 xhci_xfer_put_trb(xx, i, parameter, status, control);
4655
4656 xpipe->xp_isoc_next += ival;
4657 offs += len;
4658 }
4659
4660 xx->xx_isoc_done = 0;
4661
4662 if (!polling)
4663 mutex_enter(&tr->xr_lock);
4664 xhci_ring_put_xfer(sc, tr, xx, i);
4665 if (!polling)
4666 mutex_exit(&tr->xr_lock);
4667
4668 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4669 xfer->ux_status = USBD_IN_PROGRESS;
4670 usbd_xfer_schedule_timeout(xfer);
4671
4672 return USBD_IN_PROGRESS;
4673 }
4674
4675 static void
4676 xhci_device_isoc_abort(struct usbd_xfer *xfer)
4677 {
4678 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4679
4680 usbd_xfer_abort(xfer);
4681 }
4682
4683 static void
4684 xhci_device_isoc_close(struct usbd_pipe *pipe)
4685 {
4686 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4687
4688 xhci_close_pipe(pipe);
4689 }
4690
4691 static void
4692 xhci_device_isoc_done(struct usbd_xfer *xfer)
4693 {
4694 #ifdef USB_DEBUG
4695 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4696 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4697 #endif
4698 const bool isread = usbd_xfer_isread(xfer);
4699
4700 XHCIHIST_FUNC();
4701 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4702 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4703
4704 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4705 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4706 }
4707
4708 /* ----------- */
4709 /* device bulk */
4710
4711 static usbd_status
4712 xhci_device_bulk_transfer(struct usbd_xfer *xfer)
4713 {
4714 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4715
4716 /* Pipe isn't running, so start it first. */
4717 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4718 }
4719
4720 static usbd_status
4721 xhci_device_bulk_start(struct usbd_xfer *xfer)
4722 {
4723 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4724 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4725 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4726 struct xhci_ring * const tr = xs->xs_xr[dci];
4727 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4728 const uint32_t len = xfer->ux_length;
4729 usb_dma_t * const dma = &xfer->ux_dmabuf;
4730 uint64_t parameter;
4731 uint32_t status;
4732 uint32_t control;
4733 u_int i = 0;
4734 const bool polling = xhci_polling_p(sc);
4735
4736 XHCIHIST_FUNC();
4737 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4738 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4739
4740 KASSERT(polling || mutex_owned(&sc->sc_lock));
4741
4742 if (sc->sc_dying)
4743 return USBD_IOERROR;
4744
4745 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4746
4747 if (tr->is_halted)
4748 goto out;
4749
4750 parameter = DMAADDR(dma, 0);
4751 const bool isread = usbd_xfer_isread(xfer);
4752 if (len)
4753 usb_syncmem(dma, 0, len,
4754 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4755
4756 	/*
4757 	 * XXX: (dsl) The physical buffer must not cross a 64k boundary.
4758 	 * If the user-supplied buffer crosses such a boundary then two
4759 	 * (or more) TRBs should be used.
4760 	 * If multiple TRBs are used the td_size field must be set correctly.
4761 	 * For v1.0 devices (like Ivy Bridge) this is the number of USB data
4762 	 * blocks needed to complete the transfer.
4763 	 * Setting it to 1 in the last TRB causes an extra zero-length
4764 	 * data block to be sent.
4765 	 * The earlier documentation differs; I don't know how it behaves.
4766 	 */
4767 KASSERTMSG(len <= 0x10000, "len %d", len);
4768 status = XHCI_TRB_2_IRQ_SET(0) |
4769 XHCI_TRB_2_TDSZ_SET(0) |
4770 XHCI_TRB_2_BYTES_SET(len);
4771 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4772 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4773 XHCI_TRB_3_IOC_BIT;
4774 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4775
4776 if (!polling)
4777 mutex_enter(&tr->xr_lock);
4778 xhci_ring_put_xfer(sc, tr, xx, i);
4779 if (!polling)
4780 mutex_exit(&tr->xr_lock);
4781
4782 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4783
4784 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4785 xfer->ux_status = USBD_IN_PROGRESS;
4786 usbd_xfer_schedule_timeout(xfer);
4787 } else {
4788 /*
4789 * We must be coming from xhci_pipe_restart -- timeout
4790 * already set up, nothing to do.
4791 */
4792 }
4793 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4794
4795 return USBD_IN_PROGRESS;
4796 }
4797
4798 static void
4799 xhci_device_bulk_done(struct usbd_xfer *xfer)
4800 {
4801 #ifdef USB_DEBUG
4802 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4803 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4804 #endif
4805 const bool isread = usbd_xfer_isread(xfer);
4806
4807 XHCIHIST_FUNC();
4808 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4809 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4810
4811 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4812 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4813 }
4814
4815 static void
4816 xhci_device_bulk_abort(struct usbd_xfer *xfer)
4817 {
4818 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4819
4820 usbd_xfer_abort(xfer);
4821 }
4822
4823 static void
4824 xhci_device_bulk_close(struct usbd_pipe *pipe)
4825 {
4826 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4827
4828 xhci_close_pipe(pipe);
4829 }
4830
4831 /* ---------------- */
4832 /* device interrupt */
4833
4834 static usbd_status
4835 xhci_device_intr_transfer(struct usbd_xfer *xfer)
4836 {
4837 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4838
4839 /* Pipe isn't running, so start it first. */
4840 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4841 }
4842
4843 static usbd_status
4844 xhci_device_intr_start(struct usbd_xfer *xfer)
4845 {
4846 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4847 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4848 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4849 struct xhci_ring * const tr = xs->xs_xr[dci];
4850 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4851 const uint32_t len = xfer->ux_length;
4852 const bool polling = xhci_polling_p(sc);
4853 usb_dma_t * const dma = &xfer->ux_dmabuf;
4854 uint64_t parameter;
4855 uint32_t status;
4856 uint32_t control;
4857 u_int i = 0;
4858
4859 XHCIHIST_FUNC();
4860 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4861 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4862
4863 KASSERT(polling || mutex_owned(&sc->sc_lock));
4864
4865 if (sc->sc_dying)
4866 return USBD_IOERROR;
4867
4868 if (tr->is_halted)
4869 goto out;
4870
4871 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4872
4873 const bool isread = usbd_xfer_isread(xfer);
4874 if (len)
4875 usb_syncmem(dma, 0, len,
4876 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4877
4878 parameter = DMAADDR(dma, 0);
4879 KASSERTMSG(len <= 0x10000, "len %d", len);
4880 status = XHCI_TRB_2_IRQ_SET(0) |
4881 XHCI_TRB_2_TDSZ_SET(0) |
4882 XHCI_TRB_2_BYTES_SET(len);
4883 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4884 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
4885 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4886
4887 if (!polling)
4888 mutex_enter(&tr->xr_lock);
4889 xhci_ring_put_xfer(sc, tr, xx, i);
4890 if (!polling)
4891 mutex_exit(&tr->xr_lock);
4892
4893 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4894
4895 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4896 xfer->ux_status = USBD_IN_PROGRESS;
4897 usbd_xfer_schedule_timeout(xfer);
4898 } else {
4899 /*
4900 * We must be coming from xhci_pipe_restart -- timeout
4901 * already set up, nothing to do.
4902 */
4903 }
4904 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4905
4906 return USBD_IN_PROGRESS;
4907 }
4908
4909 static void
4910 xhci_device_intr_done(struct usbd_xfer *xfer)
4911 {
4912 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4913 #ifdef USB_DEBUG
4914 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4915 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4916 #endif
4917 const bool isread = usbd_xfer_isread(xfer);
4918
4919 XHCIHIST_FUNC();
4920 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4921 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4922
4923 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4924
4925 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4926 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4927 }
4928
4929 static void
4930 xhci_device_intr_abort(struct usbd_xfer *xfer)
4931 {
4932 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4933
4934 XHCIHIST_FUNC();
4935 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);
4936
4937 KASSERT(mutex_owned(&sc->sc_lock));
4938 usbd_xfer_abort(xfer);
4939 }
4940
4941 static void
4942 xhci_device_intr_close(struct usbd_pipe *pipe)
4943 {
4944 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
4945
4946 XHCIHIST_FUNC();
4947 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);
4948
4949 xhci_close_pipe(pipe);
4950 }
4951