     1 /*	$NetBSD: xhci.c,v 1.190 2025/08/24 09:43:09 nat Exp $	*/
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * USB rev 2.0 and rev 3.1 specification
31 * http://www.usb.org/developers/docs/
32 * xHCI rev 1.1 specification
33 * http://www.intel.com/technology/usb/spec.htm
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.190 2025/08/24 09:43:09 nat Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_usb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/kmem.h>
47 #include <sys/device.h>
48 #include <sys/select.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/sysctl.h>
56
57 #include <machine/endian.h>
58
59 #include <dev/usb/usb.h>
60 #include <dev/usb/usbdi.h>
61 #include <dev/usb/usbdivar.h>
62 #include <dev/usb/usbdi_util.h>
63 #include <dev/usb/usbhist.h>
64 #include <dev/usb/usb_mem.h>
65 #include <dev/usb/usb_quirks.h>
66
67 #include <dev/usb/xhcireg.h>
68 #include <dev/usb/xhcivar.h>
69 #include <dev/usb/usbroothub.h>
70
71
72 #ifdef USB_DEBUG
73 #ifndef XHCI_DEBUG
74 #define xhcidebug 0
75 #else /* !XHCI_DEBUG */
76 #define HEXDUMP(a, b, c) \
77 do { \
78 if (xhcidebug > 0) \
79 hexdump(printf, a, b, c); \
80 } while (/*CONSTCOND*/0)
81 static int xhcidebug = 0;
82
83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
84 {
85 int err;
86 const struct sysctlnode *rnode;
87 const struct sysctlnode *cnode;
88
89 err = sysctl_createv(clog, 0, NULL, &rnode,
90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
91 SYSCTL_DESCR("xhci global controls"),
92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
93
94 if (err)
95 goto fail;
96
97 /* control debugging printfs */
98 err = sysctl_createv(clog, 0, &rnode, &cnode,
99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
100 "debug", SYSCTL_DESCR("Enable debugging output"),
101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
102 if (err)
103 goto fail;
104
105 return;
106 fail:
107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
108 }
109
110 #endif /* !XHCI_DEBUG */
111 #endif /* USB_DEBUG */
112
113 #ifndef HEXDUMP
114 #define HEXDUMP(a, b, c)
115 #endif
116
117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
119 #define XHCIHIST_FUNC() USBHIST_FUNC()
120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug)
121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)
123
124 #define XHCI_DCI_SLOT 0
125 #define XHCI_DCI_EP_CONTROL 1
126
127 #define XHCI_ICI_INPUT_CONTROL 0
128
129 struct xhci_pipe {
130 struct usbd_pipe xp_pipe;
131 struct usb_task xp_async_task;
132 int16_t xp_isoc_next; /* next micro frame */
133 uint8_t xp_maxb; /* max burst */
134 uint8_t xp_mult;
135 uint8_t xp_ival;
136 };
137
138 #define XHCI_COMMAND_RING_TRBS 256
139 #define XHCI_EVENT_RING_TRBS 256
140 #define XHCI_EVENT_RING_SEGMENTS 1
141 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT
142
143 static usbd_status xhci_open(struct usbd_pipe *);
144 static void xhci_close_pipe(struct usbd_pipe *);
145 static int xhci_intr1(struct xhci_softc * const);
146 static void xhci_softintr(void *);
147 static void xhci_poll(struct usbd_bus *);
148 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
149 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
150 static void xhci_abortx(struct usbd_xfer *);
151 static bool xhci_dying(struct usbd_bus *);
152 static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
153 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
154 struct usbd_port *);
155 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
156 void *, int);
157
158 static void xhci_pipe_restart(struct usbd_pipe *);
159 static void xhci_pipe_restart_async_task(void *);
160 static void xhci_pipe_restart_async(struct usbd_pipe *);
161
162 static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
163 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
164 static void xhci_reset_endpoint(struct usbd_pipe *);
165 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *,
166 struct xhci_slot *, u_int, uint32_t);
167 static usbd_status xhci_stop_endpoint(struct usbd_pipe *);
168
169 static void xhci_host_dequeue(struct xhci_ring * const);
170 static void xhci_set_dequeue(struct usbd_pipe *);
171
172 static usbd_status xhci_do_command(struct xhci_softc * const,
173 struct xhci_soft_trb * const, int);
174 static usbd_status xhci_do_command_locked(struct xhci_softc * const,
175 struct xhci_soft_trb * const, int);
176 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
177 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *);
178 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
179 static usbd_status xhci_enable_slot(struct xhci_softc * const,
180 uint8_t * const);
181 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
182 static usbd_status xhci_address_device(struct xhci_softc * const,
183 uint64_t, uint8_t, bool);
184 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
185 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
186 struct xhci_slot * const, u_int);
187 static usbd_status xhci_ring_init(struct xhci_softc * const,
188 struct xhci_ring **, size_t, size_t);
189 static void xhci_ring_free(struct xhci_softc * const,
190 struct xhci_ring ** const);
191
192 static void xhci_setup_ctx(struct usbd_pipe *);
193 static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
194 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
195 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
196 static uint32_t xhci_bival2ival(uint32_t, uint32_t, uint32_t);
197
198 static void xhci_noop(struct usbd_pipe *);
199
200 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
201 static usbd_status xhci_root_intr_start(struct usbd_xfer *);
202 static void xhci_root_intr_abort(struct usbd_xfer *);
203 static void xhci_root_intr_close(struct usbd_pipe *);
204 static void xhci_root_intr_done(struct usbd_xfer *);
205
206 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
207 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
208 static void xhci_device_ctrl_abort(struct usbd_xfer *);
209 static void xhci_device_ctrl_close(struct usbd_pipe *);
210 static void xhci_device_ctrl_done(struct usbd_xfer *);
211
212 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
213 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *);
214 static void xhci_device_isoc_abort(struct usbd_xfer *);
215 static void xhci_device_isoc_close(struct usbd_pipe *);
216 static void xhci_device_isoc_done(struct usbd_xfer *);
217
218 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
219 static usbd_status xhci_device_intr_start(struct usbd_xfer *);
220 static void xhci_device_intr_abort(struct usbd_xfer *);
221 static void xhci_device_intr_close(struct usbd_pipe *);
222 static void xhci_device_intr_done(struct usbd_xfer *);
223
224 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
225 static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
226 static void xhci_device_bulk_abort(struct usbd_xfer *);
227 static void xhci_device_bulk_close(struct usbd_pipe *);
228 static void xhci_device_bulk_done(struct usbd_xfer *);
229
230 static const struct usbd_bus_methods xhci_bus_methods = {
231 .ubm_open = xhci_open,
232 .ubm_softint = xhci_softintr,
233 .ubm_dopoll = xhci_poll,
234 .ubm_allocx = xhci_allocx,
235 .ubm_freex = xhci_freex,
236 .ubm_abortx = xhci_abortx,
237 .ubm_dying = xhci_dying,
238 .ubm_getlock = xhci_get_lock,
239 .ubm_newdev = xhci_new_device,
240 .ubm_rhctrl = xhci_roothub_ctrl,
241 };
242
243 static const struct usbd_pipe_methods xhci_root_intr_methods = {
244 .upm_transfer = xhci_root_intr_transfer,
245 .upm_start = xhci_root_intr_start,
246 .upm_abort = xhci_root_intr_abort,
247 .upm_close = xhci_root_intr_close,
248 .upm_cleartoggle = xhci_noop,
249 .upm_done = xhci_root_intr_done,
250 };
251
252
253 static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
254 .upm_transfer = xhci_device_ctrl_transfer,
255 .upm_start = xhci_device_ctrl_start,
256 .upm_abort = xhci_device_ctrl_abort,
257 .upm_close = xhci_device_ctrl_close,
258 .upm_cleartoggle = xhci_noop,
259 .upm_done = xhci_device_ctrl_done,
260 };
261
262 static const struct usbd_pipe_methods xhci_device_isoc_methods = {
263 .upm_transfer = xhci_device_isoc_transfer,
264 .upm_abort = xhci_device_isoc_abort,
265 .upm_close = xhci_device_isoc_close,
266 .upm_cleartoggle = xhci_noop,
267 .upm_done = xhci_device_isoc_done,
268 };
269
270 static const struct usbd_pipe_methods xhci_device_bulk_methods = {
271 .upm_transfer = xhci_device_bulk_transfer,
272 .upm_start = xhci_device_bulk_start,
273 .upm_abort = xhci_device_bulk_abort,
274 .upm_close = xhci_device_bulk_close,
275 .upm_cleartoggle = xhci_noop,
276 .upm_done = xhci_device_bulk_done,
277 };
278
279 static const struct usbd_pipe_methods xhci_device_intr_methods = {
280 .upm_transfer = xhci_device_intr_transfer,
281 .upm_start = xhci_device_intr_start,
282 .upm_abort = xhci_device_intr_abort,
283 .upm_close = xhci_device_intr_close,
284 .upm_cleartoggle = xhci_noop,
285 .upm_done = xhci_device_intr_done,
286 };
287
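/*
 * MMIO register accessors.  Controllers with the XHCI_32BIT_ACCESS
 * quirk only tolerate 32-bit wide accesses, so the byte and halfword
 * variants below emulate them with aligned 32-bit reads and
 * read-modify-write cycles.
 */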
288 static inline uint32_t
289 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
290 {
291 if (ISSET(sc->sc_quirks, XHCI_32BIT_ACCESS)) {
292 uint32_t val;
293 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset & ~3);
294 return (val >> ((offset & 3) * NBBY)) & 0xff;
295 } else {
296 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
297 }
298 }
299
300 static inline uint32_t
301 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset)
302 {
303 if (ISSET(sc->sc_quirks, XHCI_32BIT_ACCESS)) {
304 uint32_t val;
305 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset & ~3);
306 return (val >> ((offset & 3) * NBBY)) & 0xffff;
307 } else {
308 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
309 }
310 }
311
312 static inline uint32_t
313 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
314 {
315 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
316 }
317
318 static inline void
319 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
320 uint32_t value)
321 {
322 if (ISSET(sc->sc_quirks, XHCI_32BIT_ACCESS)) {
323 const uint32_t mask = 0xffU << ((offset & 3) * NBBY);
324 uint32_t val;
325 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset & ~3);
326 val &= ~mask;
327 val |= __SHIFTIN(value, mask);
328 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset & ~3, val);
329 } else {
330 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
331 }
332 }
333
334 #if 0 /* unused */
335 static inline void
336 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
337 uint32_t value)
338 {
339 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
340 }
341 #endif /* unused */
342
343 static inline uint32_t
344 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
345 {
346 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
347 }
348
349 static inline uint32_t
350 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
351 {
352 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
353 }
354
355 static inline void
356 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
357 uint32_t value)
358 {
359 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
360 }
361
362 static inline uint64_t
363 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
364 {
365 uint64_t value;
366
367 #ifdef XHCI_USE_BUS_SPACE_8
368 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
369 #else
370 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
371 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
372 offset + 4) << 32;
373 #endif
374
375 return value;
376 }
377
378 static inline void
379 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
380 uint64_t value)
381 {
382 #ifdef XHCI_USE_BUS_SPACE_8
383 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
384 #else
385 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
386 (value >> 0) & 0xffffffff);
387 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
388 (value >> 32) & 0xffffffff);
389 #endif
390 }
391
392 static inline uint32_t
393 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
394 {
395 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
396 }
397
398 static inline void
399 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
400 uint32_t value)
401 {
402 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
403 }
404
405 static inline uint64_t
406 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
407 {
408 uint64_t value;
409
410 #ifdef XHCI_USE_BUS_SPACE_8
411 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
412 #else
413 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
414 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
415 offset + 4) << 32;
416 #endif
417
418 return value;
419 }
420
421 static inline void
422 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
423 uint64_t value)
424 {
425 #ifdef XHCI_USE_BUS_SPACE_8
426 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
427 #else
428 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
429 (value >> 0) & 0xffffffff);
430 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
431 (value >> 32) & 0xffffffff);
432 #endif
433 }
434
435 #if 0 /* unused */
436 static inline uint32_t
437 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
438 {
439 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
440 }
441 #endif /* unused */
442
443 static inline void
444 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
445 uint32_t value)
446 {
447 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
448 }
449
450 /* --- */
451
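/*
 * Map an endpoint descriptor to the xHCI Endpoint Context "EP Type"
 * field (xHCI 6.2.3): 1-3 are Isoch/Bulk/Interrupt OUT, 4 is Control,
 * 5-7 are Isoch/Bulk/Interrupt IN -- hence the extra 0x4 bit for IN
 * and control endpoints.
 */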
452 static inline uint8_t
453 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
454 {
455 u_int eptype = 0;
456
457 switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
458 case UE_CONTROL:
459 eptype = 0x0;
460 break;
461 case UE_ISOCHRONOUS:
462 eptype = 0x1;
463 break;
464 case UE_BULK:
465 eptype = 0x2;
466 break;
467 case UE_INTERRUPT:
468 eptype = 0x3;
469 break;
470 }
471
472 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
473 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
474 return eptype | 0x4;
475 else
476 return eptype;
477 }
478
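/*
 * Device Context Index for an endpoint: DCI = endpoint number * 2,
 * plus 1 for IN and control endpoints, e.g. EP0 -> DCI 1,
 * EP1 OUT -> DCI 2, EP1 IN -> DCI 3.
 */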
479 static u_int
480 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
481 {
482 /* xHCI 1.0 section 4.5.1 */
483 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
484 u_int in = 0;
485
486 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
487 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
488 in = 1;
489
490 return epaddr * 2 + in;
491 }
492
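/*
 * The input context prepends an Input Control Context at index 0
 * (xHCI 6.2.5), so an Input Context Index is the corresponding
 * Device Context Index shifted up by one.
 */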
493 static inline u_int
494 xhci_dci_to_ici(const u_int i)
495 {
496 return i + 1;
497 }
498
499 static inline void *
500 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
501 const u_int dci)
502 {
503 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
504 }
505
506 #if 0 /* unused */
507 static inline bus_addr_t
508 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
509 const u_int dci)
510 {
511 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
512 }
513 #endif /* unused */
514
515 static inline void *
516 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
517 const u_int ici)
518 {
519 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
520 }
521
522 static inline bus_addr_t
523 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
524 const u_int ici)
525 {
526 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
527 }
528
529 static inline struct xhci_trb *
530 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
531 {
532 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
533 }
534
535 static inline bus_addr_t
536 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
537 {
538 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
539 }
540
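/*
 * Record a TRB in the xfer's private TRB array in host byte order;
 * xhci_trb_put() below writes the little-endian form that the
 * controller actually reads from the ring.
 */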
541 static inline void
542 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx,
543 uint64_t parameter, uint32_t status, uint32_t control)
544 {
545 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb);
546 xx->xx_trb[idx].trb_0 = parameter;
547 xx->xx_trb[idx].trb_2 = status;
548 xx->xx_trb[idx].trb_3 = control;
549 }
550
551 static inline void
552 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
553 uint32_t control)
554 {
555 trb->trb_0 = htole64(parameter);
556 trb->trb_2 = htole32(status);
557 trb->trb_3 = htole32(control);
558 }
559
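/*
 * Convert the TRB pointer reported in an event TRB into an index into
 * the given ring; returns nonzero if the pointer does not fall within
 * the ring on a TRB boundary.
 */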
560 static int
561 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
562 {
563 /* base address of TRBs */
564 bus_addr_t trbp = xhci_ring_trbp(xr, 0);
565
566 /* trb_0 range sanity check */
567 if (trb_0 == 0 || trb_0 < trbp ||
568 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
569 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
570 return 1;
571 }
572 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
573 return 0;
574 }
575
576 static unsigned int
577 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
578 u_int dci)
579 {
580 uint32_t *cp;
581
582 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
583 cp = xhci_slot_get_dcv(sc, xs, dci);
584 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
585 }
586
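/*
 * Return the bus index (0 for the USB 3 root hub, 1 for the USB 2 root
 * hub) recorded for a controller port (1..n) when the protocol
 * capabilities were parsed.
 */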
587 static inline unsigned int
588 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
589 {
590 const unsigned int port = ctlrport - 1;
591 const uint8_t bit = __BIT(port % NBBY);
592
593 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
594 }
595
596 /*
597 * Return the roothub port for a controller port. Both are 1..n.
598 */
599 static inline unsigned int
600 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
601 {
602
603 return sc->sc_ctlrportmap[ctrlport - 1];
604 }
605
606 /*
607 * Return the controller port for a bus roothub port. Both are 1..n.
608 */
609 static inline unsigned int
610 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
611 unsigned int rhport)
612 {
613
614 return sc->sc_rhportmap[bn][rhport - 1];
615 }
616
617 /* --- */
618
619 void
620 xhci_childdet(device_t self, device_t child)
621 {
622 struct xhci_softc * const sc = device_private(self);
623
624 mutex_enter(&sc->sc_intr_lock);
625 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
626 if (child == sc->sc_child2)
627 sc->sc_child2 = NULL;
628 else if (child == sc->sc_child)
629 sc->sc_child = NULL;
630 mutex_exit(&sc->sc_intr_lock);
631 }
632
633 int
634 xhci_detach(struct xhci_softc *sc, int flags)
635 {
636 int rv = 0;
637
638 if (sc->sc_child2 != NULL) {
639 rv = config_detach(sc->sc_child2, flags);
640 if (rv != 0)
641 return rv;
642 KASSERT(sc->sc_child2 == NULL);
643 }
644
645 if (sc->sc_child != NULL) {
646 rv = config_detach(sc->sc_child, flags);
647 if (rv != 0)
648 return rv;
649 KASSERT(sc->sc_child == NULL);
650 }
651
652 /* XXX unconfigure/free slots */
653
654 /* verify: */
655 xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
656 xhci_op_write_4(sc, XHCI_USBCMD, 0);
657 /* do we need to wait for stop? */
658
659 xhci_op_write_8(sc, XHCI_CRCR, 0);
660 xhci_ring_free(sc, &sc->sc_cr);
661 cv_destroy(&sc->sc_command_cv);
662 cv_destroy(&sc->sc_cmdbusy_cv);
663
664 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
665 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
666 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY);
667 xhci_ring_free(sc, &sc->sc_er);
668
669 usb_freemem(&sc->sc_eventst_dma);
670
671 xhci_op_write_8(sc, XHCI_DCBAAP, 0);
672 usb_freemem(&sc->sc_dcbaa_dma);
673
674 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);
675
676 kmem_free(sc->sc_ctlrportbus,
677 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
678 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));
679
680 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
681 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
682 }
683
684 mutex_destroy(&sc->sc_rhlock);
685 mutex_destroy(&sc->sc_lock);
686 mutex_destroy(&sc->sc_intr_lock);
687
688 pool_cache_destroy(sc->sc_xferpool);
689
690 return rv;
691 }
692
693 int
694 xhci_activate(device_t self, enum devact act)
695 {
696 struct xhci_softc * const sc = device_private(self);
697
698 switch (act) {
699 case DVACT_DEACTIVATE:
700 sc->sc_dying = true;
701 return 0;
702 default:
703 return EOPNOTSUPP;
704 }
705 }
706
707 bool
708 xhci_suspend(device_t self, const pmf_qual_t *qual)
709 {
710 struct xhci_softc * const sc = device_private(self);
711 size_t i, j, bn, dci;
712 int port;
713 uint32_t v;
714 usbd_status err;
715 bool ok = false;
716
717 XHCIHIST_FUNC(); XHCIHIST_CALLED();
718
719 /*
720 * Block issuance of new commands, and wait for all pending
721 * commands to complete.
722 */
723 mutex_enter(&sc->sc_lock);
724 KASSERT(sc->sc_suspender == NULL);
725 sc->sc_suspender = curlwp;
726 while (sc->sc_command_addr != 0)
727 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
728 mutex_exit(&sc->sc_lock);
729
730 /*
731 * Block roothub xfers which might touch portsc registers until
732 * we're done suspending.
733 */
734 mutex_enter(&sc->sc_rhlock);
735
736 /*
737 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
738 * xHCI Power Management, p. 342
739 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342
740 */
741
742 /*
743 * `1. Stop all USB activity by issuing Stop Endpoint Commands
744 * for Busy endpoints in the Running state. If the Force
745 * Save Context Capability (FSC = ``0'') is not supported,
746 * then Stop Endpoint Commands shall be issued for all idle
747 * endpoints in the Running state as well. The Stop
748 * Endpoint Command causes the xHC to update the respective
749 * Endpoint or Stream Contexts in system memory, e.g. the
750 * TR Dequeue Pointer, DCS, etc. fields. Refer to
751 * Implementation Note "0".'
752 */
753 for (i = 0; i < sc->sc_maxslots; i++) {
754 struct xhci_slot *xs = &sc->sc_slots[i];
755
756 /* Skip if the slot is not in use. */
757 if (xs->xs_idx == 0)
758 continue;
759
760 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
761 /* Skip if the endpoint is not Running. */
762 /* XXX What about Busy? */
763 if (xhci_get_epstate(sc, xs, dci) !=
764 XHCI_EPSTATE_RUNNING)
765 continue;
766
767 /* Stop endpoint. */
768 mutex_enter(&sc->sc_lock);
769 err = xhci_stop_endpoint_cmd(sc, xs, dci,
770 XHCI_TRB_3_SUSP_EP_BIT);
771 mutex_exit(&sc->sc_lock);
772 if (err) {
773 device_printf(self, "failed to stop endpoint"
774 " slot %zu dci %zu err %d\n",
775 i, dci, err);
776 goto out;
777 }
778 }
779 }
780
781 /*
782 * Next, suspend all the ports:
783 *
784 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15:
785 * Suspend-Resume, pp. 276-283
786 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276
787 */
788 for (bn = 0; bn < 2; bn++) {
789 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
790 /* 4.15.1: Port Suspend. */
791 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
792
793 /*
794 * `System software places individual ports
795 * into suspend mode by writing a ``3'' into
796 * the appropriate PORTSC register Port Link
797 * State (PLS) field (refer to Section 5.4.8).
798 * Software should only set the PLS field to
799 * ``3'' when the port is in the Enabled
800 * state.'
801 *
802 * `Software should not attempt to suspend a
803 * port unless the port reports that it is in
804 * the enabled (PED = ``1''; PLS < ``3'')
805 * state (refer to Section 5.4.8 for more
806 * information about PED and PLS).'
807 */
808 v = xhci_op_read_4(sc, port);
809 if (((v & XHCI_PS_PED) == 0) ||
810 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3)
811 continue;
812 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
813 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3);
814 xhci_op_write_4(sc, port, v);
815
816 /*
817 * `When the PLS field is written with U3
818 * (``3''), the status of the PLS bit will not
819 * change to the target U state U3 until the
820 * suspend signaling has completed to the
821 * attached device (which may be as long as
822 * 10ms.).'
823 *
824 * `Software is required to wait for U3
825 * transitions to complete before it puts the
826 * xHC into a low power state, and before
827 * resuming the port.'
828 *
829 * XXX Take advantage of the technique to
830 * reduce polling on host controllers that
831 * support the U3C capability.
832 */
833 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) {
834 v = xhci_op_read_4(sc, port);
835 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3)
836 break;
837 usb_delay_ms(&sc->sc_bus, 1);
838 }
839 if (j == XHCI_WAIT_PLS_U3) {
840 device_printf(self,
841 "suspend timeout on bus %zu port %zu\n",
842 bn, i);
843 goto out;
844 }
845 }
846 }
847
848 /*
849 * `2. Ensure that the Command Ring is in the Stopped state
850 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is
851 * empty), and all Command Completion Events associated
852 * with them have been received.'
853 *
854 * XXX
855 */
856
857 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */
858 xhci_op_write_4(sc, XHCI_USBCMD,
859 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS);
860
861 /*
862 * `4. Read the Operational Runtime, and VTIO registers in the
863 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ,
864 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their
865 * state.'
866 *
867 * (We don't use VTIO here (XXX for now?).)
868 */
869 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
870 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL);
871 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP);
872 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG);
873 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0));
874 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0));
875 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0));
876 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0));
877 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0));
878
879 /*
880 * `5. Set the Controller Save State (CSS) flag in the USBCMD
881 * register (5.4.1)...'
882 */
883 xhci_op_write_4(sc, XHCI_USBCMD,
884 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS);
885
886 /*
887 * `...and wait for the Save State Status (SSS) flag in the
888 * USBSTS register (5.4.2) to transition to ``0''.'
889 */
890 for (i = 0; i < XHCI_WAIT_SSS; i++) {
891 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0)
892 break;
893 usb_delay_ms(&sc->sc_bus, 1);
894 }
895 if (i >= XHCI_WAIT_SSS) {
896 device_printf(self, "suspend timeout, USBSTS.SSS\n");
897 /*
898 * Just optimistically go on and check SRE anyway --
899 * what's the worst that could happen?
900 */
901 }
902
903 /*
904 * `Note: After a Save or Restore operation completes, the
905 * Save/Restore Error (SRE) flag in the USBSTS register should
906 * be checked to ensure that the operation completed
907 * successfully.'
908 */
909 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
910 device_printf(self, "suspend error, USBSTS.SRE\n");
911 goto out;
912 }
913
914 /* Success! */
915 ok = true;
916
917 out: mutex_exit(&sc->sc_rhlock);
918 if (!ok) {
919 /*
920 * If suspend failed, stop holding up command issuance
921 * and make it fail instead.
922 */
923 mutex_enter(&sc->sc_lock);
924 KASSERT(sc->sc_suspender == curlwp);
925 sc->sc_suspender = NULL;
926 sc->sc_suspendresume_failed = true;
927 cv_broadcast(&sc->sc_cmdbusy_cv);
928 mutex_exit(&sc->sc_lock);
929 }
930 return ok;
931 }
932
933 bool
934 xhci_resume(device_t self, const pmf_qual_t *qual)
935 {
936 struct xhci_softc * const sc = device_private(self);
937 size_t i, j, bn, dci;
938 int port;
939 uint32_t v;
940 bool ok = false;
941
942 XHCIHIST_FUNC(); XHCIHIST_CALLED();
943
944 /*
945 * If resume had previously failed, just try again. Can't make
946 * things worse, probably.
947 */
948 mutex_enter(&sc->sc_lock);
949 if (sc->sc_suspendresume_failed) {
950 KASSERT(sc->sc_suspender == NULL);
951 sc->sc_suspender = curlwp;
952 sc->sc_suspendresume_failed = false;
953 }
954 KASSERT(sc->sc_suspender);
955 mutex_exit(&sc->sc_lock);
956
957 /*
958 * Block roothub xfers which might touch portsc registers until
959 * we're done resuming.
960 */
961 mutex_enter(&sc->sc_rhlock);
962
963 /*
964 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
965 * xHCI Power Management, p. 343
966 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343
967 */
968
969 /*
970 * `4. Restore the Operational Runtime, and VTIO registers with
971 * their previously saved state in the following order:
972 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN,
973 * IMOD, and VTIO.'
974 *
975 * (We don't use VTIO here (for now?).)
976 */
977 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd);
978 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl);
979 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap);
980 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config);
981 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0);
982 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0);
983 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0);
984 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0);
985 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0);
986
987 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */
988
989 /*
990 * `5. Set the Controller Restore State (CRS) flag in the
991 * USBCMD register (5.4.1) to ``1''...'
992 */
993 xhci_op_write_4(sc, XHCI_USBCMD,
994 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS);
995
996 /*
997 * `...and wait for the Restore State Status (RSS) in the
998 * USBSTS register (5.4.2) to transition to ``0''.'
999 */
1000 for (i = 0; i < XHCI_WAIT_RSS; i++) {
1001 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0)
1002 break;
1003 usb_delay_ms(&sc->sc_bus, 1);
1004 }
1005 if (i >= XHCI_WAIT_RSS) {
1006 device_printf(self, "resume timeout, USBSTS.RSS\n");
1007 goto out;
1008 }
1009
1010 /*
1011 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits
1012 * are consistent with the RCS values to be written to the
1013 * CRCR.'
1014 *
1015 * XXX Hope just zeroing it is good enough!
1016 */
1017 xhci_host_dequeue(sc->sc_cr);
1018
1019 /*
1020 * `7. Write the CRCR with the address and RCS value of the
1021 * reinitialized Command Ring. Note that this write will
1022 * cause the Command Ring to restart at the address
1023 * specified by the CRCR.'
1024 */
1025 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1026 sc->sc_cr->xr_cs);
1027
1028 /*
1029 * `8. Enable the controller by setting Run/Stop (R/S) =
1030 * ``1''.'
1031 */
1032 xhci_op_write_4(sc, XHCI_USBCMD,
1033 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS);
1034
1035 /*
1036 * `9. Software shall walk the USB topology and initialize each
1037 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and
1038 * external hub ports attached to USB devices.'
1039 *
1040 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2
1041 * `Port Resume', 4.15.2.2 `Host Initiated'.
1042 *
1043 * XXX We should maybe batch up initiating the state
1044 * transitions, and then wait for them to complete all at once.
1045 */
1046 for (bn = 0; bn < 2; bn++) {
1047 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
1048 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
1049
1050 /* `When a port is in the U3 state: ...' */
1051 v = xhci_op_read_4(sc, port);
1052 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3)
1053 continue;
1054
1055 /*
1056 * `For a USB2 protocol port, software shall
1057 * write a ``15'' (Resume) to the PLS field to
1058 * initiate resume signaling. The port shall
1059 * transition to the Resume substate and the
1060 * xHC shall transmit the resume signaling
1061 * within 1ms (T_URSM). Software shall ensure
1062 * that resume is signaled for at least 20ms
1063 * (T_DRSMDN). Software shall start timing
1064 * T_DRSMDN from the write of ``15'' (Resume)
1065 * to PLS.'
1066 */
1067 if (bn == 1) {
1068 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0);
1069 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1070 v |= XHCI_PS_LWS;
1071 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME);
1072 xhci_op_write_4(sc, port, v);
1073 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
1074 } else {
1075 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0);
1076 }
1077
1078 /*
1079 * `For a USB3 protocol port [and a USB2
1080 * protocol port after transitioning to
1081 * Resume], software shall write a ``0'' (U0)
1082 * to the PLS field...'
1083 */
1084 v = xhci_op_read_4(sc, port);
1085 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1086 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0);
1087 xhci_op_write_4(sc, port, v);
1088
1089 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) {
1090 v = xhci_op_read_4(sc, port);
1091 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0)
1092 break;
1093 usb_delay_ms(&sc->sc_bus, 1);
1094 }
1095 if (j == XHCI_WAIT_PLS_U0) {
1096 device_printf(self,
1097 "resume timeout on bus %zu port %zu\n",
1098 bn, i);
1099 goto out;
1100 }
1101 }
1102 }
1103
1104 /*
1105 * `10. Restart each of the previously Running endpoints by
1106 * ringing their doorbells.'
1107 */
1108 for (i = 0; i < sc->sc_maxslots; i++) {
1109 struct xhci_slot *xs = &sc->sc_slots[i];
1110
1111 /* Skip if the slot is not in use. */
1112 if (xs->xs_idx == 0)
1113 continue;
1114
1115 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
1116 /* Skip if the endpoint is not Running. */
1117 if (xhci_get_epstate(sc, xs, dci) !=
1118 XHCI_EPSTATE_RUNNING)
1119 continue;
1120
1121 /* Ring the doorbell. */
1122 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
1123 }
1124 }
1125
1126 /*
1127 * `Note: After a Save or Restore operation completes, the
1128 * Save/Restore Error (SRE) flag in the USBSTS register should
1129 * be checked to ensure that the operation completed
1130 * successfully.'
1131 */
1132 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
1133 device_printf(self, "resume error, USBSTS.SRE\n");
1134 goto out;
1135 }
1136
1137 /* Success! */
1138 ok = true;
1139
1140 out: /*
1141 * Resume command issuance. If the hardware failed to resume,
1142 * well, tough -- deadlocking because everything is held up on
1143 * the suspension, with no opportunity to detach, isn't better
1144 * than timing out waiting for dead hardware.
1145 */
1146 mutex_enter(&sc->sc_lock);
1147 KASSERT(sc->sc_suspender);
1148 sc->sc_suspender = NULL;
1149 sc->sc_suspendresume_failed = !ok;
1150 cv_broadcast(&sc->sc_cmdbusy_cv);
1151 mutex_exit(&sc->sc_lock);
1152
1153 mutex_exit(&sc->sc_rhlock);
1154 return ok;
1155 }
1156
1157 bool
1158 xhci_shutdown(device_t self, int flags)
1159 {
1160 return false;
1161 }
1162
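/*
 * Halt and reset the host controller, waiting for the Controller Not
 * Ready (CNR) flag to clear both before and after the reset.
 */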
1163 static int
1164 xhci_hc_reset(struct xhci_softc * const sc)
1165 {
1166 uint32_t usbcmd, usbsts;
1167 int i;
1168
  1169 	/* Wait for the Controller Not Ready (CNR) flag to clear */
1170 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1171 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1172 if ((usbsts & XHCI_STS_CNR) == 0)
1173 break;
1174 usb_delay_ms(&sc->sc_bus, 1);
1175 }
1176 if (i >= XHCI_WAIT_CNR) {
1177 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
1178 return EIO;
1179 }
1180
1181 /* Halt controller */
1182 usbcmd = 0;
1183 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1184 usb_delay_ms(&sc->sc_bus, 1);
1185
1186 /* Reset controller */
1187 usbcmd = XHCI_CMD_HCRST;
1188 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1189 for (i = 0; i < XHCI_WAIT_HCRST; i++) {
1190 /*
  1191 		 * Wait 1ms first: existing Intel xHCI controllers require a
  1192 		 * 1ms delay after setting HCRST to prevent a system hang (errata).
1193 */
1194 usb_delay_ms(&sc->sc_bus, 1);
1195 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
1196 if ((usbcmd & XHCI_CMD_HCRST) == 0)
1197 break;
1198 }
1199 if (i >= XHCI_WAIT_HCRST) {
1200 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
1201 return EIO;
1202 }
1203
  1204 	/* Wait for the Controller Not Ready (CNR) flag to clear after reset */
1205 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1206 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1207 if ((usbsts & XHCI_STS_CNR) == 0)
1208 break;
1209 usb_delay_ms(&sc->sc_bus, 1);
1210 }
1211 if (i >= XHCI_WAIT_CNR) {
1212 aprint_error_dev(sc->sc_dev,
1213 "controller not ready timeout after reset\n");
1214 return EIO;
1215 }
1216
1217 return 0;
1218 }
1219
1220 /* 7.2 xHCI Support Protocol Capability */
1221 static void
1222 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
1223 {
1224 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1225
1226 /* XXX Cache this lot */
1227
1228 const uint32_t w0 = xhci_read_4(sc, ecp);
1229 const uint32_t w4 = xhci_read_4(sc, ecp + 4);
1230 const uint32_t w8 = xhci_read_4(sc, ecp + 8);
1231 const uint32_t wc = xhci_read_4(sc, ecp + 0xc);
1232
1233 aprint_debug_dev(sc->sc_dev,
1234 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc);
1235
1236 if (w4 != XHCI_XECP_USBID)
1237 return;
1238
1239 const int major = XHCI_XECP_SP_W0_MAJOR(w0);
1240 const int minor = XHCI_XECP_SP_W0_MINOR(w0);
1241 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
1242 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);
1243
1244 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
1245 switch (mm) {
1246 case 0x0200:
1247 case 0x0300:
1248 case 0x0301:
1249 case 0x0310:
1250 case 0x0320:
1251 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
1252 major == 3 ? "ss" : "hs", cpo, cpo + cpc - 1);
1253 if (major == 3)
1254 sc->sc_usb3nports += cpo + cpc - 1;
1255 else
1256 sc->sc_usb2nports += cpo + cpc - 1;
1257 break;
1258 default:
1259 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
1260 major, minor);
1261 return;
1262 }
1263
1264 const size_t bus = (major == 3) ? 0 : 1;
1265
1266 /* Index arrays with 0..n-1 where ports are numbered 1..n */
1267 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
1268 if (sc->sc_ctlrportmap[cp] != 0) {
1269 aprint_error_dev(sc->sc_dev, "controller port %zu "
  1270 			    "already assigned\n", cp);
1271 continue;
1272 }
1273
1274 sc->sc_ctlrportbus[cp / NBBY] |=
1275 bus == 0 ? 0 : __BIT(cp % NBBY);
1276
1277 const size_t rhp = sc->sc_rhportcount[bus]++;
1278
1279 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
1280 "bus %zu rhp %zu is %d", bus, rhp,
1281 sc->sc_rhportmap[bus][rhp]);
1282
1283 sc->sc_rhportmap[bus][rhp] = cp + 1;
1284 sc->sc_ctlrportmap[cp] = rhp + 1;
1285 }
1286 }
1287
1288 /* Process extended capabilities */
1289 static void
1290 xhci_ecp(struct xhci_softc *sc)
1291 {
1292 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1293
1294 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4;
1295 while (ecp != 0) {
1296 uint32_t ecr = xhci_read_4(sc, ecp);
1297 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
1298 switch (XHCI_XECP_ID(ecr)) {
1299 case XHCI_ID_PROTOCOLS: {
1300 xhci_id_protocols(sc, ecp);
1301 break;
1302 }
1303 case XHCI_ID_USB_LEGACY: {
1304 uint8_t bios_sem;
1305
1306 /* Take host controller ownership from BIOS */
1307 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
1308 if (bios_sem) {
1309 /* sets xHCI to be owned by OS */
1310 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
1311 aprint_debug_dev(sc->sc_dev,
1312 "waiting for BIOS to give up control\n");
1313 for (int i = 0; i < 5000; i++) {
1314 bios_sem = xhci_read_1(sc, ecp +
1315 XHCI_XECP_BIOS_SEM);
1316 if (bios_sem == 0)
1317 break;
1318 DELAY(1000);
1319 }
1320 if (bios_sem) {
1321 aprint_error_dev(sc->sc_dev,
1322 "timed out waiting for BIOS\n");
1323 }
1324 }
1325 break;
1326 }
1327 default:
1328 break;
1329 }
1330 ecr = xhci_read_4(sc, ecp);
1331 if (XHCI_XECP_NEXT(ecr) == 0) {
1332 ecp = 0;
1333 } else {
1334 ecp += XHCI_XECP_NEXT(ecr) * 4;
1335 }
1336 }
1337 }
1338
1339 #define XHCI_HCCPREV1_BITS \
1340 "\177\020" /* New bitmask */ \
1341 "f\020\020XECP\0" \
1342 "f\014\4MAXPSA\0" \
1343 "b\013CFC\0" \
1344 "b\012SEC\0" \
1345 "b\011SBD\0" \
1346 "b\010FSE\0" \
1347 "b\7NSS\0" \
1348 "b\6LTC\0" \
1349 "b\5LHRC\0" \
1350 "b\4PIND\0" \
1351 "b\3PPC\0" \
1352 "b\2CZC\0" \
1353 "b\1BNC\0" \
1354 "b\0AC64\0" \
1355 "\0"
1356 #define XHCI_HCCV1_x_BITS \
1357 "\177\020" /* New bitmask */ \
1358 "f\020\020XECP\0" \
1359 "f\014\4MAXPSA\0" \
1360 "b\013CFC\0" \
1361 "b\012SEC\0" \
1362 "b\011SPC\0" \
1363 "b\010PAE\0" \
1364 "b\7NSS\0" \
1365 "b\6LTC\0" \
1366 "b\5LHRC\0" \
1367 "b\4PIND\0" \
1368 "b\3PPC\0" \
1369 "b\2CSZ\0" \
1370 "b\1BNC\0" \
1371 "b\0AC64\0" \
1372 "\0"
1373
1374 #define XHCI_HCC2_BITS \
1375 "\177\020" /* New bitmask */ \
1376 "b\7ETC_TSC\0" \
1377 "b\6ETC\0" \
1378 "b\5CIC\0" \
1379 "b\4LEC\0" \
1380 "b\3CTC\0" \
1381 "b\2FSC\0" \
1382 "b\1CMC\0" \
1383 "b\0U3C\0" \
1384 "\0"
1385
1386 void
1387 xhci_start(struct xhci_softc *sc)
1388 {
1389 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
1390 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
  1391 		/* Intel xHCI needs the interrupt rate moderated. */
1392 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
1393 else
1394 xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
1395 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
1396 xhci_rt_read_4(sc, XHCI_IMOD(0)));
1397
1398 /* Go! */
1399 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
1400 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n",
1401 xhci_op_read_4(sc, XHCI_USBCMD));
1402 }
1403
1404 int
1405 xhci_init(struct xhci_softc *sc)
1406 {
1407 bus_size_t bsz;
1408 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff;
1409 uint32_t pagesize, config;
1410 int i = 0;
1411 uint16_t hciversion;
1412 uint8_t caplength;
1413
1414 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1415
1416 /* Set up the bus struct for the usb 3 and usb 2 buses */
1417 sc->sc_bus.ub_methods = &xhci_bus_methods;
1418 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
1419 sc->sc_bus.ub_usedma = true;
1420 sc->sc_bus.ub_hcpriv = sc;
1421
1422 sc->sc_bus2.ub_methods = &xhci_bus_methods;
1423 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
1424 sc->sc_bus2.ub_revision = USBREV_2_0;
1425 sc->sc_bus2.ub_usedma = true;
1426 sc->sc_bus2.ub_hcpriv = sc;
1427 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;
1428
1429 caplength = xhci_read_1(sc, XHCI_CAPLENGTH);
1430 hciversion = xhci_read_2(sc, XHCI_HCIVERSION);
1431
1432 if (hciversion < XHCI_HCIVERSION_0_96 ||
1433 hciversion >= 0x0200) {
1434 aprint_normal_dev(sc->sc_dev,
1435 "xHCI version %x.%x not known to be supported\n",
1436 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1437 } else {
1438 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
1439 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1440 }
1441
1442 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
1443 &sc->sc_cbh) != 0) {
1444 aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
1445 return ENOMEM;
1446 }
1447
1448 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
1449 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
1450 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
1451 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
1452 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
1453 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
1454 aprint_debug_dev(sc->sc_dev,
1455 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);
1456
1457 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
1458 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32;
1459
1460 char sbuf[128];
1461 if (hciversion < XHCI_HCIVERSION_1_0)
1462 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc);
1463 else
1464 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc);
1465 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
1466 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n",
1467 XHCI_HCC_XECP(sc->sc_hcc) * 4);
1468 if (hciversion >= XHCI_HCIVERSION_1_1) {
1469 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
1470 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2);
1471 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
1472 }
1473
1474 /* default all ports to bus 0, i.e. usb 3 */
1475 sc->sc_ctlrportbus = kmem_zalloc(
1476 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
1477 sc->sc_ctlrportmap =
1478 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1479
1480 /* controller port to bus roothub port map */
1481 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
1482 sc->sc_rhportmap[j] =
1483 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1484 }
1485
1486 /*
1487 * Process all Extended Capabilities
1488 */
1489 xhci_ecp(sc);
1490
1491 bsz = XHCI_PORTSC(sc->sc_maxports);
1492 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
1493 &sc->sc_obh) != 0) {
1494 aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
1495 return ENOMEM;
1496 }
1497
1498 dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
1499 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
1500 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
1501 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
1502 return ENOMEM;
1503 }
1504
1505 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
1506 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
1507 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
1508 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
1509 return ENOMEM;
1510 }
1511
1512 int rv;
1513 rv = xhci_hc_reset(sc);
1514 if (rv != 0) {
1515 return rv;
1516 }
1517
1518 if (sc->sc_vendor_init)
1519 sc->sc_vendor_init(sc);
1520
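	/*
	 * PAGESIZE is a bitmap: bit n set means the controller supports
	 * a page size of 2^(n+12) bytes (xHCI 5.4.3).  Use the smallest
	 * supported size.
	 */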
1521 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
1522 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
1523 pagesize = ffs(pagesize);
1524 if (pagesize == 0) {
1525 aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
1526 return EIO;
1527 }
1528 sc->sc_pgsz = 1 << (12 + (pagesize - 1));
1529 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
1530 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
1531 (uint32_t)sc->sc_maxslots);
1532 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);
1533
1534 int err;
1535 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
1536 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
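	/*
	 * Allocate the scratchpad buffer array and one page-sized buffer
	 * per entry for the controller's private use (xHCI 4.20); DCBAA
	 * entry 0 is pointed at the array further below.
	 */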
1537 if (sc->sc_maxspbuf != 0) {
1538 err = usb_allocmem(sc->sc_bus.ub_dmatag,
1539 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
1540 USBMALLOC_ZERO, &sc->sc_spbufarray_dma);
1541 if (err) {
1542 aprint_error_dev(sc->sc_dev,
1543 "spbufarray init fail, err %d\n", err);
1544 return ENOMEM;
1545 }
1546
1547 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
1548 sc->sc_maxspbuf, KM_SLEEP);
1549 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
1550 for (i = 0; i < sc->sc_maxspbuf; i++) {
1551 usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
1552 /* allocate contexts */
1553 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz,
1554 sc->sc_pgsz, USBMALLOC_ZERO, dma);
1555 if (err) {
1556 aprint_error_dev(sc->sc_dev,
1557 "spbufarray_dma init fail, err %d\n", err);
1558 rv = ENOMEM;
1559 goto bad1;
1560 }
1561 spbufarray[i] = htole64(DMAADDR(dma, 0));
1562 usb_syncmem(dma, 0, sc->sc_pgsz,
1563 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1564 }
1565
1566 usb_syncmem(&sc->sc_spbufarray_dma, 0,
1567 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
1568 }
1569
1570 sc->sc_isthresh = XHCI_HCS2_IST(hcs2);
1571 aprint_debug_dev(sc->sc_dev, "sc_isthresh %d\n", sc->sc_isthresh);
1572
1573 /*
  1574 	 * xHCI 5.3.4:
  1575 	 * If bit[3] is 0, IST is the number of microframes in bits [2:0].
  1576 	 * If bit[3] is 1, IST is the number of frames in bits [2:0].
1577 */
1578 if (sc->sc_isthresh & 0x8) {
1579 sc->sc_isthresh = (sc->sc_isthresh & 0x7) *
1580 USB_UFRAMES_PER_FRAME;
1581 }
1582
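	/* Program the number of device slots to enable (CONFIG.MaxSlotsEn). */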
1583 config = xhci_op_read_4(sc, XHCI_CONFIG);
1584 config &= ~0xFF;
1585 config |= sc->sc_maxslots & 0xFF;
1586 xhci_op_write_4(sc, XHCI_CONFIG, config);
1587
1588 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
1589 XHCI_COMMAND_RING_SEGMENTS_ALIGN);
1590 if (err) {
1591 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n",
1592 err);
1593 rv = ENOMEM;
1594 goto bad1;
1595 }
1596
1597 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
1598 XHCI_EVENT_RING_SEGMENTS_ALIGN);
1599 if (err) {
1600 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
1601 err);
1602 rv = ENOMEM;
1603 goto bad2;
1604 }
1605
1606 usb_dma_t *dma;
1607 size_t size;
1608 size_t align;
1609
1610 dma = &sc->sc_eventst_dma;
1611 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
1612 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
1613 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
1614 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
1615 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1616 USBMALLOC_ZERO, dma);
1617 if (err) {
1618 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
1619 err);
1620 rv = ENOMEM;
1621 goto bad3;
1622 }
1623
1624 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
1625 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
1626 KERNADDR(&sc->sc_eventst_dma, 0),
1627 sc->sc_eventst_dma.udma_block->size);
1628
1629 dma = &sc->sc_dcbaa_dma;
1630 size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
1631 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
1632 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
1633 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1634 USBMALLOC_ZERO, dma);
1635 if (err) {
1636 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
1637 rv = ENOMEM;
1638 goto bad4;
1639 }
1640 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
1641 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
1642 KERNADDR(&sc->sc_dcbaa_dma, 0),
1643 sc->sc_dcbaa_dma.udma_block->size);
1644
1645 if (sc->sc_maxspbuf != 0) {
1646 /*
  1647 		 * DCBAA entry 0 holds the scratchpad buffer array pointer.
1648 */
1649 *(uint64_t *)KERNADDR(dma, 0) =
1650 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
1651 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1652 }
1653
1654 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
1655 KM_SLEEP);
1656 if (sc->sc_slots == NULL) {
1657 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
1658 rv = ENOMEM;
1659 goto bad;
1660 }
1661
1662 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
1663 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
1664 if (sc->sc_xferpool == NULL) {
1665 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
1666 err);
1667 rv = ENOMEM;
1668 goto bad;
1669 }
1670
1671 cv_init(&sc->sc_command_cv, "xhcicmd");
1672 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
1673 mutex_init(&sc->sc_rhlock, MUTEX_DEFAULT, IPL_NONE);
1674 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
1675 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
1676
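	/*
	 * Point the single event ring segment table entry at the event
	 * ring, then hand the table address, its size, and the initial
	 * dequeue pointer to interrupter 0.
	 */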
1677 struct xhci_erste *erst;
1678 erst = KERNADDR(&sc->sc_eventst_dma, 0);
1679 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0));
1680 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb);
1681 erst[0].erste_3 = htole32(0);
1682 usb_syncmem(&sc->sc_eventst_dma, 0,
1683 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);
1684
1685 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
1686 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
1687 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) |
1688 XHCI_ERDP_BUSY);
1689
1690 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
1691 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1692 sc->sc_cr->xr_cs);
1693
1694 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
1695 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);
1696
1697 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
1698 xhci_start(sc);
1699
1700 return 0;
1701
1702 bad:
1703 if (sc->sc_xferpool) {
1704 pool_cache_destroy(sc->sc_xferpool);
1705 sc->sc_xferpool = NULL;
1706 }
1707
1708 if (sc->sc_slots) {
1709 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
1710 sc->sc_maxslots);
1711 sc->sc_slots = NULL;
1712 }
1713
1714 usb_freemem(&sc->sc_dcbaa_dma);
1715 bad4:
1716 usb_freemem(&sc->sc_eventst_dma);
1717 bad3:
1718 xhci_ring_free(sc, &sc->sc_er);
1719 bad2:
1720 xhci_ring_free(sc, &sc->sc_cr);
1721 i = sc->sc_maxspbuf;
1722 bad1:
1723 for (int j = 0; j < i; j++)
1724 usb_freemem(&sc->sc_spbuf_dma[j]);
1725 usb_freemem(&sc->sc_spbufarray_dma);
1726
1727 return rv;
1728 }
1729
1730 static inline bool
1731 xhci_polling_p(struct xhci_softc * const sc)
1732 {
1733 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
1734 }
1735
1736 int
1737 xhci_intr(void *v)
1738 {
1739 struct xhci_softc * const sc = v;
1740 int ret = 0;
1741
1742 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1743
1744 if (sc == NULL)
1745 return 0;
1746
1747 mutex_spin_enter(&sc->sc_intr_lock);
1748
1749 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1750 goto done;
1751
1752 /* If we get an interrupt while polling, then just ignore it. */
1753 if (xhci_polling_p(sc)) {
1754 #ifdef DIAGNOSTIC
1755 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1756 #endif
1757 goto done;
1758 }
1759
1760 ret = xhci_intr1(sc);
1761 if (ret) {
1762 KASSERT(sc->sc_child || sc->sc_child2);
1763
1764 /*
  1765 		 * One of the child busses could already be detached.  It doesn't
  1766 		 * matter which of the two the softintr is scheduled on.
1767 */
1768 if (sc->sc_child)
1769 usb_schedsoftintr(&sc->sc_bus);
1770 else
1771 usb_schedsoftintr(&sc->sc_bus2);
1772 }
1773 done:
1774 mutex_spin_exit(&sc->sc_intr_lock);
1775 return ret;
1776 }
1777
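/*
 * Check whether this controller raised the interrupt, acknowledge it
 * in USBSTS and IMAN(0), and return nonzero if the caller should
 * schedule the soft interrupt handler to process the event ring.
 */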
1778 int
1779 xhci_intr1(struct xhci_softc * const sc)
1780 {
1781 uint32_t usbsts;
1782 uint32_t iman;
1783
1784 XHCIHIST_FUNC();
1785
1786 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1787 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
1788 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
1789 XHCI_STS_HCE)) == 0) {
1790 DPRINTFN(16, "ignored intr not for %jd",
1791 device_unit(sc->sc_dev), 0, 0, 0);
1792 return 0;
1793 }
1794
1795 /*
  1796 	 * Clear EINT and other transient flags so that the next shared
  1797 	 * interrupt is not misinterpreted.  Also, to avoid a race, EINT
  1798 	 * must be cleared before XHCI_IMAN_INTR_PEND is cleared.
1799 */
1800 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & ~XHCI_STS_RSVDP0);
1801
1802 #ifdef XHCI_DEBUG
1803 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1804 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1805 #endif
1806
1807 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1808 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1809 iman |= XHCI_IMAN_INTR_PEND;
1810 xhci_rt_write_4(sc, XHCI_IMAN(0), iman);
1811
1812 #ifdef XHCI_DEBUG
1813 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1814 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1815 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1816 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1817 #endif
1818
1819 return 1;
1820 }
1821
1822 /*
1823  * 3 port speed types used in the USB stack
1824  *
1825  * usbdi speed
1826  *	definition: USB_SPEED_* in usb.h
1827  *	These are used in struct usbd_device in the USB stack.
1828  *	The ioctl interface uses these values too.
1829  * port_status speed
1830  *	definition: UPS_*_SPEED in usb.h
1831  *	These are used in usb_port_status_t and are valid only for USB 2.0.
1832  *	The speed value is always 0 for Super Speed or faster, and
1833  *	dwExtPortStatus of usb_port_status_ext_t indicates the port speed.
1834  *	Note that some 3.0 values overlap with 2.0 values.
1835  *	(e.g. 0x200 means UPS_PORT_POWER_SS in SS and
1836  *	 means UPS_LOW_SPEED in HS.)
1837  *	Port status returned from a hub also uses these values.
1838  *	On NetBSD, UPS_OTHER_SPEED indicates that the port speed is
1839  *	Super Speed or faster.
1840  * xspeed:
1841  *	definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
1842  *	These are used only in the slot context and the PORTSC registers
1843  *	of xhci.  The difference between usbdi speed and xspeed is
1844  *	that the FS and LS values are swapped.
1845  */
1846
1847 /* convert usbdi speed to xspeed */
1848 static int
1849 xhci_speed2xspeed(int speed)
1850 {
1851 switch (speed) {
1852 case USB_SPEED_LOW: return 2;
1853 case USB_SPEED_FULL: return 1;
1854 default: return speed;
1855 }
1856 }
1857
1858 #if 0
1859 /* convert xspeed to usbdi speed */
1860 static int
1861 xhci_xspeed2speed(int xspeed)
1862 {
1863 switch (xspeed) {
1864 case 1: return USB_SPEED_FULL;
1865 case 2: return USB_SPEED_LOW;
1866 default: return xspeed;
1867 }
1868 }
1869 #endif
1870
1871 /* convert xspeed to port status speed */
1872 static int
1873 xhci_xspeed2psspeed(int xspeed)
1874 {
1875 switch (xspeed) {
1876 case 0: return 0;
1877 case 1: return UPS_FULL_SPEED;
1878 case 2: return UPS_LOW_SPEED;
1879 case 3: return UPS_HIGH_SPEED;
1880 default: return UPS_OTHER_SPEED;
1881 }
1882 }
1883
1884 /*
1885 * Construct input contexts and issue TRB to open pipe.
1886 */
1887 static usbd_status
1888 xhci_configure_endpoint(struct usbd_pipe *pipe)
1889 {
1890 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1891 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1892 #ifdef USB_DEBUG
1893 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1894 #endif
1895 struct xhci_soft_trb trb;
1896 usbd_status err;
1897
1898 XHCIHIST_FUNC();
1899 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
1900 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
1901 pipe->up_endpoint->ue_edesc->bmAttributes);
1902
1903 /* XXX ensure input context is available? */
1904
1905 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz);
1906
1907 /* set up context */
1908 xhci_setup_ctx(pipe);
1909
1910 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0),
1911 sc->sc_ctxsz * 1);
1912 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs,
1913 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1);
1914
1915 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1916 trb.trb_2 = 0;
1917 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1918 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1919
1920 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
1921
1922 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1923 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci),
1924 sc->sc_ctxsz * 1);
1925
1926 return err;
1927 }
1928
1929 #if 0
1930 static usbd_status
1931 xhci_unconfigure_endpoint(struct usbd_pipe *pipe)
1932 {
1933 #ifdef USB_DEBUG
1934 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1935 #endif
1936
1937 XHCIHIST_FUNC();
1938 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
1939
1940 return USBD_NORMAL_COMPLETION;
1941 }
1942 #endif
1943
1944 /* 4.6.8, 6.4.3.7 */
1945 static void
1946 xhci_reset_endpoint(struct usbd_pipe *pipe)
1947 {
1948 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1949 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1950 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1951 struct xhci_soft_trb trb;
1952
1953 XHCIHIST_FUNC();
1954 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1955
1956 KASSERT(mutex_owned(&sc->sc_lock));
1957
1958 trb.trb_0 = 0;
1959 trb.trb_2 = 0;
1960 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1961 XHCI_TRB_3_EP_SET(dci) |
1962 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP);
1963
1964 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
1965 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
1966 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
1967 }
1968 }
1969
1970 /*
1971 * 4.6.9, 6.4.3.8
1972 * Stop execution of TDs on xfer ring.
1973 * Should be called with sc_lock held.
1974 */
1975 static usbd_status
1976 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci,
1977 uint32_t trb3flags)
1978 {
1979 struct xhci_soft_trb trb;
1980 usbd_status err;
1981
1982 XHCIHIST_FUNC();
1983 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1984
1985 KASSERT(mutex_owned(&sc->sc_lock));
1986
1987 trb.trb_0 = 0;
1988 trb.trb_2 = 0;
1989 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1990 XHCI_TRB_3_EP_SET(dci) |
1991 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) |
1992 trb3flags;
1993
1994 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1995
1996 return err;
1997 }
1998
1999 static usbd_status
2000 xhci_stop_endpoint(struct usbd_pipe *pipe)
2001 {
2002 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2003 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2004 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2005
2006 XHCIHIST_FUNC();
2007 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
2008
2009 KASSERT(mutex_owned(&sc->sc_lock));
2010
2011 return xhci_stop_endpoint_cmd(sc, xs, dci, 0);
2012 }
2013
2014 /*
2015  * Set TR Dequeue Pointer.
2016  * xHCI 1.1 4.6.10 6.4.3.9
2017  * Purge all of the TRBs on the ring and reinitialize the ring.
2018  * Set the TR Dequeue Pointer to 0 and the Cycle State to 1.
2019  * The EPSTATE of the endpoint must be ERROR or STOPPED, otherwise a
2020  * CONTEXT_STATE error will be generated.
2021  */
2022 static void
2023 xhci_set_dequeue(struct usbd_pipe *pipe)
2024 {
2025 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2026 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2027 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2028 struct xhci_ring * const xr = xs->xs_xr[dci];
2029 struct xhci_soft_trb trb;
2030
2031 XHCIHIST_FUNC();
2032 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
2033
2034 KASSERT(mutex_owned(&sc->sc_lock));
2035 KASSERT(xr != NULL);
2036
2037 xhci_host_dequeue(xr);
2038
2039 /* set DCS */
2040 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
2041 trb.trb_2 = 0;
2042 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2043 XHCI_TRB_3_EP_SET(dci) |
2044 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
2045
2046 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
2047 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
2048 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
2049 }
2050 }
2051
2052 /*
2053  * Open a new pipe: called from usbd_setup_pipe_flags.
2054  * Fills in the methods of the pipe.
2055  * If the pipe is not for ep0, calls xhci_configure_endpoint.
2056  */
2057 static usbd_status
2058 xhci_open(struct usbd_pipe *pipe)
2059 {
2060 struct usbd_device * const dev = pipe->up_dev;
2061 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
2062 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
2063 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2064 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2065 const u_int dci = xhci_ep_get_dci(ed);
2066 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
2067 usbd_status err;
2068
2069 XHCIHIST_FUNC();
2070 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr,
2071 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed);
2072 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx",
2073 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress,
2074 ed->bmAttributes);
2075 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize),
2076 ed->bInterval, 0, 0);
2077
2078 if (sc->sc_dying)
2079 return USBD_IOERROR;
2080
2081 /* Root Hub */
2082 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) {
2083 switch (ed->bEndpointAddress) {
2084 case USB_CONTROL_ENDPOINT:
2085 pipe->up_methods = &roothub_ctrl_methods;
2086 break;
2087 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
2088 pipe->up_methods = &xhci_root_intr_methods;
2089 break;
2090 default:
2091 pipe->up_methods = NULL;
2092 DPRINTFN(0, "bad bEndpointAddress 0x%02jx",
2093 ed->bEndpointAddress, 0, 0, 0);
2094 return USBD_INVAL;
2095 }
2096 return USBD_NORMAL_COMPLETION;
2097 }
2098
2099 usb_init_task(&xpipe->xp_async_task, xhci_pipe_restart_async_task,
2100 pipe, USB_TASKQ_MPSAFE);
2101
2102 switch (xfertype) {
2103 case UE_CONTROL:
2104 pipe->up_methods = &xhci_device_ctrl_methods;
2105 break;
2106 case UE_ISOCHRONOUS:
2107 pipe->up_methods = &xhci_device_isoc_methods;
2108 pipe->up_serialise = false;
2109 xpipe->xp_isoc_next = -1;
2110 break;
2111 case UE_BULK:
2112 pipe->up_methods = &xhci_device_bulk_methods;
2113 break;
2114 case UE_INTERRUPT:
2115 pipe->up_methods = &xhci_device_intr_methods;
2116 break;
2117 default:
2118 		return USBD_IOERROR;
2120 }
2121
2122 KASSERT(xs != NULL);
2123 KASSERT(xs->xs_xr[dci] == NULL);
2124
2125 /* allocate transfer ring */
2126 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS,
2127 XHCI_TRB_ALIGN);
2128 if (err) {
2129 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0);
2130 return err;
2131 }
2132
2133 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT)
2134 return xhci_configure_endpoint(pipe);
2135
2136 return USBD_NORMAL_COMPLETION;
2137 }
2138
2139 /*
2140  * Close a pipe, called from usbd_kill_pipe via the close methods.
2141  * If the endpoint being closed is ep0, disable the slot.
2142  * Should be called with sc_lock held.
2143  */
2144 static void
2145 xhci_close_pipe(struct usbd_pipe *pipe)
2146 {
2147 struct xhci_pipe * const xp =
2148 container_of(pipe, struct xhci_pipe, xp_pipe);
2149 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2150 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2151 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2152 const u_int dci = xhci_ep_get_dci(ed);
2153 struct xhci_soft_trb trb;
2154 uint32_t *cp;
2155
2156 XHCIHIST_FUNC();
2157
2158 usb_rem_task_wait(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC,
2159 &sc->sc_lock);
2160
2161 if (sc->sc_dying)
2162 return;
2163
2164 /* xs is uninitialized before xhci_init_slot */
2165 if (xs == NULL || xs->xs_idx == 0)
2166 return;
2167
2168 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2169 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2170
2171 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
2172 KASSERT(mutex_owned(&sc->sc_lock));
2173
2174 if (pipe->up_dev->ud_depth == 0)
2175 return;
2176
2177 if (dci == XHCI_DCI_EP_CONTROL) {
2178 DPRINTFN(4, "closing ep0", 0, 0, 0, 0);
2179 /* This frees all rings */
2180 xhci_disable_slot(sc, xs->xs_idx);
2181 return;
2182 }
2183
2184 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED)
2185 (void)xhci_stop_endpoint(pipe);
2186
2187 	/*
2188 	 * Set the appropriate bit so that the endpoint is dropped.
2189 	 * Don't set the DC bit to 1, otherwise all endpoints
2190 	 * would be deconfigured.
2191 	 */
2192 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
2193 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));
2194 cp[1] = htole32(0);
2195
2196 /* XXX should be most significant one, not dci? */
2197 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
2198 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci));
2199
2200 	/* Configuring the ep context performs an implicit dequeue. */
2201 xhci_host_dequeue(xs->xs_xr[dci]);
2202
2203 /* sync input contexts before they are read from memory */
2204 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
2205
2206 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
2207 trb.trb_2 = 0;
2208 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2209 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
2210
2211 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
2212 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2213
2214 xhci_ring_free(sc, &xs->xs_xr[dci]);
2215 xs->xs_xr[dci] = NULL;
2216 }
2217
2218 /*
2219 * Abort transfer. Must be called with sc_lock held. Releases and
2220 * reacquires sc_lock to sleep until hardware acknowledges abort.
2221 */
2222 static void
2223 xhci_abortx(struct usbd_xfer *xfer)
2224 {
2225 XHCIHIST_FUNC();
2226 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2227
2228 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
2229 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
2230
2231 KASSERT(mutex_owned(&sc->sc_lock));
2232 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2233 xfer->ux_status == USBD_TIMEOUT),
2234 "bad abort status: %d", xfer->ux_status);
2235
2236 xhci_pipe_restart(xfer->ux_pipe);
2237
2238 DPRINTFN(14, "end", 0, 0, 0, 0);
2239 }
2240
2241 static void
2242 xhci_host_dequeue(struct xhci_ring * const xr)
2243 {
2244 /* When dequeueing the controller, update our struct copy too */
2245 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE);
2246 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE,
2247 BUS_DMASYNC_PREWRITE);
2248 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies));
2249
2250 xr->xr_ep = 0;
2251 xr->xr_cs = 1;
2252 }
2253
2254 /*
2255 * Recover STALLed endpoint, or stop endpoint to abort a pipe.
2256 * xHCI 1.1 sect 4.10.2.1
2257 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove
2258 * all transfers on transfer ring.
2259 */
2260 static void
2261 xhci_pipe_restart(struct usbd_pipe *pipe)
2262 {
2263 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2264 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2265 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2266
2267 XHCIHIST_FUNC();
2268 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2269 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2270
2271 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2272
2273 /*
2274 * - If the endpoint is halted, indicating a stall, reset it.
2275 * - If the endpoint is stopped, we're already good.
2276 * - Otherwise, someone wanted to abort the pipe, so stop the
2277 * endpoint.
2278 *
2279 * In any case, clear the ring.
2280 */
2281 switch (xhci_get_epstate(sc, xs, dci)) {
2282 case XHCI_EPSTATE_HALTED:
2283 xhci_reset_endpoint(pipe);
2284 break;
2285 case XHCI_EPSTATE_STOPPED:
2286 break;
2287 default:
2288 xhci_stop_endpoint(pipe);
2289 break;
2290 }
2291
2292 switch (xhci_get_epstate(sc, xs, dci)) {
2293 case XHCI_EPSTATE_STOPPED:
2294 break;
2295 case XHCI_EPSTATE_ERROR:
2296 device_printf(sc->sc_dev, "endpoint 0x%x error\n",
2297 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2298 break;
2299 default:
2300 device_printf(sc->sc_dev, "endpoint 0x%x failed to stop\n",
2301 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2302 }
2303
2304 xhci_set_dequeue(pipe);
2305
2306 DPRINTFN(4, "ends", 0, 0, 0, 0);
2307 }
2308
2309 static void
2310 xhci_pipe_restart_async_task(void *cookie)
2311 {
2312 struct usbd_pipe * const pipe = cookie;
2313 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2314 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2315 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2316 struct xhci_ring * const tr = xs->xs_xr[dci];
2317 struct usbd_xfer *xfer;
2318
2319 XHCIHIST_FUNC();
2320 XHCIHIST_CALLARGS("sc=%#jx pipe=%#jx",
2321 (uintptr_t)sc, (uintptr_t)pipe, 0, 0);
2322
2323 mutex_enter(&sc->sc_lock);
2324
2325 xhci_pipe_restart(pipe);
2326
2327 /*
2328 * We halted our own queue because it stalled. Mark it no
2329 * longer halted and start issuing queued transfers again.
2330 */
2331 tr->is_halted = false;
2332 xfer = SIMPLEQ_FIRST(&pipe->up_queue);
2333 if (xfer) {
2334 /*
2335 * If the first xfer of the queue is not in progress,
2336 * though, there may be a concurrent software abort
2337 * that has already cancelled it and is now in the
2338 * middle of a concurrent xhci_pipe_restart waiting to
2339 * reacquire the pipe (bus) lock. So only restart the
2340 * xfer if it's still USBD_IN_PROGRESS.
2341 *
2342 * Either way, xfers on the queue can't be in
2343 * USBD_NOT_STARTED.
2344 */
2345 KASSERT(xfer->ux_status != USBD_NOT_STARTED);
2346 if (xfer->ux_status == USBD_IN_PROGRESS) {
2347 if (pipe->up_methods->upm_start != NULL)
2348 (*pipe->up_methods->upm_start)(xfer);
2349 } else {
2350 DPRINTF("pipe restart race xfer=%#jx status=%jd",
2351 (uintptr_t)xfer, xfer->ux_status, 0, 0);
2352 }
2353 }
2354
2355 mutex_exit(&sc->sc_lock);
2356 }
2357
2358 static void
2359 xhci_pipe_restart_async(struct usbd_pipe *pipe)
2360 {
2361 struct xhci_pipe * const xp =
2362 container_of(pipe, struct xhci_pipe, xp_pipe);
2363 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2364 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2365 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2366 struct xhci_ring * const tr = xs->xs_xr[dci];
2367
2368 XHCIHIST_FUNC();
2369 XHCIHIST_CALLARGS("pipe %#jx", (uintptr_t)pipe, 0, 0, 0);
2370
2371 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2372
2373 tr->is_halted = true;
2374 usb_add_task(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC);
2375
2376 DPRINTFN(4, "ends", 0, 0, 0, 0);
2377 }
2378
2379 /* Process roothub port status/change events and notify uhub_intr. */
2380 static void
2381 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport)
2382 {
2383 XHCIHIST_FUNC();
2384 XHCIHIST_CALLARGS("xhci%jd: port %ju status change",
2385 device_unit(sc->sc_dev), ctlrport, 0, 0);
2386
2387 if (ctlrport > sc->sc_maxports)
2388 return;
2389
2390 const size_t bn = xhci_ctlrport2bus(sc, ctlrport);
2391 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport);
2392 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn];
2393
2394 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change",
2395 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer);
2396
2397 if (xfer == NULL)
2398 return;
2399 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
2400
2401 uint8_t *p = xfer->ux_buf;
2402 if (!xhci_polling_p(sc) || !sc->sc_intrxfer_deferred[bn])
2403 memset(p, 0, xfer->ux_length);
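	/* Mark this root hub port as changed in the status bitmap for uhub. */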
2404 p[rhp / NBBY] |= 1 << (rhp % NBBY);
2405 xfer->ux_actlen = xfer->ux_length;
2406 xfer->ux_status = USBD_NORMAL_COMPLETION;
2407 if (xhci_polling_p(sc))
2408 sc->sc_intrxfer_deferred[bn] = true;
2409 else
2410 usb_transfer_complete(xfer);
2411 }
2412
2413 /* Process Transfer Events */
2414 static void
2415 xhci_event_transfer(struct xhci_softc * const sc,
2416 const struct xhci_trb * const trb)
2417 {
2418 uint64_t trb_0;
2419 uint32_t trb_2, trb_3;
2420 uint8_t trbcode;
2421 u_int slot, dci;
2422 struct xhci_slot *xs;
2423 struct xhci_ring *xr;
2424 struct xhci_xfer *xx;
2425 struct usbd_xfer *xfer;
2426 usbd_status err;
2427
2428 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2429
2430 trb_0 = le64toh(trb->trb_0);
2431 trb_2 = le32toh(trb->trb_2);
2432 trb_3 = le32toh(trb->trb_3);
2433 trbcode = XHCI_TRB_2_ERROR_GET(trb_2);
2434 slot = XHCI_TRB_3_SLOT_GET(trb_3);
2435 dci = XHCI_TRB_3_EP_GET(trb_3);
2436 xs = &sc->sc_slots[slot];
2437 xr = xs->xs_xr[dci];
2438
2439 /* sanity check */
2440 KASSERT(xr != NULL);
2441 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots,
2442 "invalid xs_idx %u slot %u", xs->xs_idx, slot);
2443
2444 int idx = 0;
2445 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2446 if (xhci_trb_get_idx(xr, trb_0, &idx)) {
2447 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0);
2448 return;
2449 }
2450 xx = xr->xr_cookies[idx];
2451
2452 /* clear cookie of consumed TRB */
2453 xr->xr_cookies[idx] = NULL;
2454
2455 		/*
2456 		 * xx is NULL if the pipe is open but no xfer has been
2457 		 * started; this happens when stopping an idle pipe.
2458 		 */
2459 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) {
2460 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju",
2461 idx, (uintptr_t)xx, trbcode, dci);
2462 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0,
2463 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)),
2464 0, 0);
2465 return;
2466 }
2467 } else {
2468 		/* When ED != 0, trb_0 is the virtual addr of the struct xhci_xfer. */
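		/*
		 * The low bits of trb_0 carry a stage tag set by the
		 * control-transfer start code (see xhci_device_ctrl_start),
		 * so mask them off to recover the pointer.
		 */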
2469 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
2470 }
2471 /* XXX this may not happen */
2472 if (xx == NULL) {
2473 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
2474 return;
2475 }
2476 xfer = &xx->xx_xfer;
2477 /* XXX this may happen when detaching */
2478 if (xfer == NULL) {
2479 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2480 (uintptr_t)xx, trb_0, 0, 0);
2481 return;
2482 }
2483 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2484 /* XXX I dunno why this happens */
2485 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2486
2487 if (!xfer->ux_pipe->up_repeat &&
2488 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2489 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2490 0, 0, 0);
2491 return;
2492 }
2493
2494 const uint8_t xfertype =
2495 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2496
2497 /* 4.11.5.2 Event Data TRB */
2498 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2499 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2500 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2501 if ((trb_0 & 0x3) == 0x3) {
2502 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2503 }
2504 }
2505
2506 switch (trbcode) {
2507 case XHCI_TRB_ERROR_SHORT_PKT:
2508 case XHCI_TRB_ERROR_SUCCESS:
2509 		/*
2510 		 * A ctrl transfer can generate two events if it has a Data
2511 		 * stage.  A short data stage can be OK and should not
2512 		 * complete the transfer, as the status stage still needs to
2513 		 * be performed.
2514 		 *
2515 		 * Note: the Data and Status stage events point at the same
2516 		 * xfer.  ux_actlen and ux_dmabuf will be passed to
2517 		 * usb_transfer_complete after the Status stage event.
2518 		 *
2519 		 * Which stage generated the event can be distinguished:
2520 		 * + by checking the low bits of trb_0 if ED==1
2521 		 *   (see xhci_device_ctrl_start);
2522 		 * + by checking the type of the original TRB if ED==0.
2523 		 *
2524 		 * In addition, intr, bulk, and isoc transfers currently
2525 		 * consist of a single TD, so the "skip" is not needed.
2526 		 * A ctrl xfer uses EVENT_DATA and the others do not, so
2527 		 * the driver can switch the flow by checking the ED bit.
2528 		 */
2529 if (xfertype == UE_ISOCHRONOUS) {
2530 xfer->ux_frlengths[xx->xx_isoc_done] -=
2531 XHCI_TRB_2_REM_GET(trb_2);
2532 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2533 } else if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2534 if (xfer->ux_actlen == 0)
2535 xfer->ux_actlen = xfer->ux_length -
2536 XHCI_TRB_2_REM_GET(trb_2);
2537 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2538 == XHCI_TRB_TYPE_DATA_STAGE) {
2539 return;
2540 }
2541 } else if ((trb_0 & 0x3) == 0x3) {
2542 return;
2543 }
2544 err = USBD_NORMAL_COMPLETION;
2545 break;
2546 case XHCI_TRB_ERROR_STOPPED:
2547 case XHCI_TRB_ERROR_LENGTH:
2548 case XHCI_TRB_ERROR_STOPPED_SHORT:
2549 err = USBD_IOERROR;
2550 break;
2551 case XHCI_TRB_ERROR_STALL:
2552 case XHCI_TRB_ERROR_BABBLE:
2553 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2554 xhci_pipe_restart_async(xfer->ux_pipe);
2555 err = USBD_STALLED;
2556 break;
2557 default:
2558 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2559 err = USBD_IOERROR;
2560 break;
2561 }
2562
2563 if (xfertype == UE_ISOCHRONOUS) {
2564 switch (trbcode) {
2565 case XHCI_TRB_ERROR_SHORT_PKT:
2566 case XHCI_TRB_ERROR_SUCCESS:
2567 break;
2568 case XHCI_TRB_ERROR_MISSED_SERVICE:
2569 case XHCI_TRB_ERROR_RING_UNDERRUN:
2570 case XHCI_TRB_ERROR_RING_OVERRUN:
2571 default:
2572 xfer->ux_frlengths[xx->xx_isoc_done] = 0;
2573 break;
2574 }
2575 if (++xx->xx_isoc_done < xfer->ux_nframes)
2576 return;
2577 }
2578
2579 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
2580 (trb_0 & 0x3) == 0x0) {
2581 /*
2582 * Try to claim this xfer for completion. If it has
2583 * already completed or aborted, drop it on the floor.
2584 */
2585 if (!usbd_xfer_trycomplete(xfer))
2586 return;
2587
2588 /* Set the status. */
2589 xfer->ux_status = err;
2590
2591 usb_transfer_complete(xfer);
2592 }
2593 }
2594
2595 /* Process Command complete events */
2596 static void
2597 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb)
2598 {
2599 uint64_t trb_0;
2600 uint32_t trb_2, trb_3;
2601
2602 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2603
2604 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2605
2606 trb_0 = le64toh(trb->trb_0);
2607 trb_2 = le32toh(trb->trb_2);
2608 trb_3 = le32toh(trb->trb_3);
2609
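	/*
	 * For a Command Completion event, trb_0 holds the physical address
	 * of the command TRB that completed; match it against the command
	 * issued in xhci_do_command_locked.
	 */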
2610 if (trb_0 == sc->sc_command_addr) {
2611 sc->sc_resultpending = false;
2612
2613 sc->sc_result_trb.trb_0 = trb_0;
2614 sc->sc_result_trb.trb_2 = trb_2;
2615 sc->sc_result_trb.trb_3 = trb_3;
2616 if (XHCI_TRB_2_ERROR_GET(trb_2) !=
2617 XHCI_TRB_ERROR_SUCCESS) {
2618 DPRINTFN(1, "command completion "
2619 "failure: 0x%016jx 0x%08jx 0x%08jx",
2620 trb_0, trb_2, trb_3, 0);
2621 }
2622 cv_signal(&sc->sc_command_cv);
2623 } else {
2624 DPRINTFN(1, "spurious event: %#jx 0x%016jx "
2625 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3);
2626 }
2627 }
2628
2629 /*
2630 * Process events.
2631 * called from xhci_softintr
2632 */
2633 static void
2634 xhci_handle_event(struct xhci_softc * const sc,
2635 const struct xhci_trb * const trb)
2636 {
2637 uint64_t trb_0;
2638 uint32_t trb_2, trb_3;
2639
2640 XHCIHIST_FUNC();
2641
2642 trb_0 = le64toh(trb->trb_0);
2643 trb_2 = le32toh(trb->trb_2);
2644 trb_3 = le32toh(trb->trb_3);
2645
2646 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx",
2647 (uintptr_t)trb, trb_0, trb_2, trb_3);
2648
2649 /*
2650 * 4.11.3.1, 6.4.2.1
2651 * TRB Pointer is invalid for these completion codes.
2652 */
2653 switch (XHCI_TRB_2_ERROR_GET(trb_2)) {
2654 case XHCI_TRB_ERROR_RING_UNDERRUN:
2655 case XHCI_TRB_ERROR_RING_OVERRUN:
2656 case XHCI_TRB_ERROR_VF_RING_FULL:
2657 return;
2658 default:
2659 if (trb_0 == 0) {
2660 return;
2661 }
2662 break;
2663 }
2664
2665 switch (XHCI_TRB_3_TYPE_GET(trb_3)) {
2666 case XHCI_TRB_EVENT_TRANSFER:
2667 xhci_event_transfer(sc, trb);
2668 break;
2669 case XHCI_TRB_EVENT_CMD_COMPLETE:
2670 xhci_event_cmd(sc, trb);
2671 break;
2672 case XHCI_TRB_EVENT_PORT_STS_CHANGE:
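		/* The Port ID is carried in bits 31:24 of the event's trb_0. */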
2673 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff));
2674 break;
2675 default:
2676 break;
2677 }
2678 }
2679
2680 static void
2681 xhci_softintr(void *v)
2682 {
2683 struct usbd_bus * const bus = v;
2684 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2685 struct xhci_ring * const er = sc->sc_er;
2686 struct xhci_trb *trb;
2687 int i, j, k, bn;
2688
2689 XHCIHIST_FUNC();
2690
2691 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2692
2693 i = er->xr_ep;
2694 j = er->xr_cs;
2695
2696 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0);
2697
2698 /*
2699 * Handle deferred root intr xfer, in case we just switched off
2700 * polling. It's not safe to complete root intr xfers while
2701 * polling -- too much kernel machinery gets involved.
2702 */
2703 if (!xhci_polling_p(sc)) {
2704 for (bn = 0; bn < 2; bn++) {
2705 if (__predict_false(sc->sc_intrxfer_deferred[bn])) {
2706 sc->sc_intrxfer_deferred[bn] = false;
2707 usb_transfer_complete(sc->sc_intrxfer[bn]);
2708 }
2709 }
2710 }
2711
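	/*
	 * Consume event TRBs until a TRB's cycle bit no longer matches our
	 * consumer cycle state; wrap to the start of the ring and toggle
	 * the cycle state when the end of the ring is reached.
	 */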
2712 while (1) {
2713 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE,
2714 BUS_DMASYNC_POSTREAD);
2715 trb = &er->xr_trb[i];
2716 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0;
2717
2718 if (j != k)
2719 break;
2720
2721 xhci_handle_event(sc, trb);
2722
2723 i++;
2724 if (i == er->xr_ntrb) {
2725 i = 0;
2726 j ^= 1;
2727 }
2728 }
2729
2730 er->xr_ep = i;
2731 er->xr_cs = j;
2732
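	/*
	 * Update the hardware event ring dequeue pointer.  The EHB (Event
	 * Handler Busy) bit is write-1-to-clear, so setting XHCI_ERDP_BUSY
	 * here clears it.
	 */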
2733 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) |
2734 XHCI_ERDP_BUSY);
2735
2736 DPRINTFN(16, "ends", 0, 0, 0, 0);
2737
2738 return;
2739 }
2740
2741 static void
2742 xhci_poll(struct usbd_bus *bus)
2743 {
2744 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2745
2746 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2747
2748 mutex_enter(&sc->sc_intr_lock);
2749 int ret = xhci_intr1(sc);
2750 if (ret) {
2751 xhci_softintr(bus);
2752 }
2753 mutex_exit(&sc->sc_intr_lock);
2754
2755 return;
2756 }
2757
2758 static struct usbd_xfer *
2759 xhci_allocx(struct usbd_bus *bus, unsigned int nframes)
2760 {
2761 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2762 struct xhci_xfer *xx;
2763 u_int ntrbs;
2764
2765 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2766
2767 ntrbs = uimax(3, nframes);
2768 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs;
2769
2770 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
2771 if (xx != NULL) {
2772 memset(xx, 0, sizeof(*xx));
2773 if (ntrbs > 0) {
2774 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP);
2775 xx->xx_ntrb = ntrbs;
2776 }
2777 #ifdef DIAGNOSTIC
2778 xx->xx_xfer.ux_state = XFER_BUSY;
2779 #endif
2780 }
2781
2782 return &xx->xx_xfer;
2783 }
2784
2785 static void
2786 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
2787 {
2788 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2789 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
2790
2791 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2792
2793 #ifdef DIAGNOSTIC
2794 if (xfer->ux_state != XFER_BUSY &&
2795 xfer->ux_status != USBD_NOT_STARTED) {
2796 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx",
2797 (uintptr_t)xfer, xfer->ux_state, 0, 0);
2798 }
2799 xfer->ux_state = XFER_FREE;
2800 #endif
2801 if (xx->xx_ntrb > 0) {
2802 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb));
2803 xx->xx_trb = NULL;
2804 xx->xx_ntrb = 0;
2805 }
2806 pool_cache_put(sc->sc_xferpool, xx);
2807 }
2808
2809 static bool
2810 xhci_dying(struct usbd_bus *bus)
2811 {
2812 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2813
2814 return sc->sc_dying;
2815 }
2816
2817 static void
2818 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2819 {
2820 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2821
2822 *lock = &sc->sc_lock;
2823 }
2824
2825 extern uint32_t usb_cookie_no;
2826
2827 /*
2828 * xHCI 4.3
2829 * Called when uhub_explore finds a new device (via usbd_new_device).
2830 * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2831 * This function does:
2832  *	Allocate and construct the dev structure for the default endpoint (ep0).
2833  *	Allocate and open the ep0 pipe.
2834  *	Enable the slot and initialize the slot context.
2835  *	Set Address.
2836  *	Read the initial device descriptor.
2837  *	Determine the initial MaxPacketSize (mps) by speed.
2838  *	Read the full device descriptor.
2839  *	Register this device.
2840  * Finally the device state transitions to ADDRESSED.
2841 */
2842 static usbd_status
2843 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2844 int speed, int port, struct usbd_port *up)
2845 {
2846 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2847 struct usbd_device *dev;
2848 usbd_status err;
2849 usb_device_descriptor_t *dd;
2850 struct xhci_slot *xs;
2851 uint32_t *cp;
2852
2853 XHCIHIST_FUNC();
2854 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2855 port, depth, speed, (uintptr_t)up);
2856
2857 KASSERT(KERNEL_LOCKED_P());
2858
2859 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2860 dev->ud_bus = bus;
2861 dev->ud_quirks = &usbd_no_quirk;
2862 dev->ud_addr = 0;
2863 dev->ud_ddesc.bMaxPacketSize = 0;
2864 dev->ud_depth = depth;
2865 dev->ud_powersrc = up;
2866 dev->ud_myhub = up->up_parent;
2867 dev->ud_speed = speed;
2868 dev->ud_langid = USBD_NOLANG;
2869 dev->ud_cookie.cookie = ++usb_cookie_no;
2870
2871 /* Set up default endpoint handle. */
2872 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2873 	/* doesn't matter, just don't leave it uninitialized */
2874 dev->ud_ep0.ue_toggle = 0;
2875
2876 /* Set up default endpoint descriptor. */
2877 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2878 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2879 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2880 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2881 dev->ud_ep0desc.bInterval = 0;
2882
2883 /* 4.3, 4.8.2.1 */
2884 switch (speed) {
2885 case USB_SPEED_SUPER:
2886 case USB_SPEED_SUPER_PLUS:
2887 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2888 break;
2889 case USB_SPEED_FULL:
2890 /* XXX using 64 as initial mps of ep0 in FS */
2891 case USB_SPEED_HIGH:
2892 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2893 break;
2894 case USB_SPEED_LOW:
2895 default:
2896 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2897 break;
2898 }
2899
2900 up->up_dev = dev;
2901
2902 dd = &dev->ud_ddesc;
2903
2904 if (depth == 0 && port == 0) {
2905 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2906 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2907
2908 /* Establish the default pipe. */
2909 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2910 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2911 if (err) {
2912 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0);
2913 goto bad;
2914 }
2915 err = usbd_get_initial_ddesc(dev, dd);
2916 if (err) {
2917 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2918 goto bad;
2919 }
2920 } else {
2921 uint8_t slot = 0;
2922
2923 /* 4.3.2 */
2924 err = xhci_enable_slot(sc, &slot);
2925 if (err) {
2926 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0);
2927 goto bad;
2928 }
2929
2930 xs = &sc->sc_slots[slot];
2931 dev->ud_hcpriv = xs;
2932
2933 /* 4.3.3 initialize slot structure */
2934 err = xhci_init_slot(dev, slot);
2935 if (err) {
2936 DPRINTFN(1, "init slot %ju", err, 0, 0, 0);
2937 dev->ud_hcpriv = NULL;
2938 			/*
2939 			 * We have to disable_slot here because
2940 			 * xs->xs_idx == 0 when xhci_init_slot fails;
2941 			 * in that case usbd_remove_device won't work.
2942 			 */
2943 mutex_enter(&sc->sc_lock);
2944 xhci_disable_slot(sc, slot);
2945 mutex_exit(&sc->sc_lock);
2946 goto bad;
2947 }
2948
2949 /*
2950 * We have to establish the default pipe _after_ slot
2951 * structure has been prepared.
2952 */
2953 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2954 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2955 if (err) {
2956 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0,
2957 0);
2958 goto bad;
2959 }
2960
2961 /* 4.3.4 Address Assignment */
2962 err = xhci_set_address(dev, slot, false);
2963 if (err) {
2964 			DPRINTFN(1, "failed to set address: %ju", err, 0, 0, 0);
2965 goto bad;
2966 }
2967
2968 /* Allow device time to set new address */
2969 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE);
2970
2971 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2972 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT);
2973 HEXDUMP("slot context", cp, sc->sc_ctxsz);
2974 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3]));
2975 DPRINTFN(4, "device address %ju", addr, 0, 0, 0);
2976 /*
2977 * XXX ensure we know when the hardware does something
2978 * we can't yet cope with
2979 */
2980 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr);
2981 dev->ud_addr = addr;
2982
2983 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL,
2984 "addr %d already allocated", dev->ud_addr);
2985 /*
2986 * The root hub is given its own slot
2987 */
2988 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev;
2989
2990 err = usbd_get_initial_ddesc(dev, dd);
2991 if (err) {
2992 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2993 goto bad;
2994 }
2995
2996 /* 4.8.2.1 */
2997 if (USB_IS_SS(speed)) {
2998 if (dd->bMaxPacketSize != 9) {
2999 printf("%s: invalid mps 2^%u for SS ep0,"
3000 " using 512\n",
3001 device_xname(sc->sc_dev),
3002 dd->bMaxPacketSize);
3003 dd->bMaxPacketSize = 9;
3004 }
3005 USETW(dev->ud_ep0desc.wMaxPacketSize,
3006 (1 << dd->bMaxPacketSize));
3007 } else
3008 USETW(dev->ud_ep0desc.wMaxPacketSize,
3009 dd->bMaxPacketSize);
3010 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0);
3011 err = xhci_update_ep0_mps(sc, xs,
3012 UGETW(dev->ud_ep0desc.wMaxPacketSize));
3013 if (err) {
3014 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0);
3015 goto bad;
3016 }
3017 }
3018
3019 err = usbd_reload_device_desc(dev);
3020 if (err) {
3021 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
3022 goto bad;
3023 }
3024
3025 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,",
3026 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0);
3027 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,",
3028 dd->bDeviceClass, dd->bDeviceSubClass,
3029 dd->bDeviceProtocol, 0);
3030 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd",
3031 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations,
3032 dev->ud_speed);
3033
3034 usbd_get_device_strings(dev);
3035
3036 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev);
3037
3038 if (depth == 0 && port == 0) {
3039 usbd_attach_roothub(parent, dev);
3040 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0);
3041 return USBD_NORMAL_COMPLETION;
3042 }
3043
3044 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr);
3045 bad:
3046 if (err != USBD_NORMAL_COMPLETION) {
3047 if (depth == 0 && port == 0 && dev->ud_pipe0)
3048 usbd_kill_pipe(dev->ud_pipe0);
3049 usbd_remove_device(dev, up);
3050 }
3051
3052 return err;
3053 }
3054
3055 static usbd_status
3056 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp,
3057 size_t ntrb, size_t align)
3058 {
3059 size_t size = ntrb * XHCI_TRB_SIZE;
3060 struct xhci_ring *xr;
3061
3062 XHCIHIST_FUNC();
3063 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx",
3064 (uintptr_t)*xrp, ntrb, align, 0);
3065
3066 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP);
3067 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0);
3068
3069 int err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
3070 USBMALLOC_ZERO, &xr->xr_dma);
3071 if (err) {
3072 kmem_free(xr, sizeof(struct xhci_ring));
3073 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0);
3074 return err;
3075 }
3076 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
3077 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP);
3078 xr->xr_trb = xhci_ring_trbv(xr, 0);
3079 xr->xr_ntrb = ntrb;
3080 xr->is_halted = false;
3081 xhci_host_dequeue(xr);
3082 *xrp = xr;
3083
3084 return USBD_NORMAL_COMPLETION;
3085 }
3086
3087 static void
3088 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr)
3089 {
3090 if (*xr == NULL)
3091 return;
3092
3093 usb_freemem(&(*xr)->xr_dma);
3094 mutex_destroy(&(*xr)->xr_lock);
3095 kmem_free((*xr)->xr_cookies,
3096 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb);
3097 kmem_free(*xr, sizeof(struct xhci_ring));
3098 *xr = NULL;
3099 }
3100
3101 static void
3102 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr,
3103 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs)
3104 {
3105 size_t i;
3106 u_int ri;
3107 u_int cs;
3108 uint64_t parameter;
3109 uint32_t status;
3110 uint32_t control;
3111
3112 XHCIHIST_FUNC();
3113 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju",
3114 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0);
3115
3116 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u",
3117 ntrbs, xr->xr_ntrb);
3118 for (i = 0; i < ntrbs; i++) {
3119 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr,
3120 (uintptr_t)trbs, i, 0);
3121 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx",
3122 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0);
3123 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) !=
3124 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3);
3125 }
3126
3127 ri = xr->xr_ep;
3128 cs = xr->xr_cs;
3129
3130 /*
3131 * Although the xhci hardware can do scatter/gather dma from
3132 * arbitrary sized buffers, there is a non-obvious restriction
3133 * that a LINK trb is only allowed at the end of a burst of
3134 * transfers - which might be 16kB.
3135 	 * Arbitrarily aligned LINK TRBs definitely fail on Ivy Bridge.
3136 * The simple solution is not to allow a LINK trb in the middle
3137 * of anything - as here.
3138 * XXX: (dsl) There are xhci controllers out there (eg some made by
3139 * ASMedia) that seem to lock up if they process a LINK trb but
3140 * cannot process the linked-to trb yet.
3141 * The code should write the 'cycle' bit on the link trb AFTER
3142 * adding the other trb.
3143 */
3144 u_int firstep = xr->xr_ep;
3145 u_int firstcs = xr->xr_cs;
3146
3147 for (i = 0; i < ntrbs; ) {
3148 u_int oldri = ri;
3149 u_int oldcs = cs;
3150
3151 if (ri >= (xr->xr_ntrb - 1)) {
3152 /* Put Link TD at the end of ring */
3153 parameter = xhci_ring_trbp(xr, 0);
3154 status = 0;
3155 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) |
3156 XHCI_TRB_3_TC_BIT;
3157 xr->xr_cookies[ri] = NULL;
3158 xr->xr_ep = 0;
3159 xr->xr_cs ^= 1;
3160 ri = xr->xr_ep;
3161 cs = xr->xr_cs;
3162 } else {
3163 parameter = trbs[i].trb_0;
3164 status = trbs[i].trb_2;
3165 control = trbs[i].trb_3;
3166
3167 xr->xr_cookies[ri] = cookie;
3168 ri++;
3169 i++;
3170 }
3171 		/*
3172 		 * If this is the first TRB, mark it invalid to prevent
3173 		 * the xHC from running it immediately.
3174 		 */
3175 if (oldri == firstep) {
3176 if (oldcs) {
3177 control &= ~XHCI_TRB_3_CYCLE_BIT;
3178 } else {
3179 control |= XHCI_TRB_3_CYCLE_BIT;
3180 }
3181 } else {
3182 if (oldcs) {
3183 control |= XHCI_TRB_3_CYCLE_BIT;
3184 } else {
3185 control &= ~XHCI_TRB_3_CYCLE_BIT;
3186 }
3187 }
3188 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control);
3189 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri,
3190 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3191 }
3192
3193 /* Now invert cycle bit of first TRB */
3194 if (firstcs) {
3195 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT);
3196 } else {
3197 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT);
3198 }
3199 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep,
3200 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3201
3202 xr->xr_ep = ri;
3203 xr->xr_cs = cs;
3204
3205 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep,
3206 xr->xr_cs, 0);
3207 }
3208
3209 static inline void
3210 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr,
3211 struct xhci_xfer *xx, u_int ntrb)
3212 {
3213 KASSERT(ntrb <= xx->xx_ntrb);
3214 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb);
3215 }
3216
3217 /*
3218  * Stop command execution, purge all commands on the command ring, and
3219  * rewind the dequeue pointer.
3220  */
3221 static void
3222 xhci_abort_command(struct xhci_softc *sc)
3223 {
3224 struct xhci_ring * const cr = sc->sc_cr;
3225 uint64_t crcr;
3226 int i;
3227
3228 XHCIHIST_FUNC();
3229 XHCIHIST_CALLARGS("command %#jx timeout, aborting",
3230 sc->sc_command_addr, 0, 0, 0);
3231
3232 mutex_enter(&cr->xr_lock);
3233
3234 /* 4.6.1.2 Aborting a Command */
3235 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3236 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA);
3237
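	/* Wait up to 500ms for the Command Ring Running (CRR) bit to clear. */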
3238 for (i = 0; i < 500; i++) {
3239 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3240 if ((crcr & XHCI_CRCR_LO_CRR) == 0)
3241 break;
3242 usb_delay_ms(&sc->sc_bus, 1);
3243 }
3244 if ((crcr & XHCI_CRCR_LO_CRR) != 0) {
3245 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0);
3246 /* reset HC here? */
3247 }
3248
3249 /* reset command ring dequeue pointer */
3250 cr->xr_ep = 0;
3251 cr->xr_cs = 1;
3252 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs);
3253
3254 mutex_exit(&cr->xr_lock);
3255 }
3256
3257 /*
3258  * Put a command on the command ring, ring the doorbell, and cv_timedwait
3259  * with a timeout.  Command completion is notified by cv_signal from
3260  * xhci_event_cmd() (called from xhci_softintr), or the wait times out.
3261  * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(),
3262  * which xhci_do_command_locked then examines.
3263 */
3264 static usbd_status
3265 xhci_do_command_locked(struct xhci_softc * const sc,
3266 struct xhci_soft_trb * const trb, int timeout)
3267 {
3268 struct xhci_ring * const cr = sc->sc_cr;
3269 usbd_status err;
3270
3271 XHCIHIST_FUNC();
3272 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx",
3273 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3274
3275 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
3276 KASSERT(mutex_owned(&sc->sc_lock));
3277
3278 while (sc->sc_command_addr != 0 ||
3279 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp))
3280 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
3281 if (sc->sc_suspendresume_failed)
3282 return USBD_IOERROR;
3283
3284 /*
3285 	 * If the enqueue pointer points at the last entry of the ring, that
3286 	 * entry is the Link TRB, so the command TRB will be stored in slot 0.
3287 */
3288 if (cr->xr_ep == cr->xr_ntrb - 1)
3289 sc->sc_command_addr = xhci_ring_trbp(cr, 0);
3290 else
3291 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep);
3292
3293 sc->sc_resultpending = true;
3294
3295 mutex_enter(&cr->xr_lock);
3296 xhci_ring_put(sc, cr, NULL, trb, 1);
3297 mutex_exit(&cr->xr_lock);
3298
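	/*
	 * Ring doorbell 0 (the command doorbell) to tell the xHC to start
	 * processing the command ring.
	 */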
3299 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0);
3300
3301 while (sc->sc_resultpending) {
3302 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock,
3303 MAX(1, mstohz(timeout))) == EWOULDBLOCK) {
3304 xhci_abort_command(sc);
3305 err = USBD_TIMEOUT;
3306 goto timedout;
3307 }
3308 }
3309
3310 trb->trb_0 = sc->sc_result_trb.trb_0;
3311 trb->trb_2 = sc->sc_result_trb.trb_2;
3312 trb->trb_3 = sc->sc_result_trb.trb_3;
3313
3314 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx",
3315 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3316
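	/*
	 * Per xHCI, completion codes 192-223 are vendor defined errors and
	 * 224-255 are vendor defined information codes; the latter are
	 * treated as success here.
	 */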
3317 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) {
3318 case XHCI_TRB_ERROR_SUCCESS:
3319 err = USBD_NORMAL_COMPLETION;
3320 break;
3321 default:
3322 case 192 ... 223:
3323 DPRINTFN(5, "error %#jx",
3324 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0);
3325 err = USBD_IOERROR;
3326 break;
3327 case 224 ... 255:
3328 err = USBD_NORMAL_COMPLETION;
3329 break;
3330 }
3331
3332 timedout:
3333 sc->sc_resultpending = false;
3334 sc->sc_command_addr = 0;
3335 cv_broadcast(&sc->sc_cmdbusy_cv);
3336
3337 return err;
3338 }
3339
3340 static usbd_status
3341 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb,
3342 int timeout)
3343 {
3344
3345 mutex_enter(&sc->sc_lock);
3346 usbd_status ret = xhci_do_command_locked(sc, trb, timeout);
3347 mutex_exit(&sc->sc_lock);
3348
3349 return ret;
3350 }
3351
3352 static usbd_status
3353 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp)
3354 {
3355 struct xhci_soft_trb trb;
3356 usbd_status err;
3357
3358 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3359
3360 trb.trb_0 = 0;
3361 trb.trb_2 = 0;
3362 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
3363
3364 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3365 if (err != USBD_NORMAL_COMPLETION) {
3366 return err;
3367 }
3368
3369 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3);
3370
3371 return err;
3372 }
3373
3374 /*
3375 * xHCI 4.6.4
3376  * Deallocate the rings and the device/input context DMA buffers, and
3377  * disable the slot.  All endpoints in the slot should be stopped.
3378 * Should be called with sc_lock held.
3379 */
3380 static usbd_status
3381 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot)
3382 {
3383 struct xhci_soft_trb trb;
3384 struct xhci_slot *xs;
3385 usbd_status err;
3386
3387 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3388
3389 if (sc->sc_dying)
3390 return USBD_IOERROR;
3391
3392 trb.trb_0 = 0;
3393 trb.trb_2 = 0;
3394 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) |
3395 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT);
3396
3397 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
3398
3399 if (!err) {
3400 xs = &sc->sc_slots[slot];
3401 if (xs->xs_idx != 0) {
3402 xhci_free_slot(sc, xs);
3403 xhci_set_dcba(sc, 0, slot);
3404 memset(xs, 0, sizeof(*xs));
3405 }
3406 }
3407
3408 return err;
3409 }
3410
3411 /*
3412  * Set the device address and transition the slot state from ENABLED to
3413  * ADDRESSED if the Block Set Address Request (BSR) flag is false.
3414  * If BSR==true, transition the slot state from ENABLED to DEFAULT.
3415  * See xHCI 1.1 4.5.3, 3.3.4.
3416 * Should be called without sc_lock held.
3417 */
3418 static usbd_status
3419 xhci_address_device(struct xhci_softc * const sc,
3420 uint64_t icp, uint8_t slot_id, bool bsr)
3421 {
3422 struct xhci_soft_trb trb;
3423 usbd_status err;
3424
3425 XHCIHIST_FUNC();
3426 if (bsr) {
3427 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr",
3428 icp, slot_id, 0, 0);
3429 } else {
3430 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr",
3431 icp, slot_id, 0, 0);
3432 }
3433
3434 trb.trb_0 = icp;
3435 trb.trb_2 = 0;
3436 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) |
3437 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) |
3438 (bsr ? XHCI_TRB_3_BSR_BIT : 0);
3439
3440 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3441
3442 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS)
3443 err = USBD_NO_ADDR;
3444
3445 return err;
3446 }
3447
3448 static usbd_status
3449 xhci_update_ep0_mps(struct xhci_softc * const sc,
3450 struct xhci_slot * const xs, u_int mps)
3451 {
3452 struct xhci_soft_trb trb;
3453 usbd_status err;
3454 uint32_t * cp;
3455
3456 XHCIHIST_FUNC();
3457 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0);
3458
3459 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3460 cp[0] = htole32(0);
3461 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL));
3462
3463 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL));
3464 cp[1] &= ~htole32(XHCI_EPCTX_1_MAXP_SIZE_MASK);
3465 cp[1] |= htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps));
3466
3467 /* sync input contexts before they are read from memory */
3468 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3469 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3470 sc->sc_ctxsz * 4);
3471
3472 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
3473 trb.trb_2 = 0;
3474 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
3475 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX);
3476
3477 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3478 return err;
3479 }
3480
3481 static void
3482 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si)
3483 {
3484 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0);
3485
3486 XHCIHIST_FUNC();
3487 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd",
3488 (uintptr_t)&dcbaa[si], dcba, si, 0);
3489
3490 dcbaa[si] = htole64(dcba);
3491 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t),
3492 BUS_DMASYNC_PREWRITE);
3493 }
3494
3495 /*
3496 * Allocate device and input context DMA buffer, and
3497 * TRB DMA buffer for each endpoint.
3498 */
3499 static usbd_status
3500 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3501 {
3502 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3503 struct xhci_slot *xs;
3504
3505 XHCIHIST_FUNC();
3506 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3507
3508 xs = &sc->sc_slots[slot];
3509
3510 /* allocate contexts */
3511 int err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3512 USBMALLOC_ZERO, &xs->xs_dc_dma);
3513 if (err) {
3514 DPRINTFN(1, "failed to allocmem output device context %jd",
3515 err, 0, 0, 0);
3516 return USBD_NOMEM;
3517 }
3518
3519 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3520 USBMALLOC_ZERO, &xs->xs_ic_dma);
3521 if (err) {
3522 DPRINTFN(1, "failed to allocmem input device context %jd",
3523 err, 0, 0, 0);
3524 goto bad1;
3525 }
3526
3527 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr));
3528 xs->xs_idx = slot;
3529
3530 return USBD_NORMAL_COMPLETION;
3531
3532 bad1:
3533 usb_freemem(&xs->xs_dc_dma);
3534 xs->xs_idx = 0;
3535 return USBD_NOMEM;
3536 }
3537
3538 static void
3539 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs)
3540 {
3541 u_int dci;
3542
3543 XHCIHIST_FUNC();
3544 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
3545
3546 /* deallocate all allocated rings in the slot */
3547 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
3548 if (xs->xs_xr[dci] != NULL)
3549 xhci_ring_free(sc, &xs->xs_xr[dci]);
3550 }
3551 usb_freemem(&xs->xs_ic_dma);
3552 usb_freemem(&xs->xs_dc_dma);
3553 xs->xs_idx = 0;
3554 }
3555
3556 /*
3557 * Setup slot context, set Device Context Base Address, and issue
3558 * Set Address Device command.
3559 */
3560 static usbd_status
3561 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3562 {
3563 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3564 struct xhci_slot *xs;
3565 usbd_status err;
3566
3567 XHCIHIST_FUNC();
3568 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3569
3570 xs = &sc->sc_slots[slot];
3571
3572 xhci_setup_ctx(dev->ud_pipe0);
3573
3574 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3575 sc->sc_ctxsz * 3);
3576
3577 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3578
3579 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3580
3581 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3582 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3583 sc->sc_ctxsz * 2);
3584
3585 return err;
3586 }
3587
3588 /*
3589 * 4.8.2, 6.2.3.2
3590 * construct slot/endpoint context parameters and do syncmem
3591 */
3592 static void
3593 xhci_setup_ctx(struct usbd_pipe *pipe)
3594 {
3595 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3596 struct usbd_device *dev = pipe->up_dev;
3597 struct xhci_slot * const xs = dev->ud_hcpriv;
3598 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3599 const u_int dci = xhci_ep_get_dci(ed);
3600 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3601 uint32_t *cp;
3602 uint8_t speed = dev->ud_speed;
3603
3604 XHCIHIST_FUNC();
3605 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3606 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3607
3608 /* set up initial input control context */
3609 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3610 cp[0] = htole32(0);
3611 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3612 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT));
3613 cp[7] = htole32(0);
3614
3615 /* set up input slot context */
3616 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
3617 cp[0] =
3618 XHCI_SCTX_0_CTX_NUM_SET(dci) |
3619 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed));
3620 cp[1] = 0;
3621 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0);
3622 cp[3] = 0;
3623 xhci_setup_route(pipe, cp);
3624 xhci_setup_tthub(pipe, cp);
3625
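	/*
	 * The slot context words were assembled in host byte order above;
	 * convert them to little-endian now that all fields are set.
	 */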
3626 cp[0] = htole32(cp[0]);
3627 cp[1] = htole32(cp[1]);
3628 cp[2] = htole32(cp[2]);
3629 cp[3] = htole32(cp[3]);
3630
3631 /* set up input endpoint context */
3632 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci));
3633 cp[0] =
3634 XHCI_EPCTX_0_EPSTATE_SET(0) |
3635 XHCI_EPCTX_0_MULT_SET(0) |
3636 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) |
3637 XHCI_EPCTX_0_LSA_SET(0) |
3638 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0);
3639 cp[1] =
3640 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) |
3641 XHCI_EPCTX_1_HID_SET(0) |
3642 XHCI_EPCTX_1_MAXB_SET(0);
3643
3644 if (xfertype != UE_ISOCHRONOUS)
3645 cp[1] |= XHCI_EPCTX_1_CERR_SET(3);
3646
3647 xhci_setup_maxburst(pipe, cp);
3648
3649 DPRINTFN(4, "setting on dci %ju ival %ju mult %ju mps %#jx",
3650 dci, XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_0_MULT_GET(cp[0]),
3651 XHCI_EPCTX_1_MAXP_SIZE_GET(cp[1]));
3652 DPRINTFN(4, " maxburst %ju mep %#jx atl %#jx",
3653 XHCI_EPCTX_1_MAXB_GET(cp[1]),
3654 (XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_GET(cp[0]) << 16) +
3655 XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_GET(cp[4]),
3656 XHCI_EPCTX_4_AVG_TRB_LEN_GET(cp[4]), 0);
3657
3658 /* rewind TR dequeue pointer in xHC */
3659 /* can't use xhci_ep_get_dci() yet? */
3660 *(uint64_t *)(&cp[2]) = htole64(
3661 xhci_ring_trbp(xs->xs_xr[dci], 0) |
3662 XHCI_EPCTX_2_DCS_SET(1));
3663
3664 cp[0] = htole32(cp[0]);
3665 cp[1] = htole32(cp[1]);
3666 cp[4] = htole32(cp[4]);
3667
3668 /* rewind TR dequeue pointer in driver */
3669 struct xhci_ring *xr = xs->xs_xr[dci];
3670 mutex_enter(&xr->xr_lock);
3671 xhci_host_dequeue(xr);
3672 mutex_exit(&xr->xr_lock);
3673
3674 /* sync input contexts before they are read from memory */
3675 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3676 }
3677
3678 /*
3679 * Setup route string and roothub port of given device for slot context
3680 */
3681 static void
3682 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3683 {
3684 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3685 struct usbd_device *dev = pipe->up_dev;
3686 struct usbd_port *up = dev->ud_powersrc;
3687 struct usbd_device *hub;
3688 struct usbd_device *adev;
3689 uint8_t rhport = 0;
3690 uint32_t route = 0;
3691
3692 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3693
3694 	/* Locate the root hub port and determine the route string */
3695 /* 4.3.3 route string does not include roothub port */
3696 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3697 uint32_t dep;
3698
3699 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd",
3700 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3701 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3702 -1);
3703
3704 if (hub->ud_powersrc == NULL)
3705 break;
3706 dep = hub->ud_depth;
3707 if (dep == 0)
3708 break;
3709 rhport = hub->ud_powersrc->up_portno;
3710 if (dep > USB_HUB_MAX_DEPTH)
3711 continue;
3712
3713 route |=
3714 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3715 << ((dep - 1) * 4);
3716 }
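	/*
	 * Each hub tier contributes one nibble to the route string; the
	 * final shift drops the root hub port nibble, which is reported
	 * separately via rhport.
	 */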
3717 route = route >> 4;
3718 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
3719
3720 /* Locate port on upstream high speed hub */
3721 for (adev = dev, hub = up->up_parent;
3722 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3723 adev = hub, hub = hub->ud_myhub)
3724 ;
3725 if (hub) {
3726 int p;
3727 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3728 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) {
3729 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1];
3730 goto found;
3731 }
3732 }
3733 panic("%s: cannot find HS port", __func__);
3734 found:
3735 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3736 } else {
3737 dev->ud_myhsport = NULL;
3738 }
3739
3740 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3741
3742 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3743 ctlrport, route, (uintptr_t)hub);
3744
3745 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3746 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3747 }
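
/*
 * Illustrative sketch only, kept under #if 0 (not compiled): how the loop
 * in xhci_setup_route() above composes the route string and rhport for a
 * device on port 3 of an external hub that is itself attached to roothub
 * port 4.  The depths, port numbers and the function name are made-up
 * example values, not part of the driver.
 */
#if 0
static uint32_t
xhci_route_string_example(void)
{
	uint32_t route = 0;

	/* device: depth 2, attached to port 3 of the external hub */
	route |= 3 << ((2 - 1) * 4);
	/* external hub: depth 1, attached to roothub port 4 */
	route |= 4 << ((1 - 1) * 4);
	/* drop the roothub port nibble; that port goes into rhport instead */
	route >>= 4;

	return route;	/* 0x3, while rhport ends up as 4 */
}
#endif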
3748
3749 /*
3750  * Set up whether the device is a hub, whether it uses MTT, and
3751  * the TT information if it does.
3752 */
3753 static void
3754 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3755 {
3756 struct usbd_device *dev = pipe->up_dev;
3757 struct usbd_port *myhsport = dev->ud_myhsport;
3758 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3759 uint32_t speed = dev->ud_speed;
3760 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3761 uint8_t tthubslot, ttportnum;
3762 bool ishub;
3763 bool usemtt;
3764
3765 XHCIHIST_FUNC();
3766
3767 /*
3768 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3769 * tthubslot:
3770 	 * This is the slot ID of the parent HS hub
3771 	 * if an LS/FS device is connected through a HS hub.
3772 	 * This is 0 if the device is not an LS/FS device,
3773 	 * the parent hub is not a HS hub,
3774 	 * or the device is attached to the root hub.
3775 	 * ttportnum:
3776 	 * This is the downstream-facing port of the parent HS hub
3777 	 * if an LS/FS device is connected through it.
3778 	 * This is 0 if the device is not an LS/FS device,
3779 	 * the parent hub is not a HS hub,
3780 	 * or the device is attached to the root hub.
3781 */
3782 if (myhsport &&
3783 myhsport->up_parent->ud_addr != rhaddr &&
3784 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3785 ttportnum = myhsport->up_portno;
3786 tthubslot = myhsport->up_parent->ud_addr;
3787 } else {
3788 ttportnum = 0;
3789 tthubslot = 0;
3790 }
3791 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd",
3792 (uintptr_t)myhsport, ttportnum, tthubslot, 0);
3793
3794 /* ishub is valid after reading UDESC_DEVICE */
3795 ishub = (dd->bDeviceClass == UDCLASS_HUB);
3796
3797 /* dev->ud_hub is valid after reading UDESC_HUB */
3798 if (ishub && dev->ud_hub) {
3799 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc;
3800 uint8_t ttt =
3801 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK);
3802
3803 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts);
3804 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt);
3805 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0);
3806 }
3807
3808 #define IS_MTTHUB(dd) \
3809 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT)
3810
3811 /*
3812 	 * The MTT flag is set if
3813 	 * 1. this is a HS hub and MTTs are supported and enabled; or
3814 	 * 2. this is an LS or FS device and there is a parent HS hub where
3815 	 *    MTTs are supported and enabled.
3816 *
3817 * XXX enabled is not tested yet
3818 */
3819 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd))
3820 usemtt = true;
3821 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) &&
3822 myhsport &&
3823 myhsport->up_parent->ud_addr != rhaddr &&
3824 IS_MTTHUB(&myhsport->up_parent->ud_ddesc))
3825 usemtt = true;
3826 else
3827 usemtt = false;
3828 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd",
3829 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt);
3830
3831 #undef IS_MTTHUB
3832
3833 cp[0] |=
3834 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) |
3835 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0);
3836 cp[2] |=
3837 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) |
3838 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum);
3839 }
3840
3841 static const usb_endpoint_ss_comp_descriptor_t *
3842 xhci_get_essc_desc(struct usbd_pipe *pipe)
3843 {
3844 struct usbd_device *dev = pipe->up_dev;
3845 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3846 const usb_cdc_descriptor_t *cdcd;
3847 usbd_desc_iter_t iter;
3848 uint8_t ep;
3849
3850 /* config desc is NULL when opening ep0 */
3851 if (dev == NULL || dev->ud_cdesc == NULL)
3852 return NULL;
3853
3854 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev,
3855 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY);
3856 if (cdcd == NULL)
3857 return NULL;
3858
3859 usb_desc_iter_init(dev, &iter);
3860 iter.cur = (const void *)cdcd;
3861
3862 /* find endpoint_ss_comp desc for ep of this pipe */
3863 for (ep = 0;;) {
3864 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter);
3865 if (cdcd == NULL)
3866 break;
3867 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) {
3868 ep = ((const usb_endpoint_descriptor_t *)cdcd)->
3869 bEndpointAddress;
3870 if (UE_GET_ADDR(ep) ==
3871 UE_GET_ADDR(ed->bEndpointAddress)) {
3872 cdcd = (const usb_cdc_descriptor_t *)
3873 usb_desc_iter_next(&iter);
3874 break;
3875 }
3876 ep = 0;
3877 }
3878 }
3879 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
3880 return (const usb_endpoint_ss_comp_descriptor_t *)cdcd;
3881 }
3882 return NULL;
3883 }
3884
3885 /* set up maxburst, ESIT payload, interval, etc. in the endpoint context */
3886 static void
3887 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp)
3888 {
3889 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
3890 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3891 struct usbd_device * const dev = pipe->up_dev;
3892 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3893 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3894 uint16_t mps = UGETW(ed->wMaxPacketSize);
3895 uint8_t speed = dev->ud_speed;
3896 uint32_t maxb, mep, atl;
3897 uint8_t ival, mult;
3898
3899 const usb_endpoint_ss_comp_descriptor_t * esscd =
3900 xhci_get_essc_desc(pipe);
3901
3902 /* USB 2.0 9.6.6, xHCI 4.8.2.4, 6.2.3.2 - 6.2.3.8 */
3903 switch (xfertype) {
3904 case UE_ISOCHRONOUS:
3905 case UE_INTERRUPT:
3906 if (USB_IS_SS(speed)) {
3907 maxb = esscd ? esscd->bMaxBurst : UE_GET_TRANS(mps);
3908 mep = esscd ? UGETW(esscd->wBytesPerInterval) :
3909 UE_GET_SIZE(mps) * (maxb + 1);
3910 if (esscd && xfertype == UE_ISOCHRONOUS &&
3911 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) {
3912 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes);
3913 mult = (mult > 2) ? 2 : mult;
3914 } else
3915 mult = 0;
3916
3917 } else {
3918 switch (speed) {
3919 case USB_SPEED_HIGH:
3920 maxb = UE_GET_TRANS(mps);
3921 mep = UE_GET_SIZE(mps) * (maxb + 1);
3922 break;
3923 case USB_SPEED_FULL:
3924 maxb = 0;
3925 mep = UE_GET_SIZE(mps);
3926 break;
3927 default:
3928 maxb = 0;
3929 mep = 0;
3930 break;
3931 }
3932 mult = 0;
3933 }
3934 mps = UE_GET_SIZE(mps);
3935
3936 if (pipe->up_interval == USBD_DEFAULT_INTERVAL)
3937 ival = ed->bInterval;
3938 else
3939 ival = pipe->up_interval;
3940
3941 ival = xhci_bival2ival(ival, speed, xfertype);
3942 atl = mep;
3943 break;
3944 case UE_CONTROL:
3945 case UE_BULK:
3946 default:
3947 if (USB_IS_SS(speed)) {
3948 maxb = esscd ? esscd->bMaxBurst : 0;
3949 } else
3950 maxb = 0;
3951
3952 mps = UE_GET_SIZE(mps);
3953 mep = 0;
3954 mult = 0;
3955 ival = 0;
3956 if (xfertype == UE_CONTROL)
3957 atl = 8; /* 6.2.3 */
3958 else
3959 atl = mps;
3960 break;
3961 }
3962
3963 switch (speed) {
3964 case USB_SPEED_LOW:
3965 break;
3966 case USB_SPEED_FULL:
3967 if (xfertype == UE_INTERRUPT)
3968 if (mep > XHCI_EPCTX_MEP_FS_INTR)
3969 mep = XHCI_EPCTX_MEP_FS_INTR;
3970 if (xfertype == UE_ISOCHRONOUS)
3971 if (mep > XHCI_EPCTX_MEP_FS_ISOC)
3972 mep = XHCI_EPCTX_MEP_FS_ISOC;
3973 break;
3974 case USB_SPEED_HIGH:
3975 if (xfertype == UE_INTERRUPT)
3976 if (mep > XHCI_EPCTX_MEP_HS_INTR)
3977 mep = XHCI_EPCTX_MEP_HS_INTR;
3978 if (xfertype == UE_ISOCHRONOUS)
3979 if (mep > XHCI_EPCTX_MEP_HS_ISOC)
3980 mep = XHCI_EPCTX_MEP_HS_ISOC;
3981 break;
3982 case USB_SPEED_SUPER:
3983 case USB_SPEED_SUPER_PLUS:
3984 default:
3985 if (xfertype == UE_INTERRUPT)
3986 if (mep > XHCI_EPCTX_MEP_SS_INTR)
3987 mep = XHCI_EPCTX_MEP_SS_INTR;
3988 if (xfertype == UE_ISOCHRONOUS) {
3989 if (speed == USB_SPEED_SUPER ||
3990 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) {
3991 if (mep > XHCI_EPCTX_MEP_SS_ISOC)
3992 mep = XHCI_EPCTX_MEP_SS_ISOC;
3993 } else {
3994 if (mep > XHCI_EPCTX_MEP_SS_ISOC_LEC)
3995 mep = XHCI_EPCTX_MEP_SS_ISOC_LEC;
3996 }
3997 }
3998 break;
3999 }
4000
4001 xpipe->xp_ival = ival;
4002 xpipe->xp_maxb = maxb + 1;
4003 xpipe->xp_mult = mult + 1;
4004
4005 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(mep >> 16);
4006 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
4007 cp[0] |= XHCI_EPCTX_0_MULT_SET(mult);
4008 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps);
4009 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
4010 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(mep & 0xffff);
4011 cp[4] |= XHCI_EPCTX_4_AVG_TRB_LEN_SET(atl);
4012 }
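
/*
 * Worked example for the computation above (illustrative numbers only):
 * a high-speed, high-bandwidth isochronous endpoint advertising
 * wMaxPacketSize = 0x1400 yields UE_GET_SIZE(mps) = 1024 and
 * UE_GET_TRANS(mps) = 2, hence maxb = 2, mps = 1024, mult = 0 and
 * mep = atl = 1024 * (2 + 1) = 3072.  A SuperSpeed endpoint would instead
 * take maxb from the companion descriptor's bMaxBurst and mep from its
 * wBytesPerInterval.
 */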
4013
4014 /*
4015  * Convert a usbdi bInterval value to the xHCI endpoint context interval
4016  * value for a periodic pipe.
4017 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
4018 */
4019 static uint32_t
4020 xhci_bival2ival(uint32_t ival, uint32_t speed, uint32_t xfertype)
4021 {
4022 if (xfertype != UE_INTERRUPT && xfertype != UE_ISOCHRONOUS)
4023 return 0;
4024
4025 if (xfertype == UE_INTERRUPT &&
4026 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
4027 u_int i;
4028
4029 /*
4030 * round ival down to "the nearest base 2 multiple of
4031 * bInterval * 8".
4032 * bInterval is at most 255 as its type is uByte.
4033 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
4034 */
4035 for (i = 10; i > 0; i--) {
4036 if ((ival * 8) >= (1 << i))
4037 break;
4038 }
4039 ival = i;
4040
4041 /* 3 - 10 */
4042 ival = (ival < 3) ? 3 : ival;
4043 } else if (speed == USB_SPEED_FULL) {
4044 /* FS isoc */
4045 ival += 3; /* 1ms -> 125us */
4046 ival--; /* Interval = bInterval-1 */
4047 /* 3 - 18 */
4048 ival = (ival > 18) ? 18 : ival;
4049 ival = (ival < 3) ? 3 : ival;
4050 } else {
4051 /* SS/HS intr/isoc */
4052 if (ival > 0)
4053 ival--; /* Interval = bInterval-1 */
4054 /* 0 - 15 */
4055 ival = (ival > 15) ? 15 : ival;
4056 }
4057
4058 return ival;
4059 }
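
/*
 * Worked examples of the conversion above (bInterval values are
 * illustrative):
 *   FS interrupt, bInterval 10 (ms): 10 * 8 = 80 microframes; the largest
 *     power of two <= 80 is 2^6, so ival = 6.
 *   FS interrupt, bInterval 1: 1 * 8 = 8 = 2^3, so ival = 3 (also the
 *     clamped minimum).
 *   FS isochronous, bInterval 4: 4 + 3 - 1 = 6.
 *   HS/SS interrupt or isochronous, bInterval 4: 4 - 1 = 3.
 */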
4060
4061 /* ----- */
4062
4063 static void
4064 xhci_noop(struct usbd_pipe *pipe)
4065 {
4066 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4067 }
4068
4069 /*
4070 * Process root hub request.
4071 */
4072 static int
4073 xhci_roothub_ctrl_locked(struct usbd_bus *bus, usb_device_request_t *req,
4074 void *buf, int buflen)
4075 {
4076 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
4077 usb_port_status_t ps;
4078 int l, totlen = 0;
4079 uint16_t len, value, index;
4080 int port, i;
4081 uint32_t v;
4082
4083 XHCIHIST_FUNC();
4084
4085 KASSERT(mutex_owned(&sc->sc_rhlock));
4086
4087 if (sc->sc_dying)
4088 return -1;
4089
4090 size_t bn = bus == &sc->sc_bus ? 0 : 1;
4091
4092 len = UGETW(req->wLength);
4093 value = UGETW(req->wValue);
4094 index = UGETW(req->wIndex);
4095
4096 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
4097 req->bmRequestType | (req->bRequest << 8), value, index, len);
4098
4099 #define C(x,y) ((x) | ((y) << 8))
4100 switch (C(req->bRequest, req->bmRequestType)) {
4101 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
4102 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
4103 if (len == 0)
4104 break;
4105 switch (value) {
4106 #define sd ((usb_string_descriptor_t *)buf)
4107 case C(2, UDESC_STRING):
4108 /* Product */
4109 totlen = usb_makestrdesc(sd, len, "xHCI root hub");
4110 break;
4111 #undef sd
4112 default:
4113 /* default from usbroothub */
4114 return buflen;
4115 }
4116 break;
4117
4118 /* Hub requests */
4119 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
4120 break;
4121 /* Clear Port Feature request */
4122 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
4123 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4124
4125 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
4126 index, value, bn, cp);
4127 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4128 return -1;
4129 }
4130 port = XHCI_PORTSC(cp);
4131 v = xhci_op_read_4(sc, port);
4132 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
4133 v &= ~XHCI_PS_CLEAR;
4134 switch (value) {
4135 case UHF_PORT_ENABLE:
4136 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
4137 break;
4138 case UHF_PORT_SUSPEND:
4139 return -1;
4140 case UHF_PORT_POWER:
4141 break;
4142 case UHF_PORT_TEST:
4143 case UHF_PORT_INDICATOR:
4144 return -1;
4145 case UHF_C_PORT_CONNECTION:
4146 xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
4147 break;
4148 case UHF_C_PORT_ENABLE:
4149 case UHF_C_PORT_SUSPEND:
4150 case UHF_C_PORT_OVER_CURRENT:
4151 return -1;
4152 case UHF_C_BH_PORT_RESET:
4153 xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
4154 break;
4155 case UHF_C_PORT_RESET:
4156 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4157 break;
4158 case UHF_C_PORT_LINK_STATE:
4159 xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
4160 break;
4161 case UHF_C_PORT_CONFIG_ERROR:
4162 xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
4163 break;
4164 default:
4165 return -1;
4166 }
4167 break;
4168 }
4169 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
4170 if (len == 0)
4171 break;
4172 if ((value & 0xff) != 0) {
4173 return -1;
4174 }
4175 usb_hub_descriptor_t hubd;
4176
4177 totlen = uimin(buflen, sizeof(hubd));
4178 memcpy(&hubd, buf, totlen);
4179 hubd.bNbrPorts = sc->sc_rhportcount[bn];
4180 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
4181 hubd.bPwrOn2PwrGood = 200;
4182 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
4183 /* XXX can't find out? */
4184 hubd.DeviceRemovable[i++] = 0;
4185 }
4186 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
4187 totlen = uimin(totlen, hubd.bDescLength);
4188 memcpy(buf, &hubd, totlen);
4189 break;
4190 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
4191 if (len != 4) {
4192 return -1;
4193 }
4194 memset(buf, 0, len); /* ? XXX */
4195 totlen = len;
4196 break;
4197 /* Get Port Status request */
4198 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
4199 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4200
4201 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
4202 bn, index, cp, 0);
4203 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4204 DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
4205 "portcount=%jd",
4206 index, bn, sc->sc_rhportcount[bn], 0);
4207 return -1;
4208 }
4209 if (len != 4) {
4210 DPRINTFN(5, "bad get port status: len %jd != 4",
4211 len, 0, 0, 0);
4212 return -1;
4213 }
4214 v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
4215 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
4216 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
4217 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
4218 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
4219 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
4220 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
4221 if (v & XHCI_PS_PR) i |= UPS_RESET;
4222 if (v & XHCI_PS_PP) {
4223 if (i & UPS_OTHER_SPEED)
4224 i |= UPS_PORT_POWER_SS;
4225 else
4226 i |= UPS_PORT_POWER;
4227 }
4228 if (i & UPS_OTHER_SPEED)
4229 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
4230 if (sc->sc_vendor_port_status)
4231 i = sc->sc_vendor_port_status(sc, v, i);
4232 USETW(ps.wPortStatus, i);
4233 i = 0;
4234 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
4235 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
4236 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
4237 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
4238 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
4239 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
4240 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
4241 USETW(ps.wPortChange, i);
4242 totlen = uimin(len, sizeof(ps));
4243 memcpy(buf, &ps, totlen);
4244 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
4245 " totlen %jd",
4246 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
4247 break;
4248 }
4249 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
4250 return -1;
4251 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
4252 break;
4253 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
4254 break;
4255 /* Set Port Feature request */
4256 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
4257 int optval = (index >> 8) & 0xff;
4258 index &= 0xff;
4259 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4260 return -1;
4261 }
4262
4263 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4264
4265 port = XHCI_PORTSC(cp);
4266 v = xhci_op_read_4(sc, port);
4267 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
4268 v &= ~XHCI_PS_CLEAR;
4269 switch (value) {
4270 case UHF_PORT_ENABLE:
4271 xhci_op_write_4(sc, port, v | XHCI_PS_PED);
4272 break;
4273 case UHF_PORT_SUSPEND:
4274 /* XXX suspend */
4275 break;
4276 case UHF_PORT_RESET:
4277 xhci_op_write_4(sc, port, v | XHCI_PS_PR);
4278 /* Wait for reset to complete. */
4279 for (i = 0; i < USB_PORT_ROOT_RESET_DELAY / 10; i++) {
4280 if (sc->sc_dying) {
4281 return -1;
4282 }
4283 v = xhci_op_read_4(sc, port);
4284 if ((v & XHCI_PS_PR) == 0) {
4285 break;
4286 }
4287 usb_delay_ms(&sc->sc_bus, 10);
4288 }
4289 break;
4290 case UHF_PORT_POWER:
4291 /* XXX power control */
4292 break;
4293 /* XXX more */
4294 case UHF_C_PORT_RESET:
4295 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4296 break;
4297 case UHF_PORT_U1_TIMEOUT:
4298 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4299 return -1;
4300 }
4301 port = XHCI_PORTPMSC(cp);
4302 v = xhci_op_read_4(sc, port);
4303 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4304 index, cp, v, 0);
4305 v &= ~XHCI_PM3_U1TO_SET(0xff);
4306 v |= XHCI_PM3_U1TO_SET(optval);
4307 xhci_op_write_4(sc, port, v);
4308 break;
4309 case UHF_PORT_U2_TIMEOUT:
4310 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4311 return -1;
4312 }
4313 port = XHCI_PORTPMSC(cp);
4314 v = xhci_op_read_4(sc, port);
4315 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4316 index, cp, v, 0);
4317 v &= ~XHCI_PM3_U2TO_SET(0xff);
4318 v |= XHCI_PM3_U2TO_SET(optval);
4319 xhci_op_write_4(sc, port, v);
4320 break;
4321 default:
4322 return -1;
4323 }
4324 }
4325 break;
4326 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
4327 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
4328 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
4329 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
4330 break;
4331 default:
4332 /* default from usbroothub */
4333 return buflen;
4334 }
4335
4336 return totlen;
4337 }
4338
4339 static int
4340 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
4341 void *buf, int buflen)
4342 {
4343 struct xhci_softc *sc = XHCI_BUS2SC(bus);
4344 int actlen;
4345
4346 mutex_enter(&sc->sc_rhlock);
4347 actlen = xhci_roothub_ctrl_locked(bus, req, buf, buflen);
4348 mutex_exit(&sc->sc_rhlock);
4349
4350 return actlen;
4351 }
4352
4353 /* root hub interrupt */
4354
4355 static usbd_status
4356 xhci_root_intr_transfer(struct usbd_xfer *xfer)
4357 {
4358 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4359
4360 /* Pipe isn't running, start first */
4361 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4362 }
4363
4364 /* Wait for roothub port status/change */
4365 static usbd_status
4366 xhci_root_intr_start(struct usbd_xfer *xfer)
4367 {
4368 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4369 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4370
4371 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4372
4373 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4374
4375 if (sc->sc_dying)
4376 return USBD_IOERROR;
4377
4378 KASSERT(sc->sc_intrxfer[bn] == NULL);
4379 sc->sc_intrxfer[bn] = xfer;
4380 xfer->ux_status = USBD_IN_PROGRESS;
4381
4382 return USBD_IN_PROGRESS;
4383 }
4384
4385 static void
4386 xhci_root_intr_abort(struct usbd_xfer *xfer)
4387 {
4388 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4389 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4390
4391 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4392
4393 KASSERT(mutex_owned(&sc->sc_lock));
4394 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
4395
4396 /* If xfer has already completed, nothing to do here. */
4397 if (sc->sc_intrxfer[bn] == NULL)
4398 return;
4399
4400 /*
4401 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
4402 * Cancel it.
4403 */
4404 KASSERT(sc->sc_intrxfer[bn] == xfer);
4405 xfer->ux_status = USBD_CANCELLED;
4406 usb_transfer_complete(xfer);
4407 }
4408
4409 static void
4410 xhci_root_intr_close(struct usbd_pipe *pipe)
4411 {
4412 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
4413 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
4414 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4415
4416 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4417
4418 KASSERT(mutex_owned(&sc->sc_lock));
4419
4420 /*
4421 * Caller must guarantee the xfer has completed first, by
4422 * closing the pipe only after normal completion or an abort.
4423 */
4424 KASSERT(sc->sc_intrxfer[bn] == NULL);
4425 }
4426
4427 static void
4428 xhci_root_intr_done(struct usbd_xfer *xfer)
4429 {
4430 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4431 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4432
4433 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4434
4435 KASSERT(mutex_owned(&sc->sc_lock));
4436
4437 /* Claim the xfer so it doesn't get completed again. */
4438 KASSERT(sc->sc_intrxfer[bn] == xfer);
4439 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
4440 sc->sc_intrxfer[bn] = NULL;
4441 }
4442
4443 /* -------------- */
4444 /* device control */
4445
4446 static usbd_status
4447 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
4448 {
4449 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4450
4451 /* Pipe isn't running, start first */
4452 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4453 }
4454
4455 static usbd_status
4456 xhci_device_ctrl_start(struct usbd_xfer *xfer)
4457 {
4458 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4459 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4460 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4461 struct xhci_ring * const tr = xs->xs_xr[dci];
4462 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4463 usb_device_request_t * const req = &xfer->ux_request;
4464 const bool isread = usbd_xfer_isread(xfer);
4465 const uint32_t len = UGETW(req->wLength);
4466 usb_dma_t * const dma = &xfer->ux_dmabuf;
4467 uint64_t parameter;
4468 uint32_t status;
4469 uint32_t control;
4470 u_int i;
4471 const bool polling = xhci_polling_p(sc);
4472
4473 XHCIHIST_FUNC();
4474 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
4475 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
4476 UGETW(req->wIndex), UGETW(req->wLength));
4477
4478 KASSERT(polling || mutex_owned(&sc->sc_lock));
4479
4480 /* we rely on the bottom bits for extra info */
4481 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %p", xfer);
4482
4483 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);
4484
4485 if (tr->is_halted)
4486 goto out;
4487
4488 i = 0;
4489
4490 /* setup phase */
4491 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */
4492 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
4493 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
4494 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
4495 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
4496 XHCI_TRB_3_IDT_BIT;
4497 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4498
4499 if (len != 0) {
4500 /* data phase */
4501 parameter = DMAADDR(dma, 0);
4502 KASSERTMSG(len <= 0x10000, "len %d", len);
4503 status = XHCI_TRB_2_IRQ_SET(0) |
4504 XHCI_TRB_2_TDSZ_SET(0) |
4505 XHCI_TRB_2_BYTES_SET(len);
4506 control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
4507 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
4508 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4509 XHCI_TRB_3_IOC_BIT;
4510 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4511
4512 usb_syncmem(dma, 0, len,
4513 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4514 }
4515
4516 parameter = 0;
4517 status = XHCI_TRB_2_IRQ_SET(0);
4518 /* the status stage has inverted direction */
4519 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
4520 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
4521 XHCI_TRB_3_IOC_BIT;
4522 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4523
4524 if (!polling)
4525 mutex_enter(&tr->xr_lock);
4526 xhci_ring_put_xfer(sc, tr, xx, i);
4527 if (!polling)
4528 mutex_exit(&tr->xr_lock);
4529
4530 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4531
4532 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4533 xfer->ux_status = USBD_IN_PROGRESS;
4534 usbd_xfer_schedule_timeout(xfer);
4535 } else {
4536 /*
4537 * We must be coming from xhci_pipe_restart -- timeout
4538 * already set up, nothing to do.
4539 */
4540 }
4541 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4542
4543 return USBD_IN_PROGRESS;
4544 }
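
/*
 * Example of the TD built above, for illustration only: a standard
 * GET_DESCRIPTOR(device) request with wLength = 18 produces
 *   TRB0: SETUP_STAGE, IDT, TRT_IN, 8 bytes (the request itself),
 *   TRB1: DATA_STAGE, DIR_IN, ISP, IOC, 18 bytes,
 *   TRB2: STATUS_STAGE, direction OUT (inverted from the data stage), IOC.
 */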
4545
4546 static void
4547 xhci_device_ctrl_done(struct usbd_xfer *xfer)
4548 {
4549 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4550 usb_device_request_t *req = &xfer->ux_request;
4551 int len = UGETW(req->wLength);
4552 int rd = req->bmRequestType & UT_READ;
4553
4554 if (len)
4555 usb_syncmem(&xfer->ux_dmabuf, 0, len,
4556 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4557 }
4558
4559 static void
4560 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
4561 {
4562 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4563
4564 usbd_xfer_abort(xfer);
4565 }
4566
4567 static void
4568 xhci_device_ctrl_close(struct usbd_pipe *pipe)
4569 {
4570 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4571
4572 xhci_close_pipe(pipe);
4573 }
4574
4575 /* ------------------ */
4576 /* device isochronous */
4577
4578 static usbd_status
4579 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
4580 {
4581 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4582
4583 return xhci_device_isoc_enter(xfer);
4584 }
4585
4586 static usbd_status
4587 xhci_device_isoc_enter(struct usbd_xfer *xfer)
4588 {
4589 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4590 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4591 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4592 struct xhci_ring * const tr = xs->xs_xr[dci];
4593 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4594 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
4595 usb_dma_t * const dma = &xfer->ux_dmabuf;
4596 uint64_t parameter;
4597 uint32_t status;
4598 uint32_t control;
4599 uint32_t offs;
4600 int i, ival;
4601 const bool polling = xhci_polling_p(sc);
4602 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
4603 const uint16_t mps = UE_GET_SIZE(MPS);
4604 const uint8_t maxb = xpipe->xp_maxb;
4605
4606 XHCIHIST_FUNC();
4607 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4608 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4609
4610 KASSERT(polling || mutex_owned(&sc->sc_lock));
4611
4612 if (sc->sc_dying)
4613 return USBD_IOERROR;
4614
4615 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
4616 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4617
4618 const bool isread = usbd_xfer_isread(xfer);
4619 if (xfer->ux_length)
4620 usb_syncmem(dma, 0, xfer->ux_length,
4621 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4622
4623 ival = xpipe->xp_ival;
4624 if (ival >= 0 && ival <= 15)
4625 ival = 1 << ival;
4626 else
4627 ival = 1; /* fake something up */
4628
4629 const unsigned mfmask = XHCI_MFINDEX_GET(~(uint32_t)0);
4630
4631 if (xpipe->xp_isoc_next == -1) {
4632 uint32_t mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
4633
4634 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
4635 mfindex = XHCI_MFINDEX_GET(mfindex);
4636
4637 /* Start Frame = MFINDEX + IST + 1 */
4638 mfindex += sc->sc_isthresh + 1;
4639 xpipe->xp_isoc_next = roundup2(mfindex, ival) & mfmask;
4640 }
4641
4642 offs = 0;
4643 for (i = 0; i < xfer->ux_nframes; i++) {
4644 const uint32_t len = xfer->ux_frlengths[i];
4645 const unsigned tdpc = howmany(len, mps);
4646 const unsigned tbc = howmany(tdpc, maxb) - 1;
4647 const unsigned tlbpc1 = tdpc % maxb;
4648 const unsigned tlbpc = tlbpc1 ? tlbpc1 - 1 : maxb - 1;
4649 const unsigned frid = xpipe->xp_isoc_next /
4650 USB_UFRAMES_PER_FRAME;
4651
4652 KASSERTMSG(len <= 0x10000, "len %d", len);
4653 parameter = DMAADDR(dma, offs);
4654 status = XHCI_TRB_2_IRQ_SET(0) |
4655 XHCI_TRB_2_TDSZ_SET(0) |
4656 XHCI_TRB_2_BYTES_SET(len);
4657 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
4658 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4659 XHCI_TRB_3_TBC_SET(tbc) |
4660 XHCI_TRB_3_TLBPC_SET(tlbpc) |
4661 XHCI_TRB_3_IOC_BIT;
4662 if (XHCI_HCC_CFC(sc->sc_hcc)) {
4663 control |= XHCI_TRB_3_FRID_SET(frid);
4664 #if 0
4665 } else if (xpipe->xp_isoc_next == -1) {
4666 control |= XHCI_TRB_3_FRID_SET(frid);
4667 #endif
4668 } else {
4669 control |= XHCI_TRB_3_ISO_SIA_BIT;
4670 }
4671 #if 0
4672 if (i != xfer->ux_nframes - 1)
4673 control |= XHCI_TRB_3_BEI_BIT;
4674 #endif
4675 xhci_xfer_put_trb(xx, i, parameter, status, control);
4676
4677 xpipe->xp_isoc_next = (xpipe->xp_isoc_next + ival) & mfmask;
4678 offs += len;
4679 }
4680
4681 xx->xx_isoc_done = 0;
4682
4683 if (!polling)
4684 mutex_enter(&tr->xr_lock);
4685 xhci_ring_put_xfer(sc, tr, xx, i);
4686 if (!polling)
4687 mutex_exit(&tr->xr_lock);
4688
4689 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4690 xfer->ux_status = USBD_IN_PROGRESS;
4691 usbd_xfer_schedule_timeout(xfer);
4692
4693 return USBD_IN_PROGRESS;
4694 }
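
/*
 * Worked example for the TBC/TLBPC values computed above (illustrative
 * numbers): with mps = 1024, maxb = 3 (bMaxBurst 2 plus 1) and a
 * 3000-byte frame, tdpc = howmany(3000, 1024) = 3 packets,
 * tbc = howmany(3, 3) - 1 = 0, tlbpc1 = 3 % 3 = 0 and therefore
 * tlbpc = maxb - 1 = 2, i.e. all three packets travel in a single burst.
 */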
4695
4696 static void
4697 xhci_device_isoc_abort(struct usbd_xfer *xfer)
4698 {
4699 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4700
4701 usbd_xfer_abort(xfer);
4702 }
4703
4704 static void
4705 xhci_device_isoc_close(struct usbd_pipe *pipe)
4706 {
4707 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4708
4709 xhci_close_pipe(pipe);
4710 }
4711
4712 static void
4713 xhci_device_isoc_done(struct usbd_xfer *xfer)
4714 {
4715 #ifdef USB_DEBUG
4716 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4717 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4718 #endif
4719 const bool isread = usbd_xfer_isread(xfer);
4720
4721 XHCIHIST_FUNC();
4722 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4723 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4724
4725 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4726 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4727 }
4728
4729 /* ----------- */
4730 /* device bulk */
4731
4732 static usbd_status
4733 xhci_device_bulk_transfer(struct usbd_xfer *xfer)
4734 {
4735 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4736
4737 /* Pipe isn't running, so start it first. */
4738 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4739 }
4740
4741 static usbd_status
4742 xhci_device_bulk_start(struct usbd_xfer *xfer)
4743 {
4744 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4745 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4746 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4747 struct xhci_ring * const tr = xs->xs_xr[dci];
4748 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4749 const uint32_t len = xfer->ux_length;
4750 usb_dma_t * const dma = &xfer->ux_dmabuf;
4751 uint64_t parameter;
4752 uint32_t status;
4753 uint32_t control;
4754 u_int i = 0;
4755 const bool polling = xhci_polling_p(sc);
4756
4757 XHCIHIST_FUNC();
4758 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4759 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4760
4761 KASSERT(polling || mutex_owned(&sc->sc_lock));
4762
4763 if (sc->sc_dying)
4764 return USBD_IOERROR;
4765
4766 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4767
4768 if (tr->is_halted)
4769 goto out;
4770
4771 parameter = DMAADDR(dma, 0);
4772 const bool isread = usbd_xfer_isread(xfer);
4773 if (len)
4774 usb_syncmem(dma, 0, len,
4775 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4776
4777 /*
4778 * XXX: (dsl) The physical buffer must not cross a 64k boundary.
4779 	 * If the user-supplied buffer crosses such a boundary then 2
4780 	 * (or more) TRBs should be used.
4781 	 * If multiple TRBs are used, the td_size field must be set correctly.
4782 	 * For v1.0 devices (like Ivy Bridge) this is the number of USB data
4783 	 * blocks needed to complete the transfer.
4784 	 * Setting it to 1 in the last TRB causes an extra zero-length
4785 	 * data block to be sent.
4786 	 * The earlier documentation differs; I don't know how it behaves.
4787 */
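#if 0
	/*
	 * Illustration only (not wired into the driver): how such a split
	 * could be computed, per the note above.  "first" and "rest" are
	 * hypothetical locals.
	 */
	const uint32_t first = 0x10000 - (DMAADDR(dma, 0) & 0xffff);
	const uint32_t rest = len - first;
#endif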
4788 KASSERTMSG(len <= 0x10000, "len %d", len);
4789 status = XHCI_TRB_2_IRQ_SET(0) |
4790 XHCI_TRB_2_TDSZ_SET(0) |
4791 XHCI_TRB_2_BYTES_SET(len);
4792 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4793 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4794 XHCI_TRB_3_IOC_BIT;
4795 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4796
4797 if (!isread && (xfer->ux_flags & USBD_FORCE_SHORT_XFER)) {
4798 status = XHCI_TRB_2_IRQ_SET(0) |
4799 XHCI_TRB_2_TDSZ_SET(0) |
4800 XHCI_TRB_2_BYTES_SET(0);
4801 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4802 }
4803
4804 if (!polling)
4805 mutex_enter(&tr->xr_lock);
4806 xhci_ring_put_xfer(sc, tr, xx, i);
4807 if (!polling)
4808 mutex_exit(&tr->xr_lock);
4809
4810 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4811
4812 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4813 xfer->ux_status = USBD_IN_PROGRESS;
4814 usbd_xfer_schedule_timeout(xfer);
4815 } else {
4816 /*
4817 * We must be coming from xhci_pipe_restart -- timeout
4818 * already set up, nothing to do.
4819 */
4820 }
4821 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4822
4823 return USBD_IN_PROGRESS;
4824 }
4825
4826 static void
4827 xhci_device_bulk_done(struct usbd_xfer *xfer)
4828 {
4829 #ifdef USB_DEBUG
4830 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4831 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4832 #endif
4833 const bool isread = usbd_xfer_isread(xfer);
4834
4835 XHCIHIST_FUNC();
4836 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4837 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4838
4839 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4840 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4841 }
4842
4843 static void
4844 xhci_device_bulk_abort(struct usbd_xfer *xfer)
4845 {
4846 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4847
4848 usbd_xfer_abort(xfer);
4849 }
4850
4851 static void
4852 xhci_device_bulk_close(struct usbd_pipe *pipe)
4853 {
4854 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4855
4856 xhci_close_pipe(pipe);
4857 }
4858
4859 /* ---------------- */
4860 /* device interrupt */
4861
4862 static usbd_status
4863 xhci_device_intr_transfer(struct usbd_xfer *xfer)
4864 {
4865 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4866
4867 /* Pipe isn't running, so start it first. */
4868 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4869 }
4870
4871 static usbd_status
4872 xhci_device_intr_start(struct usbd_xfer *xfer)
4873 {
4874 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4875 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4876 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4877 struct xhci_ring * const tr = xs->xs_xr[dci];
4878 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4879 const uint32_t len = xfer->ux_length;
4880 const bool polling = xhci_polling_p(sc);
4881 usb_dma_t * const dma = &xfer->ux_dmabuf;
4882 uint64_t parameter;
4883 uint32_t status;
4884 uint32_t control;
4885 u_int i = 0;
4886
4887 XHCIHIST_FUNC();
4888 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4889 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4890
4891 KASSERT(polling || mutex_owned(&sc->sc_lock));
4892
4893 if (sc->sc_dying)
4894 return USBD_IOERROR;
4895
4896 if (tr->is_halted)
4897 goto out;
4898
4899 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4900
4901 const bool isread = usbd_xfer_isread(xfer);
4902 if (len)
4903 usb_syncmem(dma, 0, len,
4904 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4905
4906 parameter = DMAADDR(dma, 0);
4907 KASSERTMSG(len <= 0x10000, "len %d", len);
4908 status = XHCI_TRB_2_IRQ_SET(0) |
4909 XHCI_TRB_2_TDSZ_SET(0) |
4910 XHCI_TRB_2_BYTES_SET(len);
4911 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4912 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
4913 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4914
4915 if (!polling)
4916 mutex_enter(&tr->xr_lock);
4917 xhci_ring_put_xfer(sc, tr, xx, i);
4918 if (!polling)
4919 mutex_exit(&tr->xr_lock);
4920
4921 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4922
4923 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4924 xfer->ux_status = USBD_IN_PROGRESS;
4925 usbd_xfer_schedule_timeout(xfer);
4926 } else {
4927 /*
4928 * We must be coming from xhci_pipe_restart -- timeout
4929 * already set up, nothing to do.
4930 */
4931 }
4932 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4933
4934 return USBD_IN_PROGRESS;
4935 }
4936
4937 static void
4938 xhci_device_intr_done(struct usbd_xfer *xfer)
4939 {
4940 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4941 #ifdef USB_DEBUG
4942 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4943 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4944 #endif
4945 const bool isread = usbd_xfer_isread(xfer);
4946
4947 XHCIHIST_FUNC();
4948 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4949 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4950
4951 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4952
4953 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4954 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4955 }
4956
4957 static void
4958 xhci_device_intr_abort(struct usbd_xfer *xfer)
4959 {
4960 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4961
4962 XHCIHIST_FUNC();
4963 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);
4964
4965 KASSERT(mutex_owned(&sc->sc_lock));
4966 usbd_xfer_abort(xfer);
4967 }
4968
4969 static void
4970 xhci_device_intr_close(struct usbd_pipe *pipe)
4971 {
4972 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
4973
4974 XHCIHIST_FUNC();
4975 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);
4976
4977 xhci_close_pipe(pipe);
4978 }
4979