     1 /* $NetBSD: xhci.c,v 1.178 2023/04/27 06:30:09 skrll Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * USB rev 2.0 and rev 3.1 specification
31 * http://www.usb.org/developers/docs/
32 * xHCI rev 1.1 specification
33 * http://www.intel.com/technology/usb/spec.htm
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.178 2023/04/27 06:30:09 skrll Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_usb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/kmem.h>
47 #include <sys/device.h>
48 #include <sys/select.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/sysctl.h>
56
57 #include <machine/endian.h>
58
59 #include <dev/usb/usb.h>
60 #include <dev/usb/usbdi.h>
61 #include <dev/usb/usbdivar.h>
62 #include <dev/usb/usbdi_util.h>
63 #include <dev/usb/usbhist.h>
64 #include <dev/usb/usb_mem.h>
65 #include <dev/usb/usb_quirks.h>
66
67 #include <dev/usb/xhcireg.h>
68 #include <dev/usb/xhcivar.h>
69 #include <dev/usb/usbroothub.h>
70
71
72 #ifdef USB_DEBUG
73 #ifndef XHCI_DEBUG
74 #define xhcidebug 0
75 #else /* !XHCI_DEBUG */
76 #define HEXDUMP(a, b, c) \
77 do { \
78 if (xhcidebug > 0) \
79 hexdump(printf, a, b, c); \
80 } while (/*CONSTCOND*/0)
81 static int xhcidebug = 0;
82
83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
84 {
85 int err;
86 const struct sysctlnode *rnode;
87 const struct sysctlnode *cnode;
88
89 err = sysctl_createv(clog, 0, NULL, &rnode,
90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
91 SYSCTL_DESCR("xhci global controls"),
92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
93
94 if (err)
95 goto fail;
96
97 /* control debugging printfs */
98 err = sysctl_createv(clog, 0, &rnode, &cnode,
99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
100 "debug", SYSCTL_DESCR("Enable debugging output"),
101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
102 if (err)
103 goto fail;
104
105 return;
106 fail:
107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
108 }
109
110 #endif /* !XHCI_DEBUG */
111 #endif /* USB_DEBUG */
112
113 #ifndef HEXDUMP
114 #define HEXDUMP(a, b, c)
115 #endif
116
117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
119 #define XHCIHIST_FUNC() USBHIST_FUNC()
120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug)
121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)
123
124 #define XHCI_DCI_SLOT 0
125 #define XHCI_DCI_EP_CONTROL 1
126
127 #define XHCI_ICI_INPUT_CONTROL 0
128
129 struct xhci_pipe {
130 struct usbd_pipe xp_pipe;
131 struct usb_task xp_async_task;
132 int16_t xp_isoc_next; /* next frame */
133 uint8_t xp_maxb; /* max burst */
134 uint8_t xp_mult;
135 };
136
137 #define XHCI_COMMAND_RING_TRBS 256
138 #define XHCI_EVENT_RING_TRBS 256
139 #define XHCI_EVENT_RING_SEGMENTS 1
140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT
141
142 static usbd_status xhci_open(struct usbd_pipe *);
143 static void xhci_close_pipe(struct usbd_pipe *);
144 static int xhci_intr1(struct xhci_softc * const);
145 static void xhci_softintr(void *);
146 static void xhci_poll(struct usbd_bus *);
147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
149 static void xhci_abortx(struct usbd_xfer *);
150 static bool xhci_dying(struct usbd_bus *);
151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
153 struct usbd_port *);
154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
155 void *, int);
156
157 static void xhci_pipe_restart(struct usbd_pipe *);
158 static void xhci_pipe_restart_async_task(void *);
159 static void xhci_pipe_restart_async(struct usbd_pipe *);
160
161 static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
162 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
163 static void xhci_reset_endpoint(struct usbd_pipe *);
164 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *,
165 struct xhci_slot *, u_int, uint32_t);
166 static usbd_status xhci_stop_endpoint(struct usbd_pipe *);
167
168 static void xhci_host_dequeue(struct xhci_ring * const);
169 static void xhci_set_dequeue(struct usbd_pipe *);
170
171 static usbd_status xhci_do_command(struct xhci_softc * const,
172 struct xhci_soft_trb * const, int);
173 static usbd_status xhci_do_command_locked(struct xhci_softc * const,
174 struct xhci_soft_trb * const, int);
175 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
176 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *);
177 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
178 static usbd_status xhci_enable_slot(struct xhci_softc * const,
179 uint8_t * const);
180 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
181 static usbd_status xhci_address_device(struct xhci_softc * const,
182 uint64_t, uint8_t, bool);
183 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
184 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
185 struct xhci_slot * const, u_int);
186 static usbd_status xhci_ring_init(struct xhci_softc * const,
187 struct xhci_ring **, size_t, size_t);
188 static void xhci_ring_free(struct xhci_softc * const,
189 struct xhci_ring ** const);
190
191 static void xhci_setup_ctx(struct usbd_pipe *);
192 static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
193 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
194 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
195 static uint32_t xhci_bival2ival(uint32_t, uint32_t);
196
197 static void xhci_noop(struct usbd_pipe *);
198
199 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
200 static usbd_status xhci_root_intr_start(struct usbd_xfer *);
201 static void xhci_root_intr_abort(struct usbd_xfer *);
202 static void xhci_root_intr_close(struct usbd_pipe *);
203 static void xhci_root_intr_done(struct usbd_xfer *);
204
205 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
206 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
207 static void xhci_device_ctrl_abort(struct usbd_xfer *);
208 static void xhci_device_ctrl_close(struct usbd_pipe *);
209 static void xhci_device_ctrl_done(struct usbd_xfer *);
210
211 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
212 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *);
213 static void xhci_device_isoc_abort(struct usbd_xfer *);
214 static void xhci_device_isoc_close(struct usbd_pipe *);
215 static void xhci_device_isoc_done(struct usbd_xfer *);
216
217 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
218 static usbd_status xhci_device_intr_start(struct usbd_xfer *);
219 static void xhci_device_intr_abort(struct usbd_xfer *);
220 static void xhci_device_intr_close(struct usbd_pipe *);
221 static void xhci_device_intr_done(struct usbd_xfer *);
222
223 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
224 static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
225 static void xhci_device_bulk_abort(struct usbd_xfer *);
226 static void xhci_device_bulk_close(struct usbd_pipe *);
227 static void xhci_device_bulk_done(struct usbd_xfer *);
228
229 static const struct usbd_bus_methods xhci_bus_methods = {
230 .ubm_open = xhci_open,
231 .ubm_softint = xhci_softintr,
232 .ubm_dopoll = xhci_poll,
233 .ubm_allocx = xhci_allocx,
234 .ubm_freex = xhci_freex,
235 .ubm_abortx = xhci_abortx,
236 .ubm_dying = xhci_dying,
237 .ubm_getlock = xhci_get_lock,
238 .ubm_newdev = xhci_new_device,
239 .ubm_rhctrl = xhci_roothub_ctrl,
240 };
241
242 static const struct usbd_pipe_methods xhci_root_intr_methods = {
243 .upm_transfer = xhci_root_intr_transfer,
244 .upm_start = xhci_root_intr_start,
245 .upm_abort = xhci_root_intr_abort,
246 .upm_close = xhci_root_intr_close,
247 .upm_cleartoggle = xhci_noop,
248 .upm_done = xhci_root_intr_done,
249 };
250
251
252 static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
253 .upm_transfer = xhci_device_ctrl_transfer,
254 .upm_start = xhci_device_ctrl_start,
255 .upm_abort = xhci_device_ctrl_abort,
256 .upm_close = xhci_device_ctrl_close,
257 .upm_cleartoggle = xhci_noop,
258 .upm_done = xhci_device_ctrl_done,
259 };
260
261 static const struct usbd_pipe_methods xhci_device_isoc_methods = {
262 .upm_transfer = xhci_device_isoc_transfer,
263 .upm_abort = xhci_device_isoc_abort,
264 .upm_close = xhci_device_isoc_close,
265 .upm_cleartoggle = xhci_noop,
266 .upm_done = xhci_device_isoc_done,
267 };
268
269 static const struct usbd_pipe_methods xhci_device_bulk_methods = {
270 .upm_transfer = xhci_device_bulk_transfer,
271 .upm_start = xhci_device_bulk_start,
272 .upm_abort = xhci_device_bulk_abort,
273 .upm_close = xhci_device_bulk_close,
274 .upm_cleartoggle = xhci_noop,
275 .upm_done = xhci_device_bulk_done,
276 };
277
278 static const struct usbd_pipe_methods xhci_device_intr_methods = {
279 .upm_transfer = xhci_device_intr_transfer,
280 .upm_start = xhci_device_intr_start,
281 .upm_abort = xhci_device_intr_abort,
282 .upm_close = xhci_device_intr_close,
283 .upm_cleartoggle = xhci_noop,
284 .upm_done = xhci_device_intr_done,
285 };
286
287 static inline uint32_t
288 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
289 {
290 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
291 }
292
293 static inline uint32_t
294 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset)
295 {
296 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
297 }
298
299 static inline uint32_t
300 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
301 {
302 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
303 }
304
305 static inline void
306 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
307 uint32_t value)
308 {
309 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
310 }
311
312 #if 0 /* unused */
313 static inline void
314 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
315 uint32_t value)
316 {
317 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
318 }
319 #endif /* unused */
320
321 static inline uint32_t
322 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
323 {
324 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
325 }
326
327 static inline uint32_t
328 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
329 {
330 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
331 }
332
333 static inline void
334 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
335 uint32_t value)
336 {
337 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
338 }
339
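/*
 * 64-bit register access: when XHCI_USE_BUS_SPACE_8 is not defined, the
 * 64-bit operational and runtime registers below are accessed as two
 * 32-bit bus_space transactions, low dword first.  The xHCI spec allows
 * 64-bit registers to be accessed as two 32-bit quantities.
 */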
340 static inline uint64_t
341 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
342 {
343 uint64_t value;
344
345 #ifdef XHCI_USE_BUS_SPACE_8
346 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
347 #else
348 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
349 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
350 offset + 4) << 32;
351 #endif
352
353 return value;
354 }
355
356 static inline void
357 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
358 uint64_t value)
359 {
360 #ifdef XHCI_USE_BUS_SPACE_8
361 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
362 #else
363 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
364 (value >> 0) & 0xffffffff);
365 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
366 (value >> 32) & 0xffffffff);
367 #endif
368 }
369
370 static inline uint32_t
371 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
372 {
373 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
374 }
375
376 static inline void
377 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
378 uint32_t value)
379 {
380 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
381 }
382
383 static inline uint64_t
384 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
385 {
386 uint64_t value;
387
388 #ifdef XHCI_USE_BUS_SPACE_8
389 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
390 #else
391 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
392 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
393 offset + 4) << 32;
394 #endif
395
396 return value;
397 }
398
399 static inline void
400 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
401 uint64_t value)
402 {
403 #ifdef XHCI_USE_BUS_SPACE_8
404 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
405 #else
406 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
407 (value >> 0) & 0xffffffff);
408 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
409 (value >> 32) & 0xffffffff);
410 #endif
411 }
412
413 #if 0 /* unused */
414 static inline uint32_t
415 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
416 {
417 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
418 }
419 #endif /* unused */
420
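/*
 * Doorbell register write (xHCI 5.6).  Doorbell 0 is the command ring
 * doorbell and is written with 0; doorbells 1..MaxSlots belong to device
 * slots and are written with the target endpoint's DCI.
 */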
421 static inline void
422 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
423 uint32_t value)
424 {
425 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
426 }
427
428 /* --- */
429
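/*
 * Map an endpoint descriptor to the xHCI Endpoint Type field (xHCI 6.2.3):
 * 1/2/3 = Isoch/Bulk/Interrupt OUT, 4 = Control, 5/6/7 = Isoch/Bulk/
 * Interrupt IN.
 */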
430 static inline uint8_t
431 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
432 {
433 u_int eptype = 0;
434
435 switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
436 case UE_CONTROL:
437 eptype = 0x0;
438 break;
439 case UE_ISOCHRONOUS:
440 eptype = 0x1;
441 break;
442 case UE_BULK:
443 eptype = 0x2;
444 break;
445 case UE_INTERRUPT:
446 eptype = 0x3;
447 break;
448 }
449
450 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
451 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
452 return eptype | 0x4;
453 else
454 return eptype;
455 }
456
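/*
 * Device Context Index (DCI) for an endpoint: DCI = endpoint number * 2,
 * plus 1 for control and IN endpoints.  For example, the default control
 * endpoint is DCI 1, endpoint 1 OUT is DCI 2 and endpoint 1 IN is DCI 3.
 */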
457 static u_int
458 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
459 {
460 /* xHCI 1.0 section 4.5.1 */
461 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
462 u_int in = 0;
463
464 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
465 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
466 in = 1;
467
468 return epaddr * 2 + in;
469 }
470
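/*
 * The input context prepends an Input Control Context at index 0
 * (xHCI 6.2.5), so Input Context Index = DCI + 1.
 */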
471 static inline u_int
472 xhci_dci_to_ici(const u_int i)
473 {
474 return i + 1;
475 }
476
477 static inline void *
478 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
479 const u_int dci)
480 {
481 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
482 }
483
484 #if 0 /* unused */
485 static inline bus_addr_t
486 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
487 const u_int dci)
488 {
489 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
490 }
491 #endif /* unused */
492
493 static inline void *
494 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
495 const u_int ici)
496 {
497 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
498 }
499
500 static inline bus_addr_t
501 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
502 const u_int ici)
503 {
504 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
505 }
506
507 static inline struct xhci_trb *
508 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
509 {
510 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
511 }
512
513 static inline bus_addr_t
514 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
515 {
516 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
517 }
518
519 static inline void
520 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx,
521 uint64_t parameter, uint32_t status, uint32_t control)
522 {
523 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb);
524 xx->xx_trb[idx].trb_0 = parameter;
525 xx->xx_trb[idx].trb_2 = status;
526 xx->xx_trb[idx].trb_3 = control;
527 }
528
529 static inline void
530 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
531 uint32_t control)
532 {
533 trb->trb_0 = htole64(parameter);
534 trb->trb_2 = htole32(status);
535 trb->trb_3 = htole32(control);
536 }
537
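/*
 * Translate the TRB bus address reported in an event TRB back into an
 * index on the given ring.  Returns nonzero if the address does not fall
 * on a TRB boundary within the ring.
 */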
538 static int
539 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
540 {
541 /* base address of TRBs */
542 bus_addr_t trbp = xhci_ring_trbp(xr, 0);
543
544 /* trb_0 range sanity check */
545 if (trb_0 == 0 || trb_0 < trbp ||
546 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
547 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
548 return 1;
549 }
550 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
551 return 0;
552 }
553
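/*
 * Read the current endpoint state (Disabled, Running, Halted, Stopped or
 * Error, xHCI 6.2.3) from the slot's output device context.
 */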
554 static unsigned int
555 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
556 u_int dci)
557 {
558 uint32_t *cp;
559
560 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
561 cp = xhci_slot_get_dcv(sc, xs, dci);
562 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
563 }
564
565 static inline unsigned int
566 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
567 {
568 const unsigned int port = ctlrport - 1;
569 const uint8_t bit = __BIT(port % NBBY);
570
571 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
572 }
573
574 /*
575 * Return the roothub port for a controller port. Both are 1..n.
576 */
577 static inline unsigned int
578 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
579 {
580
581 return sc->sc_ctlrportmap[ctrlport - 1];
582 }
583
584 /*
585 * Return the controller port for a bus roothub port. Both are 1..n.
586 */
587 static inline unsigned int
588 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
589 unsigned int rhport)
590 {
591
592 return sc->sc_rhportmap[bn][rhport - 1];
593 }
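/*
 * Illustrative example (the layout varies by controller): with four USB3
 * ports at controller ports 1-4 and four USB2 ports at 5-8, controller
 * port 6 belongs to bus 1 (the USB 2 roothub) and maps to roothub port 2,
 * i.e. xhci_ctlrport2bus() returns 1 and xhci_ctlrport2rhport() returns 2.
 */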
594
595 /* --- */
596
597 void
598 xhci_childdet(device_t self, device_t child)
599 {
600 struct xhci_softc * const sc = device_private(self);
601
602 mutex_enter(&sc->sc_intr_lock);
603 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
604 if (child == sc->sc_child2)
605 sc->sc_child2 = NULL;
606 else if (child == sc->sc_child)
607 sc->sc_child = NULL;
608 mutex_exit(&sc->sc_intr_lock);
609 }
610
611 int
612 xhci_detach(struct xhci_softc *sc, int flags)
613 {
614 int rv = 0;
615
616 if (sc->sc_child2 != NULL) {
617 rv = config_detach(sc->sc_child2, flags);
618 if (rv != 0)
619 return rv;
620 KASSERT(sc->sc_child2 == NULL);
621 }
622
623 if (sc->sc_child != NULL) {
624 rv = config_detach(sc->sc_child, flags);
625 if (rv != 0)
626 return rv;
627 KASSERT(sc->sc_child == NULL);
628 }
629
630 /* XXX unconfigure/free slots */
631
632 /* verify: */
633 xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
634 xhci_op_write_4(sc, XHCI_USBCMD, 0);
635 /* do we need to wait for stop? */
636
637 xhci_op_write_8(sc, XHCI_CRCR, 0);
638 xhci_ring_free(sc, &sc->sc_cr);
639 cv_destroy(&sc->sc_command_cv);
640 cv_destroy(&sc->sc_cmdbusy_cv);
641
642 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
643 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
644 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY);
645 xhci_ring_free(sc, &sc->sc_er);
646
647 usb_freemem(&sc->sc_eventst_dma);
648
649 xhci_op_write_8(sc, XHCI_DCBAAP, 0);
650 usb_freemem(&sc->sc_dcbaa_dma);
651
652 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);
653
654 kmem_free(sc->sc_ctlrportbus,
655 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
656 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));
657
658 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
659 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
660 }
661
662 mutex_destroy(&sc->sc_rhlock);
663 mutex_destroy(&sc->sc_lock);
664 mutex_destroy(&sc->sc_intr_lock);
665
666 pool_cache_destroy(sc->sc_xferpool);
667
668 return rv;
669 }
670
671 int
672 xhci_activate(device_t self, enum devact act)
673 {
674 struct xhci_softc * const sc = device_private(self);
675
676 switch (act) {
677 case DVACT_DEACTIVATE:
678 sc->sc_dying = true;
679 return 0;
680 default:
681 return EOPNOTSUPP;
682 }
683 }
684
685 bool
686 xhci_suspend(device_t self, const pmf_qual_t *qual)
687 {
688 struct xhci_softc * const sc = device_private(self);
689 size_t i, j, bn, dci;
690 int port;
691 uint32_t v;
692 usbd_status err;
693 bool ok = false;
694
695 XHCIHIST_FUNC(); XHCIHIST_CALLED();
696
697 /*
698 * Block issuance of new commands, and wait for all pending
699 * commands to complete.
700 */
701 mutex_enter(&sc->sc_lock);
702 KASSERT(sc->sc_suspender == NULL);
703 KASSERT(!sc->sc_suspendresume_failed);
704 sc->sc_suspender = curlwp;
705 while (sc->sc_command_addr != 0)
706 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
707 mutex_exit(&sc->sc_lock);
708
709 /*
710 * Block roothub xfers which might touch portsc registers until
711 * we're done suspending.
712 */
713 mutex_enter(&sc->sc_rhlock);
714
715 /*
716 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
717 * xHCI Power Management, p. 342
718 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342
719 */
720
721 /*
722 * `1. Stop all USB activity by issuing Stop Endpoint Commands
723 * for Busy endpoints in the Running state. If the Force
724 * Save Context Capability (FSC = ``0'') is not supported,
725 * then Stop Endpoint Commands shall be issued for all idle
726 * endpoints in the Running state as well. The Stop
727 * Endpoint Command causes the xHC to update the respective
728 * Endpoint or Stream Contexts in system memory, e.g. the
729 * TR Dequeue Pointer, DCS, etc. fields. Refer to
730 * Implementation Note "0".'
731 */
732 for (i = 0; i < sc->sc_maxslots; i++) {
733 struct xhci_slot *xs = &sc->sc_slots[i];
734
735 /* Skip if the slot is not in use. */
736 if (xs->xs_idx == 0)
737 continue;
738
739 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
740 /* Skip if the endpoint is not Running. */
741 /* XXX What about Busy? */
742 if (xhci_get_epstate(sc, xs, dci) !=
743 XHCI_EPSTATE_RUNNING)
744 continue;
745
746 /* Stop endpoint. */
747 mutex_enter(&sc->sc_lock);
748 err = xhci_stop_endpoint_cmd(sc, xs, dci,
749 XHCI_TRB_3_SUSP_EP_BIT);
750 mutex_exit(&sc->sc_lock);
751 if (err) {
752 device_printf(self, "failed to stop endpoint"
753 " slot %zu dci %zu err %d\n",
754 i, dci, err);
755 goto out;
756 }
757 }
758 }
759
760 /*
761 * Next, suspend all the ports:
762 *
763 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15:
764 * Suspend-Resume, pp. 276-283
765 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276
766 */
767 for (bn = 0; bn < 2; bn++) {
768 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
769 /* 4.15.1: Port Suspend. */
770 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
771
772 /*
773 * `System software places individual ports
774 * into suspend mode by writing a ``3'' into
775 * the appropriate PORTSC register Port Link
776 * State (PLS) field (refer to Section 5.4.8).
777 * Software should only set the PLS field to
778 * ``3'' when the port is in the Enabled
779 * state.'
780 *
781 * `Software should not attempt to suspend a
782 * port unless the port reports that it is in
783 * the enabled (PED = ``1''; PLS < ``3'')
784 * state (refer to Section 5.4.8 for more
785 * information about PED and PLS).'
786 */
787 v = xhci_op_read_4(sc, port);
788 if (((v & XHCI_PS_PED) == 0) ||
789 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3)
790 continue;
791 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
792 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3);
793 xhci_op_write_4(sc, port, v);
794
795 /*
796 * `When the PLS field is written with U3
797 * (``3''), the status of the PLS bit will not
798 * change to the target U state U3 until the
799 * suspend signaling has completed to the
800 * attached device (which may be as long as
801 * 10ms.).'
802 *
803 * `Software is required to wait for U3
804 * transitions to complete before it puts the
805 * xHC into a low power state, and before
806 * resuming the port.'
807 *
808 * XXX Take advantage of the technique to
809 * reduce polling on host controllers that
810 * support the U3C capability.
811 */
812 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) {
813 v = xhci_op_read_4(sc, port);
814 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3)
815 break;
816 usb_delay_ms(&sc->sc_bus, 1);
817 }
818 if (j == XHCI_WAIT_PLS_U3) {
819 device_printf(self,
820 "suspend timeout on bus %zu port %zu\n",
821 bn, i);
822 goto out;
823 }
824 }
825 }
826
827 /*
828 * `2. Ensure that the Command Ring is in the Stopped state
829 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is
830 * empty), and all Command Completion Events associated
831 * with them have been received.'
832 *
833 * XXX
834 */
835
836 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */
837 xhci_op_write_4(sc, XHCI_USBCMD,
838 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS);
839
840 /*
841 * `4. Read the Operational Runtime, and VTIO registers in the
842 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ,
843 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their
844 * state.'
845 *
846 * (We don't use VTIO here (XXX for now?).)
847 */
848 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
849 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL);
850 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP);
851 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG);
852 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0));
853 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0));
854 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0));
855 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0));
856 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0));
857
858 /*
859 * `5. Set the Controller Save State (CSS) flag in the USBCMD
860 * register (5.4.1)...'
861 */
862 xhci_op_write_4(sc, XHCI_USBCMD,
863 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS);
864
865 /*
866 * `...and wait for the Save State Status (SSS) flag in the
867 * USBSTS register (5.4.2) to transition to ``0''.'
868 */
869 for (i = 0; i < XHCI_WAIT_SSS; i++) {
870 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0)
871 break;
872 usb_delay_ms(&sc->sc_bus, 1);
873 }
874 if (i >= XHCI_WAIT_SSS) {
875 device_printf(self, "suspend timeout, USBSTS.SSS\n");
876 /*
877 * Just optimistically go on and check SRE anyway --
878 * what's the worst that could happen?
879 */
880 }
881
882 /*
883 * `Note: After a Save or Restore operation completes, the
884 * Save/Restore Error (SRE) flag in the USBSTS register should
885 * be checked to ensure that the operation completed
886 * successfully.'
887 */
888 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
889 device_printf(self, "suspend error, USBSTS.SRE\n");
890 goto out;
891 }
892
893 /* Success! */
894 ok = true;
895
896 out: mutex_exit(&sc->sc_rhlock);
897 if (!ok) {
898 /*
899 * If suspend failed, stop holding up command issuance
900 * and make it fail instead.
901 */
902 mutex_enter(&sc->sc_lock);
903 KASSERT(sc->sc_suspender == curlwp);
904 sc->sc_suspender = NULL;
905 sc->sc_suspendresume_failed = true;
906 cv_broadcast(&sc->sc_cmdbusy_cv);
907 mutex_exit(&sc->sc_lock);
908 }
909 return ok;
910 }
911
912 bool
913 xhci_resume(device_t self, const pmf_qual_t *qual)
914 {
915 struct xhci_softc * const sc = device_private(self);
916 size_t i, j, bn, dci;
917 int port;
918 uint32_t v;
919 bool ok = false;
920
921 XHCIHIST_FUNC(); XHCIHIST_CALLED();
922
923 /*
924 * If resume had previously failed, just try again. Can't make
925 * things worse, probably.
926 */
927 mutex_enter(&sc->sc_lock);
928 if (sc->sc_suspendresume_failed) {
929 KASSERT(sc->sc_suspender == NULL);
930 sc->sc_suspender = curlwp;
931 sc->sc_suspendresume_failed = false;
932 }
933 KASSERT(sc->sc_suspender);
934 mutex_exit(&sc->sc_lock);
935
936 /*
937 * Block roothub xfers which might touch portsc registers until
938 * we're done resuming.
939 */
940 mutex_enter(&sc->sc_rhlock);
941
942 /*
943 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
944 * xHCI Power Management, p. 343
945 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343
946 */
947
948 /*
949 * `4. Restore the Operational Runtime, and VTIO registers with
950 * their previously saved state in the following order:
951 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN,
952 * IMOD, and VTIO.'
953 *
954 * (We don't use VTIO here (for now?).)
955 */
956 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd);
957 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl);
958 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap);
959 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config);
960 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0);
961 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0);
962 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0);
963 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0);
964 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0);
965
966 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */
967
968 /*
969 * `5. Set the Controller Restore State (CRS) flag in the
970 * USBCMD register (5.4.1) to ``1''...'
971 */
972 xhci_op_write_4(sc, XHCI_USBCMD,
973 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS);
974
975 /*
976 * `...and wait for the Restore State Status (RSS) in the
977 * USBSTS register (5.4.2) to transition to ``0''.'
978 */
979 for (i = 0; i < XHCI_WAIT_RSS; i++) {
980 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0)
981 break;
982 usb_delay_ms(&sc->sc_bus, 1);
983 }
984 if (i >= XHCI_WAIT_RSS) {
985 device_printf(self, "resume timeout, USBSTS.RSS\n");
986 goto out;
987 }
988
989 /*
990 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits
991 * are consistent with the RCS values to be written to the
992 * CRCR.'
993 *
994 * XXX Hope just zeroing it is good enough!
995 */
996 xhci_host_dequeue(sc->sc_cr);
997
998 /*
999 * `7. Write the CRCR with the address and RCS value of the
1000 * reinitialized Command Ring. Note that this write will
1001 * cause the Command Ring to restart at the address
1002 * specified by the CRCR.'
1003 */
1004 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1005 sc->sc_cr->xr_cs);
1006
1007 /*
1008 * `8. Enable the controller by setting Run/Stop (R/S) =
1009 * ``1''.'
1010 */
1011 xhci_op_write_4(sc, XHCI_USBCMD,
1012 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS);
1013
1014 /*
1015 * `9. Software shall walk the USB topology and initialize each
1016 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and
1017 * external hub ports attached to USB devices.'
1018 *
1019 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2
1020 * `Port Resume', 4.15.2.2 `Host Initiated'.
1021 *
1022 * XXX We should maybe batch up initiating the state
1023 * transitions, and then wait for them to complete all at once.
1024 */
1025 for (bn = 0; bn < 2; bn++) {
1026 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
1027 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
1028
1029 /* `When a port is in the U3 state: ...' */
1030 v = xhci_op_read_4(sc, port);
1031 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3)
1032 continue;
1033
1034 /*
1035 * `For a USB2 protocol port, software shall
1036 * write a ``15'' (Resume) to the PLS field to
1037 * initiate resume signaling. The port shall
1038 * transition to the Resume substate and the
1039 * xHC shall transmit the resume signaling
1040 * within 1ms (T_URSM). Software shall ensure
1041 * that resume is signaled for at least 20ms
1042 * (T_DRSMDN). Software shall start timing
1043 * T_DRSMDN from the write of ``15'' (Resume)
1044 * to PLS.'
1045 */
1046 if (bn == 1) {
1047 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0);
1048 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1049 v |= XHCI_PS_LWS;
1050 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME);
1051 xhci_op_write_4(sc, port, v);
1052 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
1053 } else {
1054 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0);
1055 }
1056
1057 /*
1058 * `For a USB3 protocol port [and a USB2
1059 * protocol port after transitioning to
1060 * Resume], software shall write a ``0'' (U0)
1061 * to the PLS field...'
1062 */
1063 v = xhci_op_read_4(sc, port);
1064 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1065 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0);
1066 xhci_op_write_4(sc, port, v);
1067
1068 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) {
1069 v = xhci_op_read_4(sc, port);
1070 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0)
1071 break;
1072 usb_delay_ms(&sc->sc_bus, 1);
1073 }
1074 if (j == XHCI_WAIT_PLS_U0) {
1075 device_printf(self,
1076 "resume timeout on bus %zu port %zu\n",
1077 bn, i);
1078 goto out;
1079 }
1080 }
1081 }
1082
1083 /*
1084 * `10. Restart each of the previously Running endpoints by
1085 * ringing their doorbells.'
1086 */
1087 for (i = 0; i < sc->sc_maxslots; i++) {
1088 struct xhci_slot *xs = &sc->sc_slots[i];
1089
1090 /* Skip if the slot is not in use. */
1091 if (xs->xs_idx == 0)
1092 continue;
1093
1094 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
1095 /* Skip if the endpoint is not Running. */
1096 if (xhci_get_epstate(sc, xs, dci) !=
1097 XHCI_EPSTATE_RUNNING)
1098 continue;
1099
1100 /* Ring the doorbell. */
1101 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
1102 }
1103 }
1104
1105 /*
1106 * `Note: After a Save or Restore operation completes, the
1107 * Save/Restore Error (SRE) flag in the USBSTS register should
1108 * be checked to ensure that the operation completed
1109 * successfully.'
1110 */
1111 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
1112 device_printf(self, "resume error, USBSTS.SRE\n");
1113 goto out;
1114 }
1115
1116 /* Success! */
1117 ok = true;
1118
1119 out: /*
1120 * Resume command issuance. If the hardware failed to resume,
1121 * well, tough -- deadlocking because everything is held up on
1122 * the suspension, with no opportunity to detach, isn't better
1123 * than timing out waiting for dead hardware.
1124 */
1125 mutex_enter(&sc->sc_lock);
1126 KASSERT(sc->sc_suspender);
1127 sc->sc_suspender = NULL;
1128 sc->sc_suspendresume_failed = !ok;
1129 cv_broadcast(&sc->sc_cmdbusy_cv);
1130 mutex_exit(&sc->sc_lock);
1131
1132 mutex_exit(&sc->sc_rhlock);
1133 return ok;
1134 }
1135
1136 bool
1137 xhci_shutdown(device_t self, int flags)
1138 {
1139 return false;
1140 }
1141
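/*
 * Halt and reset the host controller: wait for Controller Not Ready (CNR)
 * to clear, stop the controller, set HCRST and wait for it to self-clear,
 * then wait for CNR to clear again (xHCI 4.2).
 */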
1142 static int
1143 xhci_hc_reset(struct xhci_softc * const sc)
1144 {
1145 uint32_t usbcmd, usbsts;
1146 int i;
1147
  1148 	/* Wait for Controller Not Ready (CNR) to clear */
1149 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1150 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1151 if ((usbsts & XHCI_STS_CNR) == 0)
1152 break;
1153 usb_delay_ms(&sc->sc_bus, 1);
1154 }
1155 if (i >= XHCI_WAIT_CNR) {
1156 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
1157 return EIO;
1158 }
1159
1160 /* Halt controller */
1161 usbcmd = 0;
1162 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1163 usb_delay_ms(&sc->sc_bus, 1);
1164
1165 /* Reset controller */
1166 usbcmd = XHCI_CMD_HCRST;
1167 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1168 for (i = 0; i < XHCI_WAIT_HCRST; i++) {
1169 /*
1170 * Wait 1ms first. Existing Intel xHCI requires 1ms delay to
1171 * prevent system hang (Errata).
1172 */
1173 usb_delay_ms(&sc->sc_bus, 1);
1174 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
1175 if ((usbcmd & XHCI_CMD_HCRST) == 0)
1176 break;
1177 }
1178 if (i >= XHCI_WAIT_HCRST) {
1179 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
1180 return EIO;
1181 }
1182
  1183 	/* Wait for Controller Not Ready (CNR) to clear after reset */
1184 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1185 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1186 if ((usbsts & XHCI_STS_CNR) == 0)
1187 break;
1188 usb_delay_ms(&sc->sc_bus, 1);
1189 }
1190 if (i >= XHCI_WAIT_CNR) {
1191 aprint_error_dev(sc->sc_dev,
1192 "controller not ready timeout after reset\n");
1193 return EIO;
1194 }
1195
1196 return 0;
1197 }
1198
  1199 /* 7.2 xHCI Supported Protocol Capability */
1200 static void
1201 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
1202 {
1203 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1204
1205 /* XXX Cache this lot */
1206
1207 const uint32_t w0 = xhci_read_4(sc, ecp);
1208 const uint32_t w4 = xhci_read_4(sc, ecp + 4);
1209 const uint32_t w8 = xhci_read_4(sc, ecp + 8);
1210 const uint32_t wc = xhci_read_4(sc, ecp + 0xc);
1211
1212 aprint_debug_dev(sc->sc_dev,
1213 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc);
1214
1215 if (w4 != XHCI_XECP_USBID)
1216 return;
1217
1218 const int major = XHCI_XECP_SP_W0_MAJOR(w0);
1219 const int minor = XHCI_XECP_SP_W0_MINOR(w0);
1220 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
1221 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);
1222
1223 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
1224 switch (mm) {
1225 case 0x0200:
1226 case 0x0300:
1227 case 0x0301:
1228 case 0x0310:
1229 case 0x0320:
1230 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
1231 major == 3 ? "ss" : "hs", cpo, cpo + cpc - 1);
1232 if (major == 3)
1233 sc->sc_usb3nports += cpo + cpc - 1;
1234 else
1235 sc->sc_usb2nports += cpo + cpc - 1;
1236 break;
1237 default:
1238 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
1239 major, minor);
1240 return;
1241 }
1242
1243 const size_t bus = (major == 3) ? 0 : 1;
1244
1245 /* Index arrays with 0..n-1 where ports are numbered 1..n */
1246 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
1247 if (sc->sc_ctlrportmap[cp] != 0) {
  1248 			aprint_error_dev(sc->sc_dev, "controller port %zu "
  1249 			    "already assigned\n", cp);
1250 continue;
1251 }
1252
1253 sc->sc_ctlrportbus[cp / NBBY] |=
1254 bus == 0 ? 0 : __BIT(cp % NBBY);
1255
1256 const size_t rhp = sc->sc_rhportcount[bus]++;
1257
1258 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
1259 "bus %zu rhp %zu is %d", bus, rhp,
1260 sc->sc_rhportmap[bus][rhp]);
1261
1262 sc->sc_rhportmap[bus][rhp] = cp + 1;
1263 sc->sc_ctlrportmap[cp] = rhp + 1;
1264 }
1265 }
1266
1267 /* Process extended capabilities */
1268 static void
1269 xhci_ecp(struct xhci_softc *sc)
1270 {
1271 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1272
1273 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4;
1274 while (ecp != 0) {
1275 uint32_t ecr = xhci_read_4(sc, ecp);
1276 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
1277 switch (XHCI_XECP_ID(ecr)) {
1278 case XHCI_ID_PROTOCOLS: {
1279 xhci_id_protocols(sc, ecp);
1280 break;
1281 }
1282 case XHCI_ID_USB_LEGACY: {
1283 uint8_t bios_sem;
1284
1285 /* Take host controller ownership from BIOS */
1286 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
1287 if (bios_sem) {
1288 /* sets xHCI to be owned by OS */
1289 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
1290 aprint_debug_dev(sc->sc_dev,
1291 "waiting for BIOS to give up control\n");
1292 for (int i = 0; i < 5000; i++) {
1293 bios_sem = xhci_read_1(sc, ecp +
1294 XHCI_XECP_BIOS_SEM);
1295 if (bios_sem == 0)
1296 break;
1297 DELAY(1000);
1298 }
1299 if (bios_sem) {
1300 aprint_error_dev(sc->sc_dev,
1301 "timed out waiting for BIOS\n");
1302 }
1303 }
1304 break;
1305 }
1306 default:
1307 break;
1308 }
1309 ecr = xhci_read_4(sc, ecp);
1310 if (XHCI_XECP_NEXT(ecr) == 0) {
1311 ecp = 0;
1312 } else {
1313 ecp += XHCI_XECP_NEXT(ecr) * 4;
1314 }
1315 }
1316 }
1317
1318 #define XHCI_HCCPREV1_BITS \
1319 "\177\020" /* New bitmask */ \
1320 "f\020\020XECP\0" \
1321 "f\014\4MAXPSA\0" \
1322 "b\013CFC\0" \
1323 "b\012SEC\0" \
1324 "b\011SBD\0" \
1325 "b\010FSE\0" \
1326 "b\7NSS\0" \
1327 "b\6LTC\0" \
1328 "b\5LHRC\0" \
1329 "b\4PIND\0" \
1330 "b\3PPC\0" \
1331 "b\2CZC\0" \
1332 "b\1BNC\0" \
1333 "b\0AC64\0" \
1334 "\0"
1335 #define XHCI_HCCV1_x_BITS \
1336 "\177\020" /* New bitmask */ \
1337 "f\020\020XECP\0" \
1338 "f\014\4MAXPSA\0" \
1339 "b\013CFC\0" \
1340 "b\012SEC\0" \
1341 "b\011SPC\0" \
1342 "b\010PAE\0" \
1343 "b\7NSS\0" \
1344 "b\6LTC\0" \
1345 "b\5LHRC\0" \
1346 "b\4PIND\0" \
1347 "b\3PPC\0" \
1348 "b\2CSZ\0" \
1349 "b\1BNC\0" \
1350 "b\0AC64\0" \
1351 "\0"
1352
1353 #define XHCI_HCC2_BITS \
1354 "\177\020" /* New bitmask */ \
1355 "b\7ETC_TSC\0" \
1356 "b\6ETC\0" \
1357 "b\5CIC\0" \
1358 "b\4LEC\0" \
1359 "b\3CTC\0" \
1360 "b\2FSC\0" \
1361 "b\1CMC\0" \
1362 "b\0U3C\0" \
1363 "\0"
1364
1365 void
1366 xhci_start(struct xhci_softc *sc)
1367 {
1368 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
1369 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
1370 /* Intel xhci needs interrupt rate moderated. */
1371 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
1372 else
1373 xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
1374 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
1375 xhci_rt_read_4(sc, XHCI_IMOD(0)));
1376
1377 /* Go! */
1378 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
1379 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n",
1380 xhci_op_read_4(sc, XHCI_USBCMD));
1381 }
1382
1383 int
1384 xhci_init(struct xhci_softc *sc)
1385 {
1386 bus_size_t bsz;
1387 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff;
1388 uint32_t pagesize, config;
1389 int i = 0;
1390 uint16_t hciversion;
1391 uint8_t caplength;
1392
1393 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1394
1395 /* Set up the bus struct for the usb 3 and usb 2 buses */
1396 sc->sc_bus.ub_methods = &xhci_bus_methods;
1397 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
1398 sc->sc_bus.ub_usedma = true;
1399 sc->sc_bus.ub_hcpriv = sc;
1400
1401 sc->sc_bus2.ub_methods = &xhci_bus_methods;
1402 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
1403 sc->sc_bus2.ub_revision = USBREV_2_0;
1404 sc->sc_bus2.ub_usedma = true;
1405 sc->sc_bus2.ub_hcpriv = sc;
1406 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;
1407
1408 caplength = xhci_read_1(sc, XHCI_CAPLENGTH);
1409 hciversion = xhci_read_2(sc, XHCI_HCIVERSION);
1410
1411 if (hciversion < XHCI_HCIVERSION_0_96 ||
1412 hciversion >= 0x0200) {
1413 aprint_normal_dev(sc->sc_dev,
1414 "xHCI version %x.%x not known to be supported\n",
1415 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1416 } else {
1417 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
1418 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1419 }
1420
1421 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
1422 &sc->sc_cbh) != 0) {
1423 aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
1424 return ENOMEM;
1425 }
1426
1427 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
1428 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
1429 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
1430 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
1431 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
1432 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
1433 aprint_debug_dev(sc->sc_dev,
1434 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);
1435
1436 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
1437 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32;
1438
1439 char sbuf[128];
1440 if (hciversion < XHCI_HCIVERSION_1_0)
1441 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc);
1442 else
1443 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc);
1444 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
1445 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n",
1446 XHCI_HCC_XECP(sc->sc_hcc) * 4);
1447 if (hciversion >= XHCI_HCIVERSION_1_1) {
1448 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
1449 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2);
1450 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
1451 }
1452
1453 /* default all ports to bus 0, i.e. usb 3 */
1454 sc->sc_ctlrportbus = kmem_zalloc(
1455 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
1456 sc->sc_ctlrportmap =
1457 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1458
1459 /* controller port to bus roothub port map */
1460 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
1461 sc->sc_rhportmap[j] =
1462 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1463 }
1464
1465 /*
1466 * Process all Extended Capabilities
1467 */
1468 xhci_ecp(sc);
1469
1470 bsz = XHCI_PORTSC(sc->sc_maxports);
1471 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
1472 &sc->sc_obh) != 0) {
1473 aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
1474 return ENOMEM;
1475 }
1476
1477 dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
1478 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
1479 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
1480 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
1481 return ENOMEM;
1482 }
1483
1484 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
1485 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
1486 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
1487 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
1488 return ENOMEM;
1489 }
1490
1491 int rv;
1492 rv = xhci_hc_reset(sc);
1493 if (rv != 0) {
1494 return rv;
1495 }
1496
1497 if (sc->sc_vendor_init)
1498 sc->sc_vendor_init(sc);
1499
1500 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
1501 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
1502 pagesize = ffs(pagesize);
1503 if (pagesize == 0) {
1504 aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
1505 return EIO;
1506 }
1507 sc->sc_pgsz = 1 << (12 + (pagesize - 1));
1508 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
1509 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
1510 (uint32_t)sc->sc_maxslots);
1511 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);
1512
1513 int err;
1514 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
1515 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
1516 if (sc->sc_maxspbuf != 0) {
1517 err = usb_allocmem(sc->sc_bus.ub_dmatag,
1518 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
1519 USBMALLOC_COHERENT | USBMALLOC_ZERO,
1520 &sc->sc_spbufarray_dma);
1521 if (err) {
1522 aprint_error_dev(sc->sc_dev,
1523 "spbufarray init fail, err %d\n", err);
1524 return ENOMEM;
1525 }
1526
1527 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
1528 sc->sc_maxspbuf, KM_SLEEP);
1529 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
1530 for (i = 0; i < sc->sc_maxspbuf; i++) {
1531 usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
1532 /* allocate contexts */
1533 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz,
1534 sc->sc_pgsz, USBMALLOC_COHERENT | USBMALLOC_ZERO,
1535 dma);
1536 if (err) {
1537 aprint_error_dev(sc->sc_dev,
1538 "spbufarray_dma init fail, err %d\n", err);
1539 rv = ENOMEM;
1540 goto bad1;
1541 }
1542 spbufarray[i] = htole64(DMAADDR(dma, 0));
1543 usb_syncmem(dma, 0, sc->sc_pgsz,
1544 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1545 }
1546
1547 usb_syncmem(&sc->sc_spbufarray_dma, 0,
1548 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
1549 }
1550
1551 config = xhci_op_read_4(sc, XHCI_CONFIG);
1552 config &= ~0xFF;
1553 config |= sc->sc_maxslots & 0xFF;
1554 xhci_op_write_4(sc, XHCI_CONFIG, config);
1555
1556 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
1557 XHCI_COMMAND_RING_SEGMENTS_ALIGN);
1558 if (err) {
1559 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n",
1560 err);
1561 rv = ENOMEM;
1562 goto bad1;
1563 }
1564
1565 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
1566 XHCI_EVENT_RING_SEGMENTS_ALIGN);
1567 if (err) {
1568 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
1569 err);
1570 rv = ENOMEM;
1571 goto bad2;
1572 }
1573
1574 usb_dma_t *dma;
1575 size_t size;
1576 size_t align;
1577
1578 dma = &sc->sc_eventst_dma;
1579 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
1580 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
1581 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
1582 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
1583 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1584 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1585 if (err) {
1586 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
1587 err);
1588 rv = ENOMEM;
1589 goto bad3;
1590 }
1591
1592 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
1593 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
1594 KERNADDR(&sc->sc_eventst_dma, 0),
1595 sc->sc_eventst_dma.udma_block->size);
1596
1597 dma = &sc->sc_dcbaa_dma;
1598 size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
1599 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
1600 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
1601 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1602 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1603 if (err) {
1604 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
1605 rv = ENOMEM;
1606 goto bad4;
1607 }
1608 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
1609 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
1610 KERNADDR(&sc->sc_dcbaa_dma, 0),
1611 sc->sc_dcbaa_dma.udma_block->size);
1612
1613 if (sc->sc_maxspbuf != 0) {
1614 /*
  1615 		 * DCBAA entry 0 holds the scratchpad buffer array pointer.
1616 */
1617 *(uint64_t *)KERNADDR(dma, 0) =
1618 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
1619 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1620 }
1621
1622 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
1623 KM_SLEEP);
1624 if (sc->sc_slots == NULL) {
1625 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
1626 rv = ENOMEM;
1627 goto bad;
1628 }
1629
1630 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
1631 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
1632 if (sc->sc_xferpool == NULL) {
1633 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
1634 err);
1635 rv = ENOMEM;
1636 goto bad;
1637 }
1638
1639 cv_init(&sc->sc_command_cv, "xhcicmd");
1640 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
1641 mutex_init(&sc->sc_rhlock, MUTEX_DEFAULT, IPL_NONE);
1642 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
1643 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
1644
1645 struct xhci_erste *erst;
1646 erst = KERNADDR(&sc->sc_eventst_dma, 0);
1647 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0));
1648 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb);
1649 erst[0].erste_3 = htole32(0);
1650 usb_syncmem(&sc->sc_eventst_dma, 0,
1651 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);
1652
1653 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
1654 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
1655 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) |
1656 XHCI_ERDP_BUSY);
1657
1658 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
1659 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1660 sc->sc_cr->xr_cs);
1661
1662 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
1663 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);
1664
1665 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
1666 xhci_start(sc);
1667
1668 return 0;
1669
1670 bad:
1671 if (sc->sc_xferpool) {
1672 pool_cache_destroy(sc->sc_xferpool);
1673 sc->sc_xferpool = NULL;
1674 }
1675
1676 if (sc->sc_slots) {
1677 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
1678 sc->sc_maxslots);
1679 sc->sc_slots = NULL;
1680 }
1681
1682 usb_freemem(&sc->sc_dcbaa_dma);
1683 bad4:
1684 usb_freemem(&sc->sc_eventst_dma);
1685 bad3:
1686 xhci_ring_free(sc, &sc->sc_er);
1687 bad2:
1688 xhci_ring_free(sc, &sc->sc_cr);
1689 i = sc->sc_maxspbuf;
1690 bad1:
1691 for (int j = 0; j < i; j++)
1692 usb_freemem(&sc->sc_spbuf_dma[j]);
1693 usb_freemem(&sc->sc_spbufarray_dma);
1694
1695 return rv;
1696 }
1697
1698 static inline bool
1699 xhci_polling_p(struct xhci_softc * const sc)
1700 {
1701 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
1702 }
1703
1704 int
1705 xhci_intr(void *v)
1706 {
1707 struct xhci_softc * const sc = v;
1708 int ret = 0;
1709
1710 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1711
1712 if (sc == NULL)
1713 return 0;
1714
1715 mutex_spin_enter(&sc->sc_intr_lock);
1716
1717 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1718 goto done;
1719
1720 /* If we get an interrupt while polling, then just ignore it. */
1721 if (xhci_polling_p(sc)) {
1722 #ifdef DIAGNOSTIC
1723 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1724 #endif
1725 goto done;
1726 }
1727
1728 ret = xhci_intr1(sc);
1729 if (ret) {
1730 KASSERT(sc->sc_child || sc->sc_child2);
1731
1732 /*
  1733 		 * One of the child busses could already be detached.  It doesn't
1734 * matter on which of the two the softintr is scheduled.
1735 */
1736 if (sc->sc_child)
1737 usb_schedsoftintr(&sc->sc_bus);
1738 else
1739 usb_schedsoftintr(&sc->sc_bus2);
1740 }
1741 done:
1742 mutex_spin_exit(&sc->sc_intr_lock);
1743 return ret;
1744 }
1745
1746 int
1747 xhci_intr1(struct xhci_softc * const sc)
1748 {
1749 uint32_t usbsts;
1750 uint32_t iman;
1751
1752 XHCIHIST_FUNC();
1753
1754 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1755 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
1756 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
1757 XHCI_STS_HCE)) == 0) {
1758 DPRINTFN(16, "ignored intr not for %jd",
1759 device_unit(sc->sc_dev), 0, 0, 0);
1760 return 0;
1761 }
1762
1763 /*
  1764 	 * Clear EINT and other transient flags, so as not to misinterpret the
  1765 	 * next shared interrupt.  Also, to avoid a race, EINT must be cleared
1766 * before XHCI_IMAN_INTR_PEND is cleared.
1767 */
1768 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & XHCI_STS_RSVDP0);
1769
1770 #ifdef XHCI_DEBUG
1771 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1772 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1773 #endif
1774
1775 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1776 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1777 iman |= XHCI_IMAN_INTR_PEND;
1778 xhci_rt_write_4(sc, XHCI_IMAN(0), iman);
1779
1780 #ifdef XHCI_DEBUG
1781 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1782 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1783 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1784 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1785 #endif
1786
1787 return 1;
1788 }
1789
1790 /*
1791 * 3 port speed types used in USB stack
1792 *
1793 * usbdi speed
1794 * definition: USB_SPEED_* in usb.h
1795 * They are used in struct usbd_device in USB stack.
1796 * ioctl interface uses these values too.
1797 * port_status speed
1798 * definition: UPS_*_SPEED in usb.h
1799 * They are used in usb_port_status_t and valid only for USB 2.0.
1800 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus
1801 * of usb_port_status_ext_t indicates port speed.
1802 * Note that some 3.0 values overlap with 2.0 values.
  1803  *   (e.g. 0x200 means UPS_PORT_POWER_SS in SS and
1804 * means UPS_LOW_SPEED in HS.)
1805 * port status returned from hub also uses these values.
  1806  *   On NetBSD, UPS_OTHER_SPEED indicates that the port speed is
  1807  *   SuperSpeed or faster.
1808 * xspeed:
1809 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
  1810  *  They are used only in the slot context and the PORTSC register of xHCI.
1811 * The difference between usbdi speed and xspeed is
1812 * that FS and LS values are swapped.
1813 */
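/*
 * For example: xhci_speed2xspeed() below maps USB_SPEED_FULL to PSI 1 and
 * USB_SPEED_LOW to PSI 2 (the swap noted above); other speeds pass through
 * unchanged.
 */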
1814
1815 /* convert usbdi speed to xspeed */
1816 static int
1817 xhci_speed2xspeed(int speed)
1818 {
1819 switch (speed) {
1820 case USB_SPEED_LOW: return 2;
1821 case USB_SPEED_FULL: return 1;
1822 default: return speed;
1823 }
1824 }
1825
1826 #if 0
1827 /* convert xspeed to usbdi speed */
1828 static int
1829 xhci_xspeed2speed(int xspeed)
1830 {
1831 switch (xspeed) {
1832 case 1: return USB_SPEED_FULL;
1833 case 2: return USB_SPEED_LOW;
1834 default: return xspeed;
1835 }
1836 }
1837 #endif
1838
1839 /* convert xspeed to port status speed */
1840 static int
1841 xhci_xspeed2psspeed(int xspeed)
1842 {
1843 switch (xspeed) {
1844 case 0: return 0;
1845 case 1: return UPS_FULL_SPEED;
1846 case 2: return UPS_LOW_SPEED;
1847 case 3: return UPS_HIGH_SPEED;
1848 default: return UPS_OTHER_SPEED;
1849 }
1850 }
1851
1852 /*
1853  * Construct input contexts and issue a Configure Endpoint TRB to open the pipe.
1854 */
1855 static usbd_status
1856 xhci_configure_endpoint(struct usbd_pipe *pipe)
1857 {
1858 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1859 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1860 #ifdef USB_DEBUG
1861 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1862 #endif
1863 struct xhci_soft_trb trb;
1864 usbd_status err;
1865
1866 XHCIHIST_FUNC();
1867 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
1868 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
1869 pipe->up_endpoint->ue_edesc->bmAttributes);
1870
1871 /* XXX ensure input context is available? */
1872
1873 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz);
1874
1875 /* set up context */
1876 xhci_setup_ctx(pipe);
1877
1878 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0),
1879 sc->sc_ctxsz * 1);
1880 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs,
1881 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1);
1882
1883 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1884 trb.trb_2 = 0;
1885 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1886 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1887
1888 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
1889
1890 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1891 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci),
1892 sc->sc_ctxsz * 1);
1893
1894 return err;
1895 }
1896
1897 #if 0
1898 static usbd_status
1899 xhci_unconfigure_endpoint(struct usbd_pipe *pipe)
1900 {
1901 #ifdef USB_DEBUG
1902 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1903 #endif
1904
1905 XHCIHIST_FUNC();
1906 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
1907
1908 return USBD_NORMAL_COMPLETION;
1909 }
1910 #endif
1911
1912 /* 4.6.8, 6.4.3.7 */
1913 static void
1914 xhci_reset_endpoint(struct usbd_pipe *pipe)
1915 {
1916 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1917 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1918 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1919 struct xhci_soft_trb trb;
1920
1921 XHCIHIST_FUNC();
1922 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1923
1924 KASSERT(mutex_owned(&sc->sc_lock));
1925
1926 trb.trb_0 = 0;
1927 trb.trb_2 = 0;
1928 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1929 XHCI_TRB_3_EP_SET(dci) |
1930 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP);
1931
1932 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
1933 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
1934 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
1935 }
1936 }
1937
1938 /*
1939 * 4.6.9, 6.4.3.8
1940  * Stop execution of TDs on the transfer ring.
1941 * Should be called with sc_lock held.
1942 */
1943 static usbd_status
1944 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci,
1945 uint32_t trb3flags)
1946 {
1947 struct xhci_soft_trb trb;
1948 usbd_status err;
1949
1950 XHCIHIST_FUNC();
1951 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1952
1953 KASSERT(mutex_owned(&sc->sc_lock));
1954
1955 trb.trb_0 = 0;
1956 trb.trb_2 = 0;
1957 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1958 XHCI_TRB_3_EP_SET(dci) |
1959 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) |
1960 trb3flags;
1961
1962 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1963
1964 return err;
1965 }
1966
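/*
 * Stop the endpoint backing 'pipe' with a Stop Endpoint command.
 * Caller must hold sc_lock.
 */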
1967 static usbd_status
1968 xhci_stop_endpoint(struct usbd_pipe *pipe)
1969 {
1970 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1971 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1972 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1973
1974 XHCIHIST_FUNC();
1975 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1976
1977 KASSERT(mutex_owned(&sc->sc_lock));
1978
1979 return xhci_stop_endpoint_cmd(sc, xs, dci, 0);
1980 }
1981
1982 /*
1983 * Set TR Dequeue Pointer.
1984 * xHCI 1.1 4.6.10 6.4.3.9
1985  * Purge all of the TRBs on the ring and reinitialize the ring.
1986  * Set the TR Dequeue Pointer to 0 and the Cycle State to 1.
1987  * The EPSTATE of the endpoint must be ERROR or STOPPED, otherwise a
1988  * CONTEXT_STATE error will be generated.
1989 */
1990 static void
1991 xhci_set_dequeue(struct usbd_pipe *pipe)
1992 {
1993 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1994 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1995 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1996 struct xhci_ring * const xr = xs->xs_xr[dci];
1997 struct xhci_soft_trb trb;
1998
1999 XHCIHIST_FUNC();
2000 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
2001
2002 KASSERT(mutex_owned(&sc->sc_lock));
2003 KASSERT(xr != NULL);
2004
2005 xhci_host_dequeue(xr);
2006
2007 /* set DCS */
2008 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
2009 trb.trb_2 = 0;
2010 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2011 XHCI_TRB_3_EP_SET(dci) |
2012 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
2013
2014 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
2015 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
2016 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
2017 }
2018 }
2019
2020 /*
2021  * Open a new pipe: called from usbd_setup_pipe_flags.
2022  * Fills in the pipe's methods.
2023  * If the pipe is not for ep0, calls xhci_configure_endpoint.
2024 */
2025 static usbd_status
2026 xhci_open(struct usbd_pipe *pipe)
2027 {
2028 struct usbd_device * const dev = pipe->up_dev;
2029 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
2030 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
2031 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2032 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2033 const u_int dci = xhci_ep_get_dci(ed);
2034 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
2035 usbd_status err;
2036
2037 XHCIHIST_FUNC();
2038 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr,
2039 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed);
2040 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx",
2041 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress,
2042 ed->bmAttributes);
2043 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize),
2044 ed->bInterval, 0, 0);
2045
2046 if (sc->sc_dying)
2047 return USBD_IOERROR;
2048
2049 /* Root Hub */
2050 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) {
2051 switch (ed->bEndpointAddress) {
2052 case USB_CONTROL_ENDPOINT:
2053 pipe->up_methods = &roothub_ctrl_methods;
2054 break;
2055 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
2056 pipe->up_methods = &xhci_root_intr_methods;
2057 break;
2058 default:
2059 pipe->up_methods = NULL;
2060 DPRINTFN(0, "bad bEndpointAddress 0x%02jx",
2061 ed->bEndpointAddress, 0, 0, 0);
2062 return USBD_INVAL;
2063 }
2064 return USBD_NORMAL_COMPLETION;
2065 }
2066
2067 usb_init_task(&xpipe->xp_async_task, xhci_pipe_restart_async_task,
2068 pipe, USB_TASKQ_MPSAFE);
2069
2070 switch (xfertype) {
2071 case UE_CONTROL:
2072 pipe->up_methods = &xhci_device_ctrl_methods;
2073 break;
2074 case UE_ISOCHRONOUS:
2075 pipe->up_methods = &xhci_device_isoc_methods;
2076 pipe->up_serialise = false;
2077 xpipe->xp_isoc_next = -1;
2078 break;
2079 case UE_BULK:
2080 pipe->up_methods = &xhci_device_bulk_methods;
2081 break;
2082 case UE_INTERRUPT:
2083 pipe->up_methods = &xhci_device_intr_methods;
2084 break;
2085 default:
2086 return USBD_IOERROR;
2087 break;
2088 }
2089
2090 KASSERT(xs != NULL);
2091 KASSERT(xs->xs_xr[dci] == NULL);
2092
2093 /* allocate transfer ring */
2094 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS,
2095 XHCI_TRB_ALIGN);
2096 if (err) {
2097 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0);
2098 return err;
2099 }
2100
2101 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT)
2102 return xhci_configure_endpoint(pipe);
2103
2104 return USBD_NORMAL_COMPLETION;
2105 }
2106
2107 /*
2108  * Close a pipe: called from usbd_kill_pipe via the close methods.
2109  * If the endpoint being closed is ep0, disable the whole slot.
2110 * Should be called with sc_lock held.
2111 */
2112 static void
2113 xhci_close_pipe(struct usbd_pipe *pipe)
2114 {
2115 struct xhci_pipe * const xp =
2116 container_of(pipe, struct xhci_pipe, xp_pipe);
2117 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2118 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2119 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2120 const u_int dci = xhci_ep_get_dci(ed);
2121 struct xhci_soft_trb trb;
2122 uint32_t *cp;
2123
2124 XHCIHIST_FUNC();
2125
2126 usb_rem_task_wait(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC,
2127 &sc->sc_lock);
2128
2129 if (sc->sc_dying)
2130 return;
2131
2132 /* xs is uninitialized before xhci_init_slot */
2133 if (xs == NULL || xs->xs_idx == 0)
2134 return;
2135
2136 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2137 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2138
2139 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
2140 KASSERT(mutex_owned(&sc->sc_lock));
2141
2142 if (pipe->up_dev->ud_depth == 0)
2143 return;
2144
2145 if (dci == XHCI_DCI_EP_CONTROL) {
2146 DPRINTFN(4, "closing ep0", 0, 0, 0, 0);
2147 /* This frees all rings */
2148 xhci_disable_slot(sc, xs->xs_idx);
2149 return;
2150 }
2151
2152 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED)
2153 (void)xhci_stop_endpoint(pipe);
2154
2155 /*
2156 	 * Set the appropriate bit to be dropped.
2157 	 * Don't set the DC (Deconfigure) bit to 1, otherwise all endpoints
2158 	 * would be deconfigured.
2159 */
2160 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
2161 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));
2162 cp[1] = htole32(0);
2163
2164 /* XXX should be most significant one, not dci? */
2165 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
2166 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci));
2167
2168 /* configure ep context performs an implicit dequeue */
2169 xhci_host_dequeue(xs->xs_xr[dci]);
2170
2171 /* sync input contexts before they are read from memory */
2172 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
2173
2174 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
2175 trb.trb_2 = 0;
2176 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2177 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
2178
2179 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
2180 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2181
2182 xhci_ring_free(sc, &xs->xs_xr[dci]);
2183 xs->xs_xr[dci] = NULL;
2184 }
2185
2186 /*
2187 * Abort transfer. Must be called with sc_lock held. Releases and
2188 * reacquires sc_lock to sleep until hardware acknowledges abort.
2189 */
2190 static void
2191 xhci_abortx(struct usbd_xfer *xfer)
2192 {
2193 XHCIHIST_FUNC();
2194 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2195
2196 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
2197 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
2198
2199 KASSERT(mutex_owned(&sc->sc_lock));
2200 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2201 xfer->ux_status == USBD_TIMEOUT),
2202 "bad abort status: %d", xfer->ux_status);
2203
2204 xhci_pipe_restart(xfer->ux_pipe);
2205
2206 DPRINTFN(14, "end", 0, 0, 0, 0);
2207 }
2208
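/*
 * Reset the driver's view of a ring: zero all TRBs and cookies and
 * rewind the enqueue index and cycle state, matching a controller
 * whose dequeue pointer is (or is about to be) reset to the start
 * of the ring.
 */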
2209 static void
2210 xhci_host_dequeue(struct xhci_ring * const xr)
2211 {
2212 /* When dequeueing the controller, update our struct copy too */
2213 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE);
2214 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE,
2215 BUS_DMASYNC_PREWRITE);
2216 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies));
2217
2218 xr->xr_ep = 0;
2219 xr->xr_cs = 1;
2220 }
2221
2222 /*
2223 * Recover STALLed endpoint, or stop endpoint to abort a pipe.
2224 * xHCI 1.1 sect 4.10.2.1
2225 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove
2226 * all transfers on transfer ring.
2227 */
2228 static void
2229 xhci_pipe_restart(struct usbd_pipe *pipe)
2230 {
2231 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2232 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2233 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2234
2235 XHCIHIST_FUNC();
2236 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2237 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2238
2239 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2240
2241 /*
2242 * - If the endpoint is halted, indicating a stall, reset it.
2243 * - If the endpoint is stopped, we're already good.
2244 * - Otherwise, someone wanted to abort the pipe, so stop the
2245 * endpoint.
2246 *
2247 * In any case, clear the ring.
2248 */
2249 switch (xhci_get_epstate(sc, xs, dci)) {
2250 case XHCI_EPSTATE_HALTED:
2251 xhci_reset_endpoint(pipe);
2252 break;
2253 case XHCI_EPSTATE_STOPPED:
2254 break;
2255 default:
2256 xhci_stop_endpoint(pipe);
2257 break;
2258 }
2259
2260 switch (xhci_get_epstate(sc, xs, dci)) {
2261 case XHCI_EPSTATE_STOPPED:
2262 break;
2263 case XHCI_EPSTATE_ERROR:
2264 device_printf(sc->sc_dev, "endpoint 0x%x error\n",
2265 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2266 break;
2267 default:
2268 device_printf(sc->sc_dev, "endpoint 0x%x failed to stop\n",
2269 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2270 }
2271
2272 xhci_set_dequeue(pipe);
2273
2274 DPRINTFN(4, "ends", 0, 0, 0, 0);
2275 }
2276
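/*
 * Task scheduled by xhci_pipe_restart_async: recover the stalled pipe,
 * clear the halted flag on its transfer ring, and restart the first
 * queued xfer if it is still in progress.
 */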
2277 static void
2278 xhci_pipe_restart_async_task(void *cookie)
2279 {
2280 struct usbd_pipe * const pipe = cookie;
2281 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2282 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2283 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2284 struct xhci_ring * const tr = xs->xs_xr[dci];
2285 struct usbd_xfer *xfer;
2286
2287 XHCIHIST_FUNC();
2288 XHCIHIST_CALLARGS("sc=%#jx pipe=%#jx",
2289 (uintptr_t)sc, (uintptr_t)pipe, 0, 0);
2290
2291 mutex_enter(&sc->sc_lock);
2292
2293 xhci_pipe_restart(pipe);
2294
2295 /*
2296 * We halted our own queue because it stalled. Mark it no
2297 * longer halted and start issuing queued transfers again.
2298 */
2299 tr->is_halted = false;
2300 xfer = SIMPLEQ_FIRST(&pipe->up_queue);
2301 if (xfer) {
2302 /*
2303 * If the first xfer of the queue is not in progress,
2304 * though, there may be a concurrent software abort
2305 * that has already cancelled it and is now in the
2306 * middle of a concurrent xhci_pipe_restart waiting to
2307 * reacquire the pipe (bus) lock. So only restart the
2308 * xfer if it's still USBD_IN_PROGRESS.
2309 *
2310 * Either way, xfers on the queue can't be in
2311 * USBD_NOT_STARTED.
2312 */
2313 KASSERT(xfer->ux_status != USBD_NOT_STARTED);
2314 if (xfer->ux_status == USBD_IN_PROGRESS) {
2315 (*pipe->up_methods->upm_start)(xfer);
2316 } else {
2317 DPRINTF("pipe restart race xfer=%#jx status=%jd",
2318 (uintptr_t)xfer, xfer->ux_status, 0, 0);
2319 }
2320 }
2321
2322 mutex_exit(&sc->sc_lock);
2323 }
2324
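/*
 * Mark the transfer ring halted and defer the actual recovery to a
 * task; the recovery issues commands that sleep and therefore cannot
 * run in the event processing (soft interrupt) path.
 */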
2325 static void
2326 xhci_pipe_restart_async(struct usbd_pipe *pipe)
2327 {
2328 struct xhci_pipe * const xp =
2329 container_of(pipe, struct xhci_pipe, xp_pipe);
2330 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2331 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2332 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2333 struct xhci_ring * const tr = xs->xs_xr[dci];
2334
2335 XHCIHIST_FUNC();
2336 XHCIHIST_CALLARGS("pipe %#jx", (uintptr_t)pipe, 0, 0, 0);
2337
2338 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2339
2340 tr->is_halted = true;
2341 usb_add_task(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC);
2342
2343 DPRINTFN(4, "ends", 0, 0, 0, 0);
2344 }
2345
2346 /* Process roothub port status/change events and notify uhub_intr. */
2347 static void
2348 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport)
2349 {
2350 XHCIHIST_FUNC();
2351 XHCIHIST_CALLARGS("xhci%jd: port %ju status change",
2352 device_unit(sc->sc_dev), ctlrport, 0, 0);
2353
2354 if (ctlrport > sc->sc_maxports)
2355 return;
2356
2357 const size_t bn = xhci_ctlrport2bus(sc, ctlrport);
2358 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport);
2359 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn];
2360
2361 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change",
2362 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer);
2363
2364 if (xfer == NULL)
2365 return;
2366 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
2367
2368 uint8_t *p = xfer->ux_buf;
2369 if (!xhci_polling_p(sc) || !sc->sc_intrxfer_deferred[bn])
2370 memset(p, 0, xfer->ux_length);
2371 p[rhp / NBBY] |= 1 << (rhp % NBBY);
2372 xfer->ux_actlen = xfer->ux_length;
2373 xfer->ux_status = USBD_NORMAL_COMPLETION;
2374 if (xhci_polling_p(sc))
2375 sc->sc_intrxfer_deferred[bn] = true;
2376 else
2377 usb_transfer_complete(xfer);
2378 }
2379
2380 /* Process Transfer Events */
2381 static void
2382 xhci_event_transfer(struct xhci_softc * const sc,
2383 const struct xhci_trb * const trb)
2384 {
2385 uint64_t trb_0;
2386 uint32_t trb_2, trb_3;
2387 uint8_t trbcode;
2388 u_int slot, dci;
2389 struct xhci_slot *xs;
2390 struct xhci_ring *xr;
2391 struct xhci_xfer *xx;
2392 struct usbd_xfer *xfer;
2393 usbd_status err;
2394
2395 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2396
2397 trb_0 = le64toh(trb->trb_0);
2398 trb_2 = le32toh(trb->trb_2);
2399 trb_3 = le32toh(trb->trb_3);
2400 trbcode = XHCI_TRB_2_ERROR_GET(trb_2);
2401 slot = XHCI_TRB_3_SLOT_GET(trb_3);
2402 dci = XHCI_TRB_3_EP_GET(trb_3);
2403 xs = &sc->sc_slots[slot];
2404 xr = xs->xs_xr[dci];
2405
2406 /* sanity check */
2407 KASSERT(xr != NULL);
2408 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots,
2409 "invalid xs_idx %u slot %u", xs->xs_idx, slot);
2410
2411 int idx = 0;
2412 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2413 if (xhci_trb_get_idx(xr, trb_0, &idx)) {
2414 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0);
2415 return;
2416 }
2417 xx = xr->xr_cookies[idx];
2418
2419 /* clear cookie of consumed TRB */
2420 xr->xr_cookies[idx] = NULL;
2421
2422 /*
2423 		 * xx is NULL if the pipe is open but the xfer has not been
2424 		 * started yet. This happens when stopping an idle pipe.
2425 */
2426 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) {
2427 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju",
2428 idx, (uintptr_t)xx, trbcode, dci);
2429 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0,
2430 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)),
2431 0, 0);
2432 return;
2433 }
2434 } else {
2435 		/* When ED != 0, trb_0 is the virtual address of a struct xhci_xfer. */
2436 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
2437 }
2438 /* XXX this may not happen */
2439 if (xx == NULL) {
2440 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
2441 return;
2442 }
2443 xfer = &xx->xx_xfer;
2444 /* XXX this may happen when detaching */
2445 if (xfer == NULL) {
2446 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2447 (uintptr_t)xx, trb_0, 0, 0);
2448 return;
2449 }
2450 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2451 /* XXX I dunno why this happens */
2452 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2453
2454 if (!xfer->ux_pipe->up_repeat &&
2455 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2456 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2457 0, 0, 0);
2458 return;
2459 }
2460
2461 const uint8_t xfertype =
2462 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2463
2464 /* 4.11.5.2 Event Data TRB */
2465 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2466 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2467 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2468 if ((trb_0 & 0x3) == 0x3) {
2469 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2470 }
2471 }
2472
2473 switch (trbcode) {
2474 case XHCI_TRB_ERROR_SHORT_PKT:
2475 case XHCI_TRB_ERROR_SUCCESS:
2476 /*
2477 * A ctrl transfer can generate two events if it has a Data
2478 * stage. A short data stage can be OK and should not
2479 * complete the transfer as the status stage needs to be
2480 * performed.
2481 *
2482 		 * Note: Data and Status stage events point at the same xfer.
2483 		 * ux_actlen and ux_dmabuf will be passed to
2484 		 * usb_transfer_complete after the Status stage event.
2485 		 *
2486 		 * Which stage generated the event can be distinguished:
2487 		 * + by checking the least significant 3 bits of trb_0 if ED==1.
2488 		 *   (see xhci_device_ctrl_start).
2489 		 * + by checking the type of the original TRB if ED==0.
2490 		 *
2491 		 * In addition, intr, bulk, and isoc transfers currently
2492 		 * consist of a single TD, so the "skip" is not needed.
2493 		 * ctrl xfers use EVENT_DATA, and others do not.
2494 		 * Thus the driver can switch the flow by checking the ED bit.
2495 */
2496 if (xfertype == UE_ISOCHRONOUS) {
2497 xfer->ux_frlengths[xx->xx_isoc_done] -=
2498 XHCI_TRB_2_REM_GET(trb_2);
2499 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2500 if (++xx->xx_isoc_done < xfer->ux_nframes)
2501 return;
2502 } else
2503 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2504 if (xfer->ux_actlen == 0)
2505 xfer->ux_actlen = xfer->ux_length -
2506 XHCI_TRB_2_REM_GET(trb_2);
2507 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2508 == XHCI_TRB_TYPE_DATA_STAGE) {
2509 return;
2510 }
2511 } else if ((trb_0 & 0x3) == 0x3) {
2512 return;
2513 }
2514 err = USBD_NORMAL_COMPLETION;
2515 break;
2516 case XHCI_TRB_ERROR_STOPPED:
2517 case XHCI_TRB_ERROR_LENGTH:
2518 case XHCI_TRB_ERROR_STOPPED_SHORT:
2519 err = USBD_IOERROR;
2520 break;
2521 case XHCI_TRB_ERROR_STALL:
2522 case XHCI_TRB_ERROR_BABBLE:
2523 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2524 xhci_pipe_restart_async(xfer->ux_pipe);
2525 err = USBD_STALLED;
2526 break;
2527 default:
2528 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2529 err = USBD_IOERROR;
2530 break;
2531 }
2532
2533 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
2534 (trb_0 & 0x3) == 0x0) {
2535 /*
2536 * Try to claim this xfer for completion. If it has
2537 * already completed or aborted, drop it on the floor.
2538 */
2539 if (!usbd_xfer_trycomplete(xfer))
2540 return;
2541
2542 /* Set the status. */
2543 xfer->ux_status = err;
2544
2545 usb_transfer_complete(xfer);
2546 }
2547 }
2548
2549 /* Process Command complete events */
2550 static void
2551 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb)
2552 {
2553 uint64_t trb_0;
2554 uint32_t trb_2, trb_3;
2555
2556 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2557
2558 KASSERT(mutex_owned(&sc->sc_lock));
2559
2560 trb_0 = le64toh(trb->trb_0);
2561 trb_2 = le32toh(trb->trb_2);
2562 trb_3 = le32toh(trb->trb_3);
2563
2564 if (trb_0 == sc->sc_command_addr) {
2565 sc->sc_resultpending = false;
2566
2567 sc->sc_result_trb.trb_0 = trb_0;
2568 sc->sc_result_trb.trb_2 = trb_2;
2569 sc->sc_result_trb.trb_3 = trb_3;
2570 if (XHCI_TRB_2_ERROR_GET(trb_2) !=
2571 XHCI_TRB_ERROR_SUCCESS) {
2572 DPRINTFN(1, "command completion "
2573 "failure: 0x%016jx 0x%08jx 0x%08jx",
2574 trb_0, trb_2, trb_3, 0);
2575 }
2576 cv_signal(&sc->sc_command_cv);
2577 } else {
2578 DPRINTFN(1, "spurious event: %#jx 0x%016jx "
2579 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3);
2580 }
2581 }
2582
2583 /*
2584 * Process events.
2585 * called from xhci_softintr
2586 */
2587 static void
2588 xhci_handle_event(struct xhci_softc * const sc,
2589 const struct xhci_trb * const trb)
2590 {
2591 uint64_t trb_0;
2592 uint32_t trb_2, trb_3;
2593
2594 XHCIHIST_FUNC();
2595
2596 trb_0 = le64toh(trb->trb_0);
2597 trb_2 = le32toh(trb->trb_2);
2598 trb_3 = le32toh(trb->trb_3);
2599
2600 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx",
2601 (uintptr_t)trb, trb_0, trb_2, trb_3);
2602
2603 /*
2604 * 4.11.3.1, 6.4.2.1
2605 * TRB Pointer is invalid for these completion codes.
2606 */
2607 switch (XHCI_TRB_2_ERROR_GET(trb_2)) {
2608 case XHCI_TRB_ERROR_RING_UNDERRUN:
2609 case XHCI_TRB_ERROR_RING_OVERRUN:
2610 case XHCI_TRB_ERROR_VF_RING_FULL:
2611 return;
2612 default:
2613 if (trb_0 == 0) {
2614 return;
2615 }
2616 break;
2617 }
2618
2619 switch (XHCI_TRB_3_TYPE_GET(trb_3)) {
2620 case XHCI_TRB_EVENT_TRANSFER:
2621 xhci_event_transfer(sc, trb);
2622 break;
2623 case XHCI_TRB_EVENT_CMD_COMPLETE:
2624 xhci_event_cmd(sc, trb);
2625 break;
2626 case XHCI_TRB_EVENT_PORT_STS_CHANGE:
2627 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff));
2628 break;
2629 default:
2630 break;
2631 }
2632 }
2633
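/*
 * Consume events from the primary event ring: process every TRB whose
 * cycle bit matches the consumer cycle state, toggling that state each
 * time the ring wraps, then write the new dequeue pointer to ERDP(0)
 * with XHCI_ERDP_BUSY set to clear the Event Handler Busy flag.
 */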
2634 static void
2635 xhci_softintr(void *v)
2636 {
2637 struct usbd_bus * const bus = v;
2638 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2639 struct xhci_ring * const er = sc->sc_er;
2640 struct xhci_trb *trb;
2641 int i, j, k, bn;
2642
2643 XHCIHIST_FUNC();
2644
2645 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2646
2647 i = er->xr_ep;
2648 j = er->xr_cs;
2649
2650 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0);
2651
2652 /*
2653 * Handle deferred root intr xfer, in case we just switched off
2654 * polling. It's not safe to complete root intr xfers while
2655 * polling -- too much kernel machinery gets involved.
2656 */
2657 if (!xhci_polling_p(sc)) {
2658 for (bn = 0; bn < 2; bn++) {
2659 if (__predict_false(sc->sc_intrxfer_deferred[bn])) {
2660 sc->sc_intrxfer_deferred[bn] = false;
2661 usb_transfer_complete(sc->sc_intrxfer[bn]);
2662 }
2663 }
2664 }
2665
2666 while (1) {
2667 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE,
2668 BUS_DMASYNC_POSTREAD);
2669 trb = &er->xr_trb[i];
2670 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0;
2671
2672 if (j != k)
2673 break;
2674
2675 xhci_handle_event(sc, trb);
2676
2677 i++;
2678 if (i == er->xr_ntrb) {
2679 i = 0;
2680 j ^= 1;
2681 }
2682 }
2683
2684 er->xr_ep = i;
2685 er->xr_cs = j;
2686
2687 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) |
2688 XHCI_ERDP_BUSY);
2689
2690 DPRINTFN(16, "ends", 0, 0, 0, 0);
2691
2692 return;
2693 }
2694
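/*
 * Polled event processing: run the interrupt handler under the
 * interrupt lock and, if it reported work, process the event ring
 * directly instead of waiting for the soft interrupt.
 */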
2695 static void
2696 xhci_poll(struct usbd_bus *bus)
2697 {
2698 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2699
2700 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2701
2702 mutex_enter(&sc->sc_intr_lock);
2703 int ret = xhci_intr1(sc);
2704 if (ret) {
2705 xhci_softintr(bus);
2706 }
2707 mutex_exit(&sc->sc_intr_lock);
2708
2709 return;
2710 }
2711
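/*
 * Allocate an xfer from the per-controller pool together with a
 * private array of soft TRBs (at least 3, or one per isoc frame)
 * used to stage transfer ring entries.
 */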
2712 static struct usbd_xfer *
2713 xhci_allocx(struct usbd_bus *bus, unsigned int nframes)
2714 {
2715 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2716 struct xhci_xfer *xx;
2717 u_int ntrbs;
2718
2719 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2720
2721 ntrbs = uimax(3, nframes);
2722 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs;
2723
2724 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
2725 if (xx != NULL) {
2726 memset(xx, 0, sizeof(*xx));
2727 if (ntrbs > 0) {
2728 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP);
2729 xx->xx_ntrb = ntrbs;
2730 }
2731 #ifdef DIAGNOSTIC
2732 xx->xx_xfer.ux_state = XFER_BUSY;
2733 #endif
2734 }
2735
2736 return &xx->xx_xfer;
2737 }
2738
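/* Return an xfer to the pool, freeing its private soft TRB array. */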
2739 static void
2740 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
2741 {
2742 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2743 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
2744
2745 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2746
2747 #ifdef DIAGNOSTIC
2748 if (xfer->ux_state != XFER_BUSY &&
2749 xfer->ux_status != USBD_NOT_STARTED) {
2750 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx",
2751 (uintptr_t)xfer, xfer->ux_state, 0, 0);
2752 }
2753 xfer->ux_state = XFER_FREE;
2754 #endif
2755 if (xx->xx_ntrb > 0) {
2756 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb));
2757 xx->xx_trb = NULL;
2758 xx->xx_ntrb = 0;
2759 }
2760 pool_cache_put(sc->sc_xferpool, xx);
2761 }
2762
2763 static bool
2764 xhci_dying(struct usbd_bus *bus)
2765 {
2766 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2767
2768 return sc->sc_dying;
2769 }
2770
2771 static void
2772 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2773 {
2774 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2775
2776 *lock = &sc->sc_lock;
2777 }
2778
2779 extern uint32_t usb_cookie_no;
2780
2781 /*
2782 * xHCI 4.3
2783 * Called when uhub_explore finds a new device (via usbd_new_device).
2784 * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2785 * This function does:
2786 	 * Allocate and construct the dev structure and default endpoint (ep0).
2787 	 * Allocate and open the pipe for ep0.
2788 * Enable slot and initialize slot context.
2789 * Set Address.
2790 * Read initial device descriptor.
2791 * Determine initial MaxPacketSize (mps) by speed.
2792 * Read full device descriptor.
2793 * Register this device.
2794 	 * Finally, the device state transitions to ADDRESSED.
2795 */
2796 static usbd_status
2797 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2798 int speed, int port, struct usbd_port *up)
2799 {
2800 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2801 struct usbd_device *dev;
2802 usbd_status err;
2803 usb_device_descriptor_t *dd;
2804 struct xhci_slot *xs;
2805 uint32_t *cp;
2806
2807 XHCIHIST_FUNC();
2808 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2809 port, depth, speed, (uintptr_t)up);
2810
2811 KASSERT(KERNEL_LOCKED_P());
2812
2813 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2814 dev->ud_bus = bus;
2815 dev->ud_quirks = &usbd_no_quirk;
2816 dev->ud_addr = 0;
2817 dev->ud_ddesc.bMaxPacketSize = 0;
2818 dev->ud_depth = depth;
2819 dev->ud_powersrc = up;
2820 dev->ud_myhub = up->up_parent;
2821 dev->ud_speed = speed;
2822 dev->ud_langid = USBD_NOLANG;
2823 dev->ud_cookie.cookie = ++usb_cookie_no;
2824
2825 /* Set up default endpoint handle. */
2826 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2827 	/* doesn't matter, just don't leave it uninitialized */
2828 dev->ud_ep0.ue_toggle = 0;
2829
2830 /* Set up default endpoint descriptor. */
2831 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2832 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2833 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2834 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2835 dev->ud_ep0desc.bInterval = 0;
2836
2837 /* 4.3, 4.8.2.1 */
2838 switch (speed) {
2839 case USB_SPEED_SUPER:
2840 case USB_SPEED_SUPER_PLUS:
2841 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2842 break;
2843 case USB_SPEED_FULL:
2844 /* XXX using 64 as initial mps of ep0 in FS */
2845 case USB_SPEED_HIGH:
2846 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2847 break;
2848 case USB_SPEED_LOW:
2849 default:
2850 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2851 break;
2852 }
2853
2854 up->up_dev = dev;
2855
2856 dd = &dev->ud_ddesc;
2857
2858 if (depth == 0 && port == 0) {
2859 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2860 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2861
2862 /* Establish the default pipe. */
2863 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2864 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2865 if (err) {
2866 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0);
2867 goto bad;
2868 }
2869 err = usbd_get_initial_ddesc(dev, dd);
2870 if (err) {
2871 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2872 goto bad;
2873 }
2874 } else {
2875 uint8_t slot = 0;
2876
2877 /* 4.3.2 */
2878 err = xhci_enable_slot(sc, &slot);
2879 if (err) {
2880 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0);
2881 goto bad;
2882 }
2883
2884 xs = &sc->sc_slots[slot];
2885 dev->ud_hcpriv = xs;
2886
2887 /* 4.3.3 initialize slot structure */
2888 err = xhci_init_slot(dev, slot);
2889 if (err) {
2890 DPRINTFN(1, "init slot %ju", err, 0, 0, 0);
2891 dev->ud_hcpriv = NULL;
2892 /*
2893 			 * We have to disable_slot here because
2894 			 * xs->xs_idx == 0 when xhci_init_slot fails;
2895 			 * in that case usbd_remove_device won't work.
2896 */
2897 mutex_enter(&sc->sc_lock);
2898 xhci_disable_slot(sc, slot);
2899 mutex_exit(&sc->sc_lock);
2900 goto bad;
2901 }
2902
2903 /*
2904 * We have to establish the default pipe _after_ slot
2905 * structure has been prepared.
2906 */
2907 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2908 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2909 if (err) {
2910 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0,
2911 0);
2912 goto bad;
2913 }
2914
2915 /* 4.3.4 Address Assignment */
2916 err = xhci_set_address(dev, slot, false);
2917 if (err) {
2918 			DPRINTFN(1, "failed to set address: %ju", err, 0, 0, 0);
2919 goto bad;
2920 }
2921
2922 /* Allow device time to set new address */
2923 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE);
2924
2925 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2926 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT);
2927 HEXDUMP("slot context", cp, sc->sc_ctxsz);
2928 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3]));
2929 DPRINTFN(4, "device address %ju", addr, 0, 0, 0);
2930 /*
2931 * XXX ensure we know when the hardware does something
2932 * we can't yet cope with
2933 */
2934 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr);
2935 dev->ud_addr = addr;
2936
2937 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL,
2938 "addr %d already allocated", dev->ud_addr);
2939 /*
2940 * The root hub is given its own slot
2941 */
2942 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev;
2943
2944 err = usbd_get_initial_ddesc(dev, dd);
2945 if (err) {
2946 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2947 goto bad;
2948 }
2949
2950 /* 4.8.2.1 */
2951 if (USB_IS_SS(speed)) {
2952 if (dd->bMaxPacketSize != 9) {
2953 printf("%s: invalid mps 2^%u for SS ep0,"
2954 " using 512\n",
2955 device_xname(sc->sc_dev),
2956 dd->bMaxPacketSize);
2957 dd->bMaxPacketSize = 9;
2958 }
2959 USETW(dev->ud_ep0desc.wMaxPacketSize,
2960 (1 << dd->bMaxPacketSize));
2961 } else
2962 USETW(dev->ud_ep0desc.wMaxPacketSize,
2963 dd->bMaxPacketSize);
2964 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0);
2965 err = xhci_update_ep0_mps(sc, xs,
2966 UGETW(dev->ud_ep0desc.wMaxPacketSize));
2967 if (err) {
2968 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0);
2969 goto bad;
2970 }
2971 }
2972
2973 err = usbd_reload_device_desc(dev);
2974 if (err) {
2975 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
2976 goto bad;
2977 }
2978
2979 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,",
2980 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0);
2981 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,",
2982 dd->bDeviceClass, dd->bDeviceSubClass,
2983 dd->bDeviceProtocol, 0);
2984 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd",
2985 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations,
2986 dev->ud_speed);
2987
2988 usbd_get_device_strings(dev);
2989
2990 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev);
2991
2992 if (depth == 0 && port == 0) {
2993 usbd_attach_roothub(parent, dev);
2994 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0);
2995 return USBD_NORMAL_COMPLETION;
2996 }
2997
2998 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr);
2999 bad:
3000 if (err != USBD_NORMAL_COMPLETION) {
3001 if (depth == 0 && port == 0 && dev->ud_pipe0)
3002 usbd_kill_pipe(dev->ud_pipe0);
3003 usbd_remove_device(dev, up);
3004 }
3005
3006 return err;
3007 }
3008
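/*
 * Allocate a TRB ring of 'ntrb' entries along with its DMA memory,
 * per-TRB cookie array, and lock, and reset it to the initial
 * enqueue index and cycle state.
 */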
3009 static usbd_status
3010 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp,
3011 size_t ntrb, size_t align)
3012 {
3013 size_t size = ntrb * XHCI_TRB_SIZE;
3014 struct xhci_ring *xr;
3015
3016 XHCIHIST_FUNC();
3017 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx",
3018 (uintptr_t)*xrp, ntrb, align, 0);
3019
3020 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP);
3021 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0);
3022
3023 int err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
3024 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xr->xr_dma);
3025 if (err) {
3026 kmem_free(xr, sizeof(struct xhci_ring));
3027 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0);
3028 return err;
3029 }
3030 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
3031 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP);
3032 xr->xr_trb = xhci_ring_trbv(xr, 0);
3033 xr->xr_ntrb = ntrb;
3034 xr->is_halted = false;
3035 xhci_host_dequeue(xr);
3036 *xrp = xr;
3037
3038 return USBD_NORMAL_COMPLETION;
3039 }
3040
3041 static void
3042 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr)
3043 {
3044 if (*xr == NULL)
3045 return;
3046
3047 usb_freemem(&(*xr)->xr_dma);
3048 mutex_destroy(&(*xr)->xr_lock);
3049 kmem_free((*xr)->xr_cookies,
3050 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb);
3051 kmem_free(*xr, sizeof(struct xhci_ring));
3052 *xr = NULL;
3053 }
3054
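/*
 * Enqueue 'ntrbs' TRBs from 'trbs' onto ring 'xr', recording 'cookie'
 * for each occupied slot.  A Link TRB is inserted whenever the enqueue
 * pointer reaches the last entry of the ring.  The first TRB is
 * initially written with the wrong cycle bit and only flipped once
 * everything else is in place, so the xHC never sees a partially
 * written chain.
 */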
3055 static void
3056 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr,
3057 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs)
3058 {
3059 size_t i;
3060 u_int ri;
3061 u_int cs;
3062 uint64_t parameter;
3063 uint32_t status;
3064 uint32_t control;
3065
3066 XHCIHIST_FUNC();
3067 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju",
3068 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0);
3069
3070 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u",
3071 ntrbs, xr->xr_ntrb);
3072 for (i = 0; i < ntrbs; i++) {
3073 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr,
3074 (uintptr_t)trbs, i, 0);
3075 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx",
3076 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0);
3077 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) !=
3078 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3);
3079 }
3080
3081 ri = xr->xr_ep;
3082 cs = xr->xr_cs;
3083
3084 /*
3085 * Although the xhci hardware can do scatter/gather dma from
3086 * arbitrary sized buffers, there is a non-obvious restriction
3087 * that a LINK trb is only allowed at the end of a burst of
3088 * transfers - which might be 16kB.
3089 	 * Arbitrarily aligned LINK trbs definitely fail on Ivy Bridge.
3090 * The simple solution is not to allow a LINK trb in the middle
3091 * of anything - as here.
3092 * XXX: (dsl) There are xhci controllers out there (eg some made by
3093 * ASMedia) that seem to lock up if they process a LINK trb but
3094 * cannot process the linked-to trb yet.
3095 * The code should write the 'cycle' bit on the link trb AFTER
3096 * adding the other trb.
3097 */
3098 u_int firstep = xr->xr_ep;
3099 u_int firstcs = xr->xr_cs;
3100
3101 for (i = 0; i < ntrbs; ) {
3102 u_int oldri = ri;
3103 u_int oldcs = cs;
3104
3105 if (ri >= (xr->xr_ntrb - 1)) {
3106 /* Put Link TD at the end of ring */
3107 parameter = xhci_ring_trbp(xr, 0);
3108 status = 0;
3109 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) |
3110 XHCI_TRB_3_TC_BIT;
3111 xr->xr_cookies[ri] = NULL;
3112 xr->xr_ep = 0;
3113 xr->xr_cs ^= 1;
3114 ri = xr->xr_ep;
3115 cs = xr->xr_cs;
3116 } else {
3117 parameter = trbs[i].trb_0;
3118 status = trbs[i].trb_2;
3119 control = trbs[i].trb_3;
3120
3121 xr->xr_cookies[ri] = cookie;
3122 ri++;
3123 i++;
3124 }
3125 /*
3126 		 * If this is the first TRB, write it with the wrong cycle bit
3127 		 * to prevent the xHC from running it immediately.
3128 */
3129 if (oldri == firstep) {
3130 if (oldcs) {
3131 control &= ~XHCI_TRB_3_CYCLE_BIT;
3132 } else {
3133 control |= XHCI_TRB_3_CYCLE_BIT;
3134 }
3135 } else {
3136 if (oldcs) {
3137 control |= XHCI_TRB_3_CYCLE_BIT;
3138 } else {
3139 control &= ~XHCI_TRB_3_CYCLE_BIT;
3140 }
3141 }
3142 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control);
3143 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri,
3144 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3145 }
3146
3147 /* Now invert cycle bit of first TRB */
3148 if (firstcs) {
3149 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT);
3150 } else {
3151 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT);
3152 }
3153 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep,
3154 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3155
3156 xr->xr_ep = ri;
3157 xr->xr_cs = cs;
3158
3159 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep,
3160 xr->xr_cs, 0);
3161 }
3162
3163 static inline void
3164 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr,
3165 struct xhci_xfer *xx, u_int ntrb)
3166 {
3167 KASSERT(ntrb <= xx->xx_ntrb);
3168 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb);
3169 }
3170
3171 /*
3172  * Stop execution of commands, purge all commands on the command ring, and
3173  * rewind the dequeue pointer.
3174 */
3175 static void
3176 xhci_abort_command(struct xhci_softc *sc)
3177 {
3178 struct xhci_ring * const cr = sc->sc_cr;
3179 uint64_t crcr;
3180 int i;
3181
3182 XHCIHIST_FUNC();
3183 XHCIHIST_CALLARGS("command %#jx timeout, aborting",
3184 sc->sc_command_addr, 0, 0, 0);
3185
3186 mutex_enter(&cr->xr_lock);
3187
3188 /* 4.6.1.2 Aborting a Command */
3189 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3190 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA);
3191
3192 for (i = 0; i < 500; i++) {
3193 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3194 if ((crcr & XHCI_CRCR_LO_CRR) == 0)
3195 break;
3196 usb_delay_ms(&sc->sc_bus, 1);
3197 }
3198 if ((crcr & XHCI_CRCR_LO_CRR) != 0) {
3199 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0);
3200 /* reset HC here? */
3201 }
3202
3203 /* reset command ring dequeue pointer */
3204 cr->xr_ep = 0;
3205 cr->xr_cs = 1;
3206 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs);
3207
3208 mutex_exit(&cr->xr_lock);
3209 }
3210
3211 /*
3212  * Put a command on the command ring, ring the doorbell, and cv_timedwait
3213  * for completion.  Command completion is notified by cv_signal from
3214  * xhci_event_cmd() (called from xhci_softintr), or the wait times out.
3215  * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(),
3216  * then xhci_do_command_locked examines it.
3217 */
3218 static usbd_status
3219 xhci_do_command_locked(struct xhci_softc * const sc,
3220 struct xhci_soft_trb * const trb, int timeout)
3221 {
3222 struct xhci_ring * const cr = sc->sc_cr;
3223 usbd_status err;
3224
3225 XHCIHIST_FUNC();
3226 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx",
3227 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3228
3229 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
3230 KASSERT(mutex_owned(&sc->sc_lock));
3231
3232 while (sc->sc_command_addr != 0 ||
3233 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp))
3234 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
3235 if (sc->sc_suspendresume_failed)
3236 return USBD_IOERROR;
3237
3238 /*
3239 	 * If the enqueue pointer points at the last entry of the ring, that is
3240 	 * the Link TRB, and the command TRB will be stored in the 0th TRB.
3241 */
3242 if (cr->xr_ep == cr->xr_ntrb - 1)
3243 sc->sc_command_addr = xhci_ring_trbp(cr, 0);
3244 else
3245 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep);
3246
3247 sc->sc_resultpending = true;
3248
3249 mutex_enter(&cr->xr_lock);
3250 xhci_ring_put(sc, cr, NULL, trb, 1);
3251 mutex_exit(&cr->xr_lock);
3252
3253 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0);
3254
3255 while (sc->sc_resultpending) {
3256 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock,
3257 MAX(1, mstohz(timeout))) == EWOULDBLOCK) {
3258 xhci_abort_command(sc);
3259 err = USBD_TIMEOUT;
3260 goto timedout;
3261 }
3262 }
3263
3264 trb->trb_0 = sc->sc_result_trb.trb_0;
3265 trb->trb_2 = sc->sc_result_trb.trb_2;
3266 trb->trb_3 = sc->sc_result_trb.trb_3;
3267
3268 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx",
3269 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3270
3271 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) {
3272 case XHCI_TRB_ERROR_SUCCESS:
3273 err = USBD_NORMAL_COMPLETION;
3274 break;
3275 default:
3276 case 192 ... 223:
3277 DPRINTFN(5, "error %#jx",
3278 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0);
3279 err = USBD_IOERROR;
3280 break;
3281 case 224 ... 255:
3282 err = USBD_NORMAL_COMPLETION;
3283 break;
3284 }
3285
3286 timedout:
3287 sc->sc_resultpending = false;
3288 sc->sc_command_addr = 0;
3289 cv_broadcast(&sc->sc_cmdbusy_cv);
3290
3291 return err;
3292 }
3293
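/* As xhci_do_command_locked, but takes and releases sc_lock itself. */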
3294 static usbd_status
3295 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb,
3296 int timeout)
3297 {
3298
3299 mutex_enter(&sc->sc_lock);
3300 usbd_status ret = xhci_do_command_locked(sc, trb, timeout);
3301 mutex_exit(&sc->sc_lock);
3302
3303 return ret;
3304 }
3305
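/*
 * xHCI 4.3.2: obtain a device slot with an Enable Slot command.  On
 * success the allocated slot ID, taken from the command completion
 * event, is returned in *slotp.
 */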
3306 static usbd_status
3307 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp)
3308 {
3309 struct xhci_soft_trb trb;
3310 usbd_status err;
3311
3312 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3313
3314 trb.trb_0 = 0;
3315 trb.trb_2 = 0;
3316 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
3317
3318 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3319 if (err != USBD_NORMAL_COMPLETION) {
3320 return err;
3321 }
3322
3323 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3);
3324
3325 return err;
3326 }
3327
3328 /*
3329 * xHCI 4.6.4
3330  * Issue a Disable Slot command, then free the slot's rings and DMA contexts.
3331 * All endpoints in the slot should be stopped.
3332 * Should be called with sc_lock held.
3333 */
3334 static usbd_status
3335 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot)
3336 {
3337 struct xhci_soft_trb trb;
3338 struct xhci_slot *xs;
3339 usbd_status err;
3340
3341 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3342
3343 if (sc->sc_dying)
3344 return USBD_IOERROR;
3345
3346 trb.trb_0 = 0;
3347 trb.trb_2 = 0;
3348 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) |
3349 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT);
3350
3351 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
3352
3353 if (!err) {
3354 xs = &sc->sc_slots[slot];
3355 if (xs->xs_idx != 0) {
3356 xhci_free_slot(sc, xs);
3357 xhci_set_dcba(sc, 0, slot);
3358 memset(xs, 0, sizeof(*xs));
3359 }
3360 }
3361
3362 return err;
3363 }
3364
3365 /*
3366 * Set address of device and transition slot state from ENABLED to ADDRESSED
3367  * if the Block Set Address Request (BSR) flag is false.
3368 * If BSR==true, transition slot state from ENABLED to DEFAULT.
3369 * see xHCI 1.1 4.5.3, 3.3.4
3370 * Should be called without sc_lock held.
3371 */
3372 static usbd_status
3373 xhci_address_device(struct xhci_softc * const sc,
3374 uint64_t icp, uint8_t slot_id, bool bsr)
3375 {
3376 struct xhci_soft_trb trb;
3377 usbd_status err;
3378
3379 XHCIHIST_FUNC();
3380 if (bsr) {
3381 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr",
3382 icp, slot_id, 0, 0);
3383 } else {
3384 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr",
3385 icp, slot_id, 0, 0);
3386 }
3387
3388 trb.trb_0 = icp;
3389 trb.trb_2 = 0;
3390 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) |
3391 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) |
3392 (bsr ? XHCI_TRB_3_BSR_BIT : 0);
3393
3394 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3395
3396 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS)
3397 err = USBD_NO_ADDR;
3398
3399 return err;
3400 }
3401
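/*
 * Update the max packet size of ep0 in the input endpoint context and
 * apply it with an Evaluate Context command.  Used once the initial
 * device descriptor has revealed the real bMaxPacketSize of ep0.
 */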
3402 static usbd_status
3403 xhci_update_ep0_mps(struct xhci_softc * const sc,
3404 struct xhci_slot * const xs, u_int mps)
3405 {
3406 struct xhci_soft_trb trb;
3407 usbd_status err;
3408 uint32_t * cp;
3409
3410 XHCIHIST_FUNC();
3411 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0);
3412
3413 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3414 cp[0] = htole32(0);
3415 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL));
3416
3417 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL));
3418 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps));
3419
3420 /* sync input contexts before they are read from memory */
3421 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3422 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3423 sc->sc_ctxsz * 4);
3424
3425 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
3426 trb.trb_2 = 0;
3427 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
3428 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX);
3429
3430 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3431 return err;
3432 }
3433
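/*
 * Store the Device Context Base Address for slot 'si' into the DCBAA
 * and sync it for the controller; a dcba of 0 clears the entry when a
 * slot is disabled.
 */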
3434 static void
3435 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si)
3436 {
3437 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0);
3438
3439 XHCIHIST_FUNC();
3440 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd",
3441 (uintptr_t)&dcbaa[si], dcba, si, 0);
3442
3443 dcbaa[si] = htole64(dcba);
3444 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t),
3445 BUS_DMASYNC_PREWRITE);
3446 }
3447
3448 /*
3449  * Allocate the device and input context DMA buffers for the slot.
3450  * (The per-endpoint TRB rings are allocated later, in xhci_open.)
3451 */
3452 static usbd_status
3453 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3454 {
3455 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3456 struct xhci_slot *xs;
3457
3458 XHCIHIST_FUNC();
3459 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3460
3461 xs = &sc->sc_slots[slot];
3462
3463 /* allocate contexts */
3464 int err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3465 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_dc_dma);
3466 if (err) {
3467 DPRINTFN(1, "failed to allocmem output device context %jd",
3468 err, 0, 0, 0);
3469 return USBD_NOMEM;
3470 }
3471
3472 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3473 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_ic_dma);
3474 if (err) {
3475 DPRINTFN(1, "failed to allocmem input device context %jd",
3476 err, 0, 0, 0);
3477 goto bad1;
3478 }
3479
3480 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr));
3481 xs->xs_idx = slot;
3482
3483 return USBD_NORMAL_COMPLETION;
3484
3485 bad1:
3486 usb_freemem(&xs->xs_dc_dma);
3487 xs->xs_idx = 0;
3488 return USBD_NOMEM;
3489 }
3490
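/*
 * Free every ring still allocated for the slot's endpoints and the
 * slot's device/input context DMA buffers, and mark the slot unused.
 */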
3491 static void
3492 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs)
3493 {
3494 u_int dci;
3495
3496 XHCIHIST_FUNC();
3497 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
3498
3499 /* deallocate all allocated rings in the slot */
3500 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
3501 if (xs->xs_xr[dci] != NULL)
3502 xhci_ring_free(sc, &xs->xs_xr[dci]);
3503 }
3504 usb_freemem(&xs->xs_ic_dma);
3505 usb_freemem(&xs->xs_dc_dma);
3506 xs->xs_idx = 0;
3507 }
3508
3509 /*
3510  * Set up the slot context, set the Device Context Base Address, and
3511  * issue an Address Device command.
3512 */
3513 static usbd_status
3514 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3515 {
3516 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3517 struct xhci_slot *xs;
3518 usbd_status err;
3519
3520 XHCIHIST_FUNC();
3521 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3522
3523 xs = &sc->sc_slots[slot];
3524
3525 xhci_setup_ctx(dev->ud_pipe0);
3526
3527 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3528 sc->sc_ctxsz * 3);
3529
3530 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3531
3532 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3533
3534 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3535 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3536 sc->sc_ctxsz * 2);
3537
3538 return err;
3539 }
3540
3541 /*
3542 * 4.8.2, 6.2.3.2
3543 * construct slot/endpoint context parameters and do syncmem
3544 */
3545 static void
3546 xhci_setup_ctx(struct usbd_pipe *pipe)
3547 {
3548 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3549 struct usbd_device *dev = pipe->up_dev;
3550 struct xhci_slot * const xs = dev->ud_hcpriv;
3551 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3552 const u_int dci = xhci_ep_get_dci(ed);
3553 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3554 uint32_t *cp;
3555 uint16_t mps = UGETW(ed->wMaxPacketSize);
3556 uint8_t speed = dev->ud_speed;
3557 uint8_t ival = ed->bInterval;
3558
3559 XHCIHIST_FUNC();
3560 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3561 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3562
3563 /* set up initial input control context */
3564 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3565 cp[0] = htole32(0);
3566 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3567 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT));
3568 cp[7] = htole32(0);
3569
3570 /* set up input slot context */
3571 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
3572 cp[0] =
3573 XHCI_SCTX_0_CTX_NUM_SET(dci) |
3574 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed));
3575 cp[1] = 0;
3576 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0);
3577 cp[3] = 0;
3578 xhci_setup_route(pipe, cp);
3579 xhci_setup_tthub(pipe, cp);
3580
3581 cp[0] = htole32(cp[0]);
3582 cp[1] = htole32(cp[1]);
3583 cp[2] = htole32(cp[2]);
3584 cp[3] = htole32(cp[3]);
3585
3586 /* set up input endpoint context */
3587 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci));
3588 cp[0] =
3589 XHCI_EPCTX_0_EPSTATE_SET(0) |
3590 XHCI_EPCTX_0_MULT_SET(0) |
3591 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) |
3592 XHCI_EPCTX_0_LSA_SET(0) |
3593 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0);
3594 cp[1] =
3595 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) |
3596 XHCI_EPCTX_1_HID_SET(0) |
3597 XHCI_EPCTX_1_MAXB_SET(0);
3598
3599 if (xfertype != UE_ISOCHRONOUS)
3600 cp[1] |= XHCI_EPCTX_1_CERR_SET(3);
3601
3602 if (xfertype == UE_CONTROL)
3603 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); /* 6.2.3 */
3604 else if (USB_IS_SS(speed))
3605 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(mps);
3606 else
3607 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(UE_GET_SIZE(mps));
3608
3609 xhci_setup_maxburst(pipe, cp);
3610
3611 switch (xfertype) {
3612 case UE_CONTROL:
3613 break;
3614 case UE_BULK:
3615 /* XXX Set MaxPStreams, HID, and LSA if streams enabled */
3616 break;
3617 case UE_INTERRUPT:
3618 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3619 ival = pipe->up_interval;
3620
3621 ival = xhci_bival2ival(ival, speed);
3622 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3623 break;
3624 case UE_ISOCHRONOUS:
3625 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3626 ival = pipe->up_interval;
3627
3628 /* xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 */
3629 if (speed == USB_SPEED_FULL)
3630 ival += 3; /* 1ms -> 125us */
3631 ival--;
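		/*
		 * Illustrative example: a full-speed isoc endpoint with
		 * bInterval 1 (one 1ms frame) ends up with ival 3, i.e.
		 * 2^3 * 125us = 1ms in xHC interval units.
		 */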
3632 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3633 break;
3634 default:
3635 break;
3636 }
3637 DPRINTFN(4, "setting ival %ju MaxBurst %#jx",
3638 XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_1_MAXB_GET(cp[1]), 0, 0);
3639
3640 /* rewind TR dequeue pointer in xHC */
3641 /* can't use xhci_ep_get_dci() yet? */
3642 *(uint64_t *)(&cp[2]) = htole64(
3643 xhci_ring_trbp(xs->xs_xr[dci], 0) |
3644 XHCI_EPCTX_2_DCS_SET(1));
3645
3646 cp[0] = htole32(cp[0]);
3647 cp[1] = htole32(cp[1]);
3648 cp[4] = htole32(cp[4]);
3649
3650 /* rewind TR dequeue pointer in driver */
3651 struct xhci_ring *xr = xs->xs_xr[dci];
3652 mutex_enter(&xr->xr_lock);
3653 xhci_host_dequeue(xr);
3654 mutex_exit(&xr->xr_lock);
3655
3656 /* sync input contexts before they are read from memory */
3657 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3658 }
3659
3660 /*
3661  * Set up the route string and root hub port of the given device for the slot context
3662 */
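/*
 * Illustrative example: a device on port 3 of a hub that sits on
 * port 2 of a hub plugged into root hub port 4 yields rhport 4 and a
 * route string of 0x32 -- the low nibble is the port on the hub
 * nearest the root, and the root hub port itself is not encoded.
 */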
3663 static void
3664 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3665 {
3666 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3667 struct usbd_device *dev = pipe->up_dev;
3668 struct usbd_port *up = dev->ud_powersrc;
3669 struct usbd_device *hub;
3670 struct usbd_device *adev;
3671 uint8_t rhport = 0;
3672 uint32_t route = 0;
3673
3674 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3675
3676 	/* Locate the root hub port and determine the route string */
3677 /* 4.3.3 route string does not include roothub port */
3678 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3679 uint32_t dep;
3680
3681 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd",
3682 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3683 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3684 -1);
3685
3686 if (hub->ud_powersrc == NULL)
3687 break;
3688 dep = hub->ud_depth;
3689 if (dep == 0)
3690 break;
3691 rhport = hub->ud_powersrc->up_portno;
3692 if (dep > USB_HUB_MAX_DEPTH)
3693 continue;
3694
3695 route |=
3696 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3697 << ((dep - 1) * 4);
3698 }
3699 route = route >> 4;
3700 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
3701
3702 /* Locate port on upstream high speed hub */
3703 for (adev = dev, hub = up->up_parent;
3704 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3705 adev = hub, hub = hub->ud_myhub)
3706 ;
3707 if (hub) {
3708 int p;
3709 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3710 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) {
3711 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1];
3712 goto found;
3713 }
3714 }
3715 panic("%s: cannot find HS port", __func__);
3716 found:
3717 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3718 } else {
3719 dev->ud_myhsport = NULL;
3720 }
3721
3722 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3723
3724 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3725 ctlrport, route, (uintptr_t)hub);
3726
3727 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3728 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3729 }
3730
3731 /*
3732  * Set up whether the device is a hub, whether it uses MTT, and the
3733  * TT information if it uses MTT.
3734 */
3735 static void
3736 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3737 {
3738 struct usbd_device *dev = pipe->up_dev;
3739 struct usbd_port *myhsport = dev->ud_myhsport;
3740 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3741 uint32_t speed = dev->ud_speed;
3742 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3743 uint8_t tthubslot, ttportnum;
3744 bool ishub;
3745 bool usemtt;
3746
3747 XHCIHIST_FUNC();
3748
3749 /*
3750 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3751 * tthubslot:
3752 	 *   This is the slot ID of the parent HS hub when an LS/FS
3753 	 *   device is attached through an HS hub.
3754 	 *   This is 0 if the device is not LS/FS,
3755 	 *   the parent hub is not an HS hub,
3756 	 *   or the device is attached to the root hub.
3757 	 * ttportnum:
3758 	 *   This is the downstream-facing port number on the parent
3759 	 *   HS hub when an LS/FS device is attached through it.
3760 	 *   This is 0 if the device is not LS/FS,
3761 	 *   the parent hub is not an HS hub,
3762 	 *   or the device is attached to the root hub.
3763 */
3764 if (myhsport &&
3765 myhsport->up_parent->ud_addr != rhaddr &&
3766 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3767 ttportnum = myhsport->up_portno;
3768 tthubslot = myhsport->up_parent->ud_addr;
3769 } else {
3770 ttportnum = 0;
3771 tthubslot = 0;
3772 }
3773 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd",
3774 (uintptr_t)myhsport, ttportnum, tthubslot, 0);
3775
3776 /* ishub is valid after reading UDESC_DEVICE */
3777 ishub = (dd->bDeviceClass == UDCLASS_HUB);
3778
3779 /* dev->ud_hub is valid after reading UDESC_HUB */
3780 if (ishub && dev->ud_hub) {
3781 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc;
3782 uint8_t ttt =
3783 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK);
3784
3785 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts);
3786 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt);
3787 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0);
3788 }
3789
3790 #define IS_MTTHUB(dd) \
3791 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT)
3792
3793 /*
3794 * MTT flag is set if
3795 * 1. this is HS hub && MTTs are supported and enabled; or
3796 * 2. this is LS or FS device && there is a parent HS hub where MTTs
3797 * are supported and enabled.
3798 *
3799 * XXX enabled is not tested yet
3800 */
3801 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd))
3802 usemtt = true;
3803 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) &&
3804 myhsport &&
3805 myhsport->up_parent->ud_addr != rhaddr &&
3806 IS_MTTHUB(&myhsport->up_parent->ud_ddesc))
3807 usemtt = true;
3808 else
3809 usemtt = false;
3810 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd",
3811 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt);
3812
3813 #undef IS_MTTHUB
3814
3815 cp[0] |=
3816 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) |
3817 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0);
3818 cp[2] |=
3819 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) |
3820 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum);
3821 }
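/*
 * Worked example for the TT fields above (illustrative): a low-speed
 * mouse on port 4 of a high-speed hub at address 5 (not the root hub)
 * gets ttportnum = 4 and tthubslot = 5, with MTT set only if that hub
 * reports UDPROTO_HSHUBMTT.  HS/SS devices, and devices attached
 * directly to the root hub, get both fields 0.
 */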
3822
3823 /* set up params for periodic endpoint */
3824 static void
3825 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp)
3826 {
3827 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
3828 struct usbd_device *dev = pipe->up_dev;
3829 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3830 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3831 usbd_desc_iter_t iter;
3832 const usb_cdc_descriptor_t *cdcd;
3833 uint32_t maxb = 0;
3834 uint16_t mps = UGETW(ed->wMaxPacketSize);
3835 uint8_t speed = dev->ud_speed;
3836 uint8_t mult = 0;
3837 uint8_t ep;
3838
3839 /* config desc is NULL when opening ep0 */
3840 if (dev == NULL || dev->ud_cdesc == NULL)
3841 goto no_cdcd;
3842 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev,
3843 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY);
3844 if (cdcd == NULL)
3845 goto no_cdcd;
3846 usb_desc_iter_init(dev, &iter);
3847 iter.cur = (const void *)cdcd;
3848
3849 /* find endpoint_ss_comp desc for ep of this pipe */
3850 for (ep = 0;;) {
3851 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter);
3852 if (cdcd == NULL)
3853 break;
3854 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) {
3855 ep = ((const usb_endpoint_descriptor_t *)cdcd)->
3856 bEndpointAddress;
3857 if (UE_GET_ADDR(ep) ==
3858 UE_GET_ADDR(ed->bEndpointAddress)) {
3859 cdcd = (const usb_cdc_descriptor_t *)
3860 usb_desc_iter_next(&iter);
3861 break;
3862 }
3863 ep = 0;
3864 }
3865 }
3866 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
3867 const usb_endpoint_ss_comp_descriptor_t * esscd =
3868 (const usb_endpoint_ss_comp_descriptor_t *)cdcd;
3869 maxb = esscd->bMaxBurst;
3870 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes);
3871 }
3872
3873 no_cdcd:
3874 /* 6.2.3.4, 4.8.2.4 */
3875 if (USB_IS_SS(speed)) {
3876 /* USB 3.1 9.6.6 */
3877 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps);
3878 /* USB 3.1 9.6.7 */
3879 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3880 #ifdef notyet
3881 if (xfertype == UE_ISOCHRONOUS) {
3882 }
3883 if (XHCI_HCC2_LEC(sc->sc_hcc2) != 0) {
3884 /* use ESIT */
3885 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x);
3886 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(x);
3887
3888 /* XXX if LEC = 1, set ESIT instead */
3889 cp[0] |= XHCI_EPCTX_0_MULT_SET(0);
3890 } else {
3891 /* use ival */
3892 }
3893 #endif
3894 } else {
3895 /* USB 2.0 9.6.6 */
3896 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(UE_GET_SIZE(mps));
3897
3898 /* 6.2.3.4 */
3899 if (speed == USB_SPEED_HIGH &&
3900 (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)) {
3901 maxb = UE_GET_TRANS(mps);
3902 } else {
3903 /* LS/FS or HS CTRL or HS BULK */
3904 maxb = 0;
3905 }
3906 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3907 }
3908 xpipe->xp_maxb = maxb + 1;
3909 xpipe->xp_mult = mult + 1;
3910 }
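/*
 * Worked examples for the burst values above (illustrative): a SuperSpeed
 * bulk endpoint whose SS companion descriptor has bMaxBurst = 3 gets
 * Max Burst Size 3 (bursts of 4 packets).  A high-speed isochronous
 * endpoint with wMaxPacketSize = 0x1400 (1024 bytes plus 2 additional
 * transactions per microframe) gets Max Packet Size 1024 and Max Burst
 * Size 2, and xp_maxb is stored as 3.
 */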
3911
3912 /*
3913 * Convert endpoint bInterval value to endpoint context interval value
3914 * for Interrupt pipe.
3915 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
3916 */
3917 static uint32_t
3918 xhci_bival2ival(uint32_t ival, uint32_t speed)
3919 {
3920 if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
3921 int i;
3922
3923 /*
3924 * round ival down to "the nearest base 2 multiple of
3925 * bInterval * 8".
3926 * bInterval is at most 255 as its type is uByte.
3927 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
3928 */
3929 for (i = 10; i > 0; i--) {
3930 if ((ival * 8) >= (1 << i))
3931 break;
3932 }
3933 ival = i;
3934 } else {
3935 /* Interval = bInterval-1 for SS/HS */
3936 ival--;
3937 }
3938
3939 return ival;
3940 }
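/*
 * Worked example (illustrative): a full-speed interrupt endpoint with
 * bInterval = 10 (10ms) gives 10 * 8 = 80 units of 125us; the largest
 * power of two not exceeding 80 is 64 = 2^6, so 6 is returned (an 8ms
 * service interval).  For high speed, bInterval = 4 simply yields
 * 4 - 1 = 3 (2^3 * 125us = 1ms).
 */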
3941
3942 /* ----- */
3943
3944 static void
3945 xhci_noop(struct usbd_pipe *pipe)
3946 {
3947 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3948 }
3949
3950 /*
3951 * Process root hub request.
3952 */
3953 static int
3954 xhci_roothub_ctrl_locked(struct usbd_bus *bus, usb_device_request_t *req,
3955 void *buf, int buflen)
3956 {
3957 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
3958 usb_port_status_t ps;
3959 int l, totlen = 0;
3960 uint16_t len, value, index;
3961 int port, i;
3962 uint32_t v;
3963
3964 XHCIHIST_FUNC();
3965
3966 KASSERT(mutex_owned(&sc->sc_rhlock));
3967
3968 if (sc->sc_dying)
3969 return -1;
3970
3971 size_t bn = bus == &sc->sc_bus ? 0 : 1;
3972
3973 len = UGETW(req->wLength);
3974 value = UGETW(req->wValue);
3975 index = UGETW(req->wIndex);
3976
3977 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
3978 req->bmRequestType | (req->bRequest << 8), value, index, len);
3979
3980 #define C(x,y) ((x) | ((y) << 8))
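	/*
	 * e.g. C(UR_GET_STATUS, UT_READ_CLASS_OTHER) packs bRequest 0x00
	 * and bmRequestType 0xa3 into 0xa300 for the switch below
	 * (illustrative).
	 */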
3981 switch (C(req->bRequest, req->bmRequestType)) {
3982 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3983 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
3984 if (len == 0)
3985 break;
3986 switch (value) {
3987 #define sd ((usb_string_descriptor_t *)buf)
3988 case C(2, UDESC_STRING):
3989 /* Product */
3990 totlen = usb_makestrdesc(sd, len, "xHCI root hub");
3991 break;
3992 #undef sd
3993 default:
3994 /* default from usbroothub */
3995 return buflen;
3996 }
3997 break;
3998
3999 /* Hub requests */
4000 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
4001 break;
4002 /* Clear Port Feature request */
4003 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
4004 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4005
4006 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
4007 index, value, bn, cp);
4008 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4009 return -1;
4010 }
4011 port = XHCI_PORTSC(cp);
4012 v = xhci_op_read_4(sc, port);
4013 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
4014 v &= ~XHCI_PS_CLEAR;
4015 switch (value) {
4016 case UHF_PORT_ENABLE:
4017 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
4018 break;
4019 case UHF_PORT_SUSPEND:
4020 return -1;
4021 case UHF_PORT_POWER:
4022 break;
4023 case UHF_PORT_TEST:
4024 case UHF_PORT_INDICATOR:
4025 return -1;
4026 case UHF_C_PORT_CONNECTION:
4027 xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
4028 break;
4029 case UHF_C_PORT_ENABLE:
4030 case UHF_C_PORT_SUSPEND:
4031 case UHF_C_PORT_OVER_CURRENT:
4032 return -1;
4033 case UHF_C_BH_PORT_RESET:
4034 xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
4035 break;
4036 case UHF_C_PORT_RESET:
4037 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4038 break;
4039 case UHF_C_PORT_LINK_STATE:
4040 xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
4041 break;
4042 case UHF_C_PORT_CONFIG_ERROR:
4043 xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
4044 break;
4045 default:
4046 return -1;
4047 }
4048 break;
4049 }
4050 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
4051 if (len == 0)
4052 break;
4053 if ((value & 0xff) != 0) {
4054 return -1;
4055 }
4056 usb_hub_descriptor_t hubd;
4057
4058 totlen = uimin(buflen, sizeof(hubd));
4059 memcpy(&hubd, buf, totlen);
4060 hubd.bNbrPorts = sc->sc_rhportcount[bn];
4061 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
4062 hubd.bPwrOn2PwrGood = 200;
4063 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
4064 /* XXX can't find out? */
4065 hubd.DeviceRemovable[i++] = 0;
4066 }
4067 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
4068 totlen = uimin(totlen, hubd.bDescLength);
4069 memcpy(buf, &hubd, totlen);
4070 break;
4071 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
4072 if (len != 4) {
4073 return -1;
4074 }
4075 memset(buf, 0, len); /* ? XXX */
4076 totlen = len;
4077 break;
4078 /* Get Port Status request */
4079 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
4080 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4081
4082 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
4083 bn, index, cp, 0);
4084 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4085 DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
4086 "portcount=%jd",
4087 index, bn, sc->sc_rhportcount[bn], 0);
4088 return -1;
4089 }
4090 if (len != 4) {
4091 DPRINTFN(5, "bad get port status: len %jd != 4",
4092 len, 0, 0, 0);
4093 return -1;
4094 }
4095 v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
4096 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
4097 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
4098 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
4099 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
4100 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
4101 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
4102 if (v & XHCI_PS_PR) i |= UPS_RESET;
4103 if (v & XHCI_PS_PP) {
4104 if (i & UPS_OTHER_SPEED)
4105 i |= UPS_PORT_POWER_SS;
4106 else
4107 i |= UPS_PORT_POWER;
4108 }
4109 if (i & UPS_OTHER_SPEED)
4110 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
4111 if (sc->sc_vendor_port_status)
4112 i = sc->sc_vendor_port_status(sc, v, i);
4113 USETW(ps.wPortStatus, i);
4114 i = 0;
4115 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
4116 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
4117 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
4118 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
4119 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
4120 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
4121 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
4122 USETW(ps.wPortChange, i);
4123 totlen = uimin(len, sizeof(ps));
4124 memcpy(buf, &ps, totlen);
4125 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
4126 " totlen %jd",
4127 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
4128 break;
4129 }
4130 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
4131 return -1;
4132 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
4133 break;
4134 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
4135 break;
4136 /* Set Port Feature request */
4137 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
4138 int optval = (index >> 8) & 0xff;
4139 index &= 0xff;
4140 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4141 return -1;
4142 }
4143
4144 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4145
4146 port = XHCI_PORTSC(cp);
4147 v = xhci_op_read_4(sc, port);
4148 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
4149 v &= ~XHCI_PS_CLEAR;
4150 switch (value) {
4151 case UHF_PORT_ENABLE:
4152 xhci_op_write_4(sc, port, v | XHCI_PS_PED);
4153 break;
4154 case UHF_PORT_SUSPEND:
4155 /* XXX suspend */
4156 break;
4157 case UHF_PORT_RESET:
4158 xhci_op_write_4(sc, port, v | XHCI_PS_PR);
4159 /* Wait for reset to complete. */
4160 for (i = 0; i < USB_PORT_ROOT_RESET_DELAY / 10; i++) {
4161 if (sc->sc_dying) {
4162 return -1;
4163 }
4164 v = xhci_op_read_4(sc, port);
4165 if ((v & XHCI_PS_PR) == 0) {
4166 break;
4167 }
4168 usb_delay_ms(&sc->sc_bus, 10);
4169 }
4170 break;
4171 case UHF_PORT_POWER:
4172 /* XXX power control */
4173 break;
4174 /* XXX more */
4175 case UHF_C_PORT_RESET:
4176 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4177 break;
4178 case UHF_PORT_U1_TIMEOUT:
4179 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4180 return -1;
4181 }
4182 port = XHCI_PORTPMSC(cp);
4183 v = xhci_op_read_4(sc, port);
4184 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4185 index, cp, v, 0);
4186 v &= ~XHCI_PM3_U1TO_SET(0xff);
4187 v |= XHCI_PM3_U1TO_SET(optval);
4188 xhci_op_write_4(sc, port, v);
4189 break;
4190 case UHF_PORT_U2_TIMEOUT:
4191 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4192 return -1;
4193 }
4194 port = XHCI_PORTPMSC(cp);
4195 v = xhci_op_read_4(sc, port);
4196 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4197 index, cp, v, 0);
4198 v &= ~XHCI_PM3_U2TO_SET(0xff);
4199 v |= XHCI_PM3_U2TO_SET(optval);
4200 xhci_op_write_4(sc, port, v);
4201 break;
4202 default:
4203 return -1;
4204 }
4205 }
4206 break;
4207 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
4208 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
4209 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
4210 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
4211 break;
4212 default:
4213 /* default from usbroothub */
4214 return buflen;
4215 }
4216
4217 return totlen;
4218 }
4219
4220 static int
4221 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
4222 void *buf, int buflen)
4223 {
4224 struct xhci_softc *sc = XHCI_BUS2SC(bus);
4225 int actlen;
4226
4227 mutex_enter(&sc->sc_rhlock);
4228 actlen = xhci_roothub_ctrl_locked(bus, req, buf, buflen);
4229 mutex_exit(&sc->sc_rhlock);
4230
4231 return actlen;
4232 }
4233
4234 /* root hub interrupt */
4235
4236 static usbd_status
4237 xhci_root_intr_transfer(struct usbd_xfer *xfer)
4238 {
4239 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4240
4241 	/* Pipe isn't running, so start it first. */
4242 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4243 }
4244
4245 /* Wait for roothub port status/change */
4246 static usbd_status
4247 xhci_root_intr_start(struct usbd_xfer *xfer)
4248 {
4249 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4250 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4251
4252 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4253
4254 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4255
4256 if (sc->sc_dying)
4257 return USBD_IOERROR;
4258
4259 KASSERT(sc->sc_intrxfer[bn] == NULL);
4260 sc->sc_intrxfer[bn] = xfer;
4261 xfer->ux_status = USBD_IN_PROGRESS;
4262
4263 return USBD_IN_PROGRESS;
4264 }
4265
4266 static void
4267 xhci_root_intr_abort(struct usbd_xfer *xfer)
4268 {
4269 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4270 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4271
4272 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4273
4274 KASSERT(mutex_owned(&sc->sc_lock));
4275 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
4276
4277 /* If xfer has already completed, nothing to do here. */
4278 if (sc->sc_intrxfer[bn] == NULL)
4279 return;
4280
4281 /*
4282 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
4283 * Cancel it.
4284 */
4285 KASSERT(sc->sc_intrxfer[bn] == xfer);
4286 xfer->ux_status = USBD_CANCELLED;
4287 usb_transfer_complete(xfer);
4288 }
4289
4290 static void
4291 xhci_root_intr_close(struct usbd_pipe *pipe)
4292 {
4293 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
4294 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
4295 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4296
4297 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4298
4299 KASSERT(mutex_owned(&sc->sc_lock));
4300
4301 /*
4302 * Caller must guarantee the xfer has completed first, by
4303 * closing the pipe only after normal completion or an abort.
4304 */
4305 KASSERT(sc->sc_intrxfer[bn] == NULL);
4306 }
4307
4308 static void
4309 xhci_root_intr_done(struct usbd_xfer *xfer)
4310 {
4311 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4312 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4313
4314 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4315
4316 KASSERT(mutex_owned(&sc->sc_lock));
4317
4318 /* Claim the xfer so it doesn't get completed again. */
4319 KASSERT(sc->sc_intrxfer[bn] == xfer);
4320 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
4321 sc->sc_intrxfer[bn] = NULL;
4322 }
4323
4324 /* -------------- */
4325 /* device control */
4326
4327 static usbd_status
4328 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
4329 {
4330 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4331
4332 	/* Pipe isn't running, so start it first. */
4333 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4334 }
4335
4336 static usbd_status
4337 xhci_device_ctrl_start(struct usbd_xfer *xfer)
4338 {
4339 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4340 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4341 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4342 struct xhci_ring * const tr = xs->xs_xr[dci];
4343 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4344 usb_device_request_t * const req = &xfer->ux_request;
4345 const bool isread = usbd_xfer_isread(xfer);
4346 const uint32_t len = UGETW(req->wLength);
4347 usb_dma_t * const dma = &xfer->ux_dmabuf;
4348 uint64_t parameter;
4349 uint32_t status;
4350 uint32_t control;
4351 u_int i;
4352 const bool polling = xhci_polling_p(sc);
4353
4354 XHCIHIST_FUNC();
4355 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
4356 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
4357 UGETW(req->wIndex), UGETW(req->wLength));
4358
4359 KASSERT(polling || mutex_owned(&sc->sc_lock));
4360
4361 /* we rely on the bottom bits for extra info */
4362 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %p", xfer);
4363
4364 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);
4365
4366 if (tr->is_halted)
4367 goto out;
4368
4369 i = 0;
4370
4371 /* setup phase */
4372 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */
4373 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
4374 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
4375 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
4376 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
4377 XHCI_TRB_3_IDT_BIT;
4378 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4379
4380 if (len != 0) {
4381 /* data phase */
4382 parameter = DMAADDR(dma, 0);
4383 KASSERTMSG(len <= 0x10000, "len %d", len);
4384 status = XHCI_TRB_2_IRQ_SET(0) |
4385 XHCI_TRB_2_TDSZ_SET(0) |
4386 XHCI_TRB_2_BYTES_SET(len);
4387 control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
4388 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
4389 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4390 XHCI_TRB_3_IOC_BIT;
4391 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4392
4393 usb_syncmem(dma, 0, len,
4394 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4395 }
4396
4397 parameter = 0;
4398 status = XHCI_TRB_2_IRQ_SET(0);
4399 /* the status stage has inverted direction */
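	/*
	 * e.g. a GET_DESCRIPTOR request (IN with a data stage) gets an OUT
	 * status TRB, while a SET_CONFIGURATION request (no data stage)
	 * gets an IN status TRB (illustrative).
	 */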
4400 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
4401 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
4402 XHCI_TRB_3_IOC_BIT;
4403 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4404
4405 if (!polling)
4406 mutex_enter(&tr->xr_lock);
4407 xhci_ring_put_xfer(sc, tr, xx, i);
4408 if (!polling)
4409 mutex_exit(&tr->xr_lock);
4410
4411 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4412
4413 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4414 usbd_xfer_schedule_timeout(xfer);
4415 xfer->ux_status = USBD_IN_PROGRESS;
4416 } else {
4417 /*
4418 * We must be coming from xhci_pipe_restart -- timeout
4419 * already set up, nothing to do.
4420 */
4421 }
4422 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4423
4424 return USBD_IN_PROGRESS;
4425 }
4426
4427 static void
4428 xhci_device_ctrl_done(struct usbd_xfer *xfer)
4429 {
4430 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4431 usb_device_request_t *req = &xfer->ux_request;
4432 int len = UGETW(req->wLength);
4433 int rd = req->bmRequestType & UT_READ;
4434
4435 if (len)
4436 usb_syncmem(&xfer->ux_dmabuf, 0, len,
4437 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4438 }
4439
4440 static void
4441 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
4442 {
4443 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4444
4445 usbd_xfer_abort(xfer);
4446 }
4447
4448 static void
4449 xhci_device_ctrl_close(struct usbd_pipe *pipe)
4450 {
4451 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4452
4453 xhci_close_pipe(pipe);
4454 }
4455
4456 /* ------------------ */
4457 /* device isochronous */
4458
4459 static usbd_status
4460 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
4461 {
4462 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4463
4464 return xhci_device_isoc_enter(xfer);
4465 }
4466
4467 static usbd_status
4468 xhci_device_isoc_enter(struct usbd_xfer *xfer)
4469 {
4470 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4471 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4472 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4473 struct xhci_ring * const tr = xs->xs_xr[dci];
4474 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4475 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
4476 uint32_t len = xfer->ux_length;
4477 usb_dma_t * const dma = &xfer->ux_dmabuf;
4478 uint64_t parameter;
4479 uint32_t status;
4480 uint32_t control;
4481 uint32_t mfindex;
4482 uint32_t offs;
4483 int i, ival;
4484 const bool polling = xhci_polling_p(sc);
4485 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
4486 const uint16_t mps = UE_GET_SIZE(MPS);
4487 const uint8_t maxb = xpipe->xp_maxb;
4488 u_int tdpc, tbc, tlbpc;
4489
4490 XHCIHIST_FUNC();
4491 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4492 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4493
4494 KASSERT(polling || mutex_owned(&sc->sc_lock));
4495
4496 if (sc->sc_dying)
4497 return USBD_IOERROR;
4498
4499 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
4500 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4501
4502 const bool isread = usbd_xfer_isread(xfer);
4503 if (xfer->ux_length)
4504 usb_syncmem(dma, 0, xfer->ux_length,
4505 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4506
4507 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
4508 if (ival >= 1 && ival <= 16)
4509 ival = 1 << (ival - 1);
4510 else
4511 ival = 1; /* fake something up */
4512
4513 if (xpipe->xp_isoc_next == -1) {
4514 mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
4515 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
4516 mfindex = XHCI_MFINDEX_GET(mfindex + 1);
4517 mfindex /= USB_UFRAMES_PER_FRAME;
4518 mfindex += 7; /* 7 frames is max possible IST */
4519 xpipe->xp_isoc_next = roundup2(mfindex, ival);
4520 }
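	/*
	 * Illustrative example: if MFINDEX reads 0x3f2 (microframe 1010),
	 * the next frame is (1010 + 1) / 8 = 126; adding the 7-frame IST
	 * margin gives 133, and with ival = 4 the start frame is rounded
	 * up to the next multiple of 4, i.e. 136.
	 */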
4521
4522 offs = 0;
4523 for (i = 0; i < xfer->ux_nframes; i++) {
4524 len = xfer->ux_frlengths[i];
4525
4526 tdpc = howmany(len, mps);
4527 tbc = howmany(tdpc, maxb) - 1;
4528 tlbpc = tdpc % maxb;
4529 tlbpc = tlbpc ? tlbpc - 1 : maxb - 1;
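		/*
		 * Illustrative example: len = 3000 and mps = 1024 with a
		 * burst of 3 packets (maxb = 3) give tdpc = 3 packets,
		 * tbc = howmany(3, 3) - 1 = 0 extra bursts, and tlbpc = 2,
		 * i.e. the single burst carries all 3 packets.
		 */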
4530
4531 KASSERTMSG(len <= 0x10000, "len %d", len);
4532 parameter = DMAADDR(dma, offs);
4533 status = XHCI_TRB_2_IRQ_SET(0) |
4534 XHCI_TRB_2_TDSZ_SET(0) |
4535 XHCI_TRB_2_BYTES_SET(len);
4536 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
4537 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4538 XHCI_TRB_3_TBC_SET(tbc) |
4539 XHCI_TRB_3_TLBPC_SET(tlbpc) |
4540 XHCI_TRB_3_IOC_BIT;
4541 if (XHCI_HCC_CFC(sc->sc_hcc)) {
4542 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4543 #if 0
4544 } else if (xpipe->xp_isoc_next == -1) {
4545 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4546 #endif
4547 } else {
4548 control |= XHCI_TRB_3_ISO_SIA_BIT;
4549 }
4550 #if 0
4551 if (i != xfer->ux_nframes - 1)
4552 control |= XHCI_TRB_3_BEI_BIT;
4553 #endif
4554 xhci_xfer_put_trb(xx, i, parameter, status, control);
4555
4556 xpipe->xp_isoc_next += ival;
4557 offs += len;
4558 }
4559
4560 xx->xx_isoc_done = 0;
4561
4562 if (!polling)
4563 mutex_enter(&tr->xr_lock);
4564 xhci_ring_put_xfer(sc, tr, xx, i);
4565 if (!polling)
4566 mutex_exit(&tr->xr_lock);
4567
4568 xfer->ux_status = USBD_IN_PROGRESS;
4569 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4570 usbd_xfer_schedule_timeout(xfer);
4571
4572 return USBD_IN_PROGRESS;
4573 }
4574
4575 static void
4576 xhci_device_isoc_abort(struct usbd_xfer *xfer)
4577 {
4578 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4579
4580 usbd_xfer_abort(xfer);
4581 }
4582
4583 static void
4584 xhci_device_isoc_close(struct usbd_pipe *pipe)
4585 {
4586 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4587
4588 xhci_close_pipe(pipe);
4589 }
4590
4591 static void
4592 xhci_device_isoc_done(struct usbd_xfer *xfer)
4593 {
4594 #ifdef USB_DEBUG
4595 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4596 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4597 #endif
4598 const bool isread = usbd_xfer_isread(xfer);
4599
4600 XHCIHIST_FUNC();
4601 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4602 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4603
4604 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4605 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4606 }
4607
4608 /* ----------- */
4609 /* device bulk */
4610
4611 static usbd_status
4612 xhci_device_bulk_transfer(struct usbd_xfer *xfer)
4613 {
4614 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4615
4616 /* Pipe isn't running, so start it first. */
4617 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4618 }
4619
4620 static usbd_status
4621 xhci_device_bulk_start(struct usbd_xfer *xfer)
4622 {
4623 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4624 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4625 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4626 struct xhci_ring * const tr = xs->xs_xr[dci];
4627 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4628 const uint32_t len = xfer->ux_length;
4629 usb_dma_t * const dma = &xfer->ux_dmabuf;
4630 uint64_t parameter;
4631 uint32_t status;
4632 uint32_t control;
4633 u_int i = 0;
4634 const bool polling = xhci_polling_p(sc);
4635
4636 XHCIHIST_FUNC();
4637 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4638 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4639
4640 KASSERT(polling || mutex_owned(&sc->sc_lock));
4641
4642 if (sc->sc_dying)
4643 return USBD_IOERROR;
4644
4645 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4646
4647 if (tr->is_halted)
4648 goto out;
4649
4650 parameter = DMAADDR(dma, 0);
4651 const bool isread = usbd_xfer_isread(xfer);
4652 if (len)
4653 usb_syncmem(dma, 0, len,
4654 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4655
4656 	/*
4657 	 * XXX: (dsl) The physical buffer must not cross a 64k boundary.
4658 	 * If the user-supplied buffer crosses such a boundary then 2
4659 	 * (or more) TRBs should be used.
4660 	 * If multiple TRBs are used, the td_size field must be set correctly.
4661 	 * For v1.0 devices (like Ivy Bridge) this is the number of USB data
4662 	 * blocks needed to complete the transfer.
4663 	 * Setting it to 1 in the last TRB causes an extra zero-length
4664 	 * data block to be sent.
4665 	 * The earlier documentation differs; I don't know how it behaves.
4666 	 */
4667 KASSERTMSG(len <= 0x10000, "len %d", len);
4668 status = XHCI_TRB_2_IRQ_SET(0) |
4669 XHCI_TRB_2_TDSZ_SET(0) |
4670 XHCI_TRB_2_BYTES_SET(len);
4671 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4672 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4673 XHCI_TRB_3_IOC_BIT;
4674 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4675
4676 if (!polling)
4677 mutex_enter(&tr->xr_lock);
4678 xhci_ring_put_xfer(sc, tr, xx, i);
4679 if (!polling)
4680 mutex_exit(&tr->xr_lock);
4681
4682 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4683
4684 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4685 xfer->ux_status = USBD_IN_PROGRESS;
4686 usbd_xfer_schedule_timeout(xfer);
4687 } else {
4688 /*
4689 * We must be coming from xhci_pipe_restart -- timeout
4690 * already set up, nothing to do.
4691 */
4692 }
4693 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4694
4695 return USBD_IN_PROGRESS;
4696 }
4697
4698 static void
4699 xhci_device_bulk_done(struct usbd_xfer *xfer)
4700 {
4701 #ifdef USB_DEBUG
4702 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4703 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4704 #endif
4705 const bool isread = usbd_xfer_isread(xfer);
4706
4707 XHCIHIST_FUNC();
4708 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4709 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4710
4711 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4712 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4713 }
4714
4715 static void
4716 xhci_device_bulk_abort(struct usbd_xfer *xfer)
4717 {
4718 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4719
4720 usbd_xfer_abort(xfer);
4721 }
4722
4723 static void
4724 xhci_device_bulk_close(struct usbd_pipe *pipe)
4725 {
4726 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4727
4728 xhci_close_pipe(pipe);
4729 }
4730
4731 /* ---------------- */
4732 /* device interrupt */
4733
4734 static usbd_status
4735 xhci_device_intr_transfer(struct usbd_xfer *xfer)
4736 {
4737 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4738
4739 /* Pipe isn't running, so start it first. */
4740 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4741 }
4742
4743 static usbd_status
4744 xhci_device_intr_start(struct usbd_xfer *xfer)
4745 {
4746 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4747 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4748 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4749 struct xhci_ring * const tr = xs->xs_xr[dci];
4750 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4751 const uint32_t len = xfer->ux_length;
4752 const bool polling = xhci_polling_p(sc);
4753 usb_dma_t * const dma = &xfer->ux_dmabuf;
4754 uint64_t parameter;
4755 uint32_t status;
4756 uint32_t control;
4757 u_int i = 0;
4758
4759 XHCIHIST_FUNC();
4760 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4761 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4762
4763 KASSERT(polling || mutex_owned(&sc->sc_lock));
4764
4765 if (sc->sc_dying)
4766 return USBD_IOERROR;
4767
4768 if (tr->is_halted)
4769 goto out;
4770
4771 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4772
4773 const bool isread = usbd_xfer_isread(xfer);
4774 if (len)
4775 usb_syncmem(dma, 0, len,
4776 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4777
4778 parameter = DMAADDR(dma, 0);
4779 KASSERTMSG(len <= 0x10000, "len %d", len);
4780 status = XHCI_TRB_2_IRQ_SET(0) |
4781 XHCI_TRB_2_TDSZ_SET(0) |
4782 XHCI_TRB_2_BYTES_SET(len);
4783 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4784 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
4785 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4786
4787 if (!polling)
4788 mutex_enter(&tr->xr_lock);
4789 xhci_ring_put_xfer(sc, tr, xx, i);
4790 if (!polling)
4791 mutex_exit(&tr->xr_lock);
4792
4793 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4794
4795 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4796 xfer->ux_status = USBD_IN_PROGRESS;
4797 usbd_xfer_schedule_timeout(xfer);
4798 } else {
4799 /*
4800 * We must be coming from xhci_pipe_restart -- timeout
4801 * already set up, nothing to do.
4802 */
4803 }
4804 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4805
4806 return USBD_IN_PROGRESS;
4807 }
4808
4809 static void
4810 xhci_device_intr_done(struct usbd_xfer *xfer)
4811 {
4812 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4813 #ifdef USB_DEBUG
4814 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4815 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4816 #endif
4817 const bool isread = usbd_xfer_isread(xfer);
4818
4819 XHCIHIST_FUNC();
4820 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4821 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4822
4823 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4824
4825 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4826 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4827 }
4828
4829 static void
4830 xhci_device_intr_abort(struct usbd_xfer *xfer)
4831 {
4832 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4833
4834 XHCIHIST_FUNC();
4835 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);
4836
4837 KASSERT(mutex_owned(&sc->sc_lock));
4838 usbd_xfer_abort(xfer);
4839 }
4840
4841 static void
4842 xhci_device_intr_close(struct usbd_pipe *pipe)
4843 {
4844 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
4845
4846 XHCIHIST_FUNC();
4847 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);
4848
4849 xhci_close_pipe(pipe);
4850 }
4851