1 /* $NetBSD: xhci.c,v 1.170 2022/09/13 10:15:28 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * USB rev 2.0 and rev 3.1 specification
31 * http://www.usb.org/developers/docs/
32 * xHCI rev 1.1 specification
33 * http://www.intel.com/technology/usb/spec.htm
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.170 2022/09/13 10:15:28 riastradh Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_usb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/kmem.h>
47 #include <sys/device.h>
48 #include <sys/select.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/sysctl.h>
56
57 #include <machine/endian.h>
58
59 #include <dev/usb/usb.h>
60 #include <dev/usb/usbdi.h>
61 #include <dev/usb/usbdivar.h>
62 #include <dev/usb/usbdi_util.h>
63 #include <dev/usb/usbhist.h>
64 #include <dev/usb/usb_mem.h>
65 #include <dev/usb/usb_quirks.h>
66
67 #include <dev/usb/xhcireg.h>
68 #include <dev/usb/xhcivar.h>
69 #include <dev/usb/usbroothub.h>
70
71
72 #ifdef USB_DEBUG
73 #ifndef XHCI_DEBUG
74 #define xhcidebug 0
75 #else /* !XHCI_DEBUG */
76 #define HEXDUMP(a, b, c) \
77 do { \
78 if (xhcidebug > 0) \
79 hexdump(printf, a, b, c); \
80 } while (/*CONSTCOND*/0)
81 static int xhcidebug = 0;
82
83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
84 {
85 int err;
86 const struct sysctlnode *rnode;
87 const struct sysctlnode *cnode;
88
89 err = sysctl_createv(clog, 0, NULL, &rnode,
90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
91 SYSCTL_DESCR("xhci global controls"),
92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
93
94 if (err)
95 goto fail;
96
97 /* control debugging printfs */
98 err = sysctl_createv(clog, 0, &rnode, &cnode,
99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
100 "debug", SYSCTL_DESCR("Enable debugging output"),
101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
102 if (err)
103 goto fail;
104
105 return;
106 fail:
107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
108 }
109
110 #endif /* !XHCI_DEBUG */
111 #endif /* USB_DEBUG */
112
113 #ifndef HEXDUMP
114 #define HEXDUMP(a, b, c)
115 #endif
116
117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
119 #define XHCIHIST_FUNC() USBHIST_FUNC()
120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug)
121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)
123
124 #define XHCI_DCI_SLOT 0
125 #define XHCI_DCI_EP_CONTROL 1
126
127 #define XHCI_ICI_INPUT_CONTROL 0
128
129 struct xhci_pipe {
130 struct usbd_pipe xp_pipe;
131 struct usb_task xp_async_task;
132 int16_t xp_isoc_next; /* next frame */
133 uint8_t xp_maxb; /* max burst */
134 uint8_t xp_mult;
135 };
136
137 #define XHCI_COMMAND_RING_TRBS 256
138 #define XHCI_EVENT_RING_TRBS 256
139 #define XHCI_EVENT_RING_SEGMENTS 1
140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT
141
142 static usbd_status xhci_open(struct usbd_pipe *);
143 static void xhci_close_pipe(struct usbd_pipe *);
144 static int xhci_intr1(struct xhci_softc * const);
145 static void xhci_softintr(void *);
146 static void xhci_poll(struct usbd_bus *);
147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
149 static void xhci_abortx(struct usbd_xfer *);
150 static bool xhci_dying(struct usbd_bus *);
151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
153 struct usbd_port *);
154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
155 void *, int);
156
157 static void xhci_pipe_restart(struct usbd_pipe *);
158 static void xhci_pipe_restart_async_task(void *);
159 static void xhci_pipe_restart_async(struct usbd_pipe *);
160
161 static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
162 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
163 static void xhci_reset_endpoint(struct usbd_pipe *);
164 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *,
165 struct xhci_slot *, u_int, uint32_t);
166 static usbd_status xhci_stop_endpoint(struct usbd_pipe *);
167
168 static void xhci_host_dequeue(struct xhci_ring * const);
169 static void xhci_set_dequeue(struct usbd_pipe *);
170
171 static usbd_status xhci_do_command(struct xhci_softc * const,
172 struct xhci_soft_trb * const, int);
173 static usbd_status xhci_do_command_locked(struct xhci_softc * const,
174 struct xhci_soft_trb * const, int);
175 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
176 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *);
177 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
178 static usbd_status xhci_enable_slot(struct xhci_softc * const,
179 uint8_t * const);
180 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
181 static usbd_status xhci_address_device(struct xhci_softc * const,
182 uint64_t, uint8_t, bool);
183 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
184 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
185 struct xhci_slot * const, u_int);
186 static usbd_status xhci_ring_init(struct xhci_softc * const,
187 struct xhci_ring **, size_t, size_t);
188 static void xhci_ring_free(struct xhci_softc * const,
189 struct xhci_ring ** const);
190
191 static void xhci_setup_ctx(struct usbd_pipe *);
192 static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
193 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
194 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
195 static uint32_t xhci_bival2ival(uint32_t, uint32_t);
196
197 static void xhci_noop(struct usbd_pipe *);
198
199 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
200 static usbd_status xhci_root_intr_start(struct usbd_xfer *);
201 static void xhci_root_intr_abort(struct usbd_xfer *);
202 static void xhci_root_intr_close(struct usbd_pipe *);
203 static void xhci_root_intr_done(struct usbd_xfer *);
204
205 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
206 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
207 static void xhci_device_ctrl_abort(struct usbd_xfer *);
208 static void xhci_device_ctrl_close(struct usbd_pipe *);
209 static void xhci_device_ctrl_done(struct usbd_xfer *);
210
211 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
212 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *);
213 static void xhci_device_isoc_abort(struct usbd_xfer *);
214 static void xhci_device_isoc_close(struct usbd_pipe *);
215 static void xhci_device_isoc_done(struct usbd_xfer *);
216
217 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
218 static usbd_status xhci_device_intr_start(struct usbd_xfer *);
219 static void xhci_device_intr_abort(struct usbd_xfer *);
220 static void xhci_device_intr_close(struct usbd_pipe *);
221 static void xhci_device_intr_done(struct usbd_xfer *);
222
223 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
224 static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
225 static void xhci_device_bulk_abort(struct usbd_xfer *);
226 static void xhci_device_bulk_close(struct usbd_pipe *);
227 static void xhci_device_bulk_done(struct usbd_xfer *);
228
229 static const struct usbd_bus_methods xhci_bus_methods = {
230 .ubm_open = xhci_open,
231 .ubm_softint = xhci_softintr,
232 .ubm_dopoll = xhci_poll,
233 .ubm_allocx = xhci_allocx,
234 .ubm_freex = xhci_freex,
235 .ubm_abortx = xhci_abortx,
236 .ubm_dying = xhci_dying,
237 .ubm_getlock = xhci_get_lock,
238 .ubm_newdev = xhci_new_device,
239 .ubm_rhctrl = xhci_roothub_ctrl,
240 };
241
242 static const struct usbd_pipe_methods xhci_root_intr_methods = {
243 .upm_transfer = xhci_root_intr_transfer,
244 .upm_start = xhci_root_intr_start,
245 .upm_abort = xhci_root_intr_abort,
246 .upm_close = xhci_root_intr_close,
247 .upm_cleartoggle = xhci_noop,
248 .upm_done = xhci_root_intr_done,
249 };
250
251
252 static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
253 .upm_transfer = xhci_device_ctrl_transfer,
254 .upm_start = xhci_device_ctrl_start,
255 .upm_abort = xhci_device_ctrl_abort,
256 .upm_close = xhci_device_ctrl_close,
257 .upm_cleartoggle = xhci_noop,
258 .upm_done = xhci_device_ctrl_done,
259 };
260
261 static const struct usbd_pipe_methods xhci_device_isoc_methods = {
262 .upm_transfer = xhci_device_isoc_transfer,
263 .upm_abort = xhci_device_isoc_abort,
264 .upm_close = xhci_device_isoc_close,
265 .upm_cleartoggle = xhci_noop,
266 .upm_done = xhci_device_isoc_done,
267 };
268
269 static const struct usbd_pipe_methods xhci_device_bulk_methods = {
270 .upm_transfer = xhci_device_bulk_transfer,
271 .upm_start = xhci_device_bulk_start,
272 .upm_abort = xhci_device_bulk_abort,
273 .upm_close = xhci_device_bulk_close,
274 .upm_cleartoggle = xhci_noop,
275 .upm_done = xhci_device_bulk_done,
276 };
277
278 static const struct usbd_pipe_methods xhci_device_intr_methods = {
279 .upm_transfer = xhci_device_intr_transfer,
280 .upm_start = xhci_device_intr_start,
281 .upm_abort = xhci_device_intr_abort,
282 .upm_close = xhci_device_intr_close,
283 .upm_cleartoggle = xhci_noop,
284 .upm_done = xhci_device_intr_done,
285 };
286
287 static inline uint32_t
288 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
289 {
290 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
291 }
292
293 static inline uint32_t
294 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset)
295 {
296 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
297 }
298
299 static inline uint32_t
300 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
301 {
302 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
303 }
304
305 static inline void
306 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
307 uint32_t value)
308 {
309 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
310 }
311
312 #if 0 /* unused */
313 static inline void
314 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
315 uint32_t value)
316 {
317 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
318 }
319 #endif /* unused */
320
321 static inline void
322 xhci_barrier(const struct xhci_softc * const sc, int flags)
323 {
324 bus_space_barrier(sc->sc_iot, sc->sc_ioh, 0, sc->sc_ios, flags);
325 }
326
327 static inline uint32_t
328 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
329 {
330 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
331 }
332
333 static inline uint32_t
334 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
335 {
336 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
337 }
338
339 static inline void
340 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
341 uint32_t value)
342 {
343 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
344 }
345
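/*
 * 64-bit operational/runtime register accessors.  On platforms without
 * 64-bit bus_space methods (XHCI_USE_BUS_SPACE_8 undefined) the register
 * is accessed as two 32-bit operations, low word first, which the xHCI
 * spec allows for its 64-bit registers.
 */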
346 static inline uint64_t
347 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
348 {
349 uint64_t value;
350
351 #ifdef XHCI_USE_BUS_SPACE_8
352 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
353 #else
354 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
355 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
356 offset + 4) << 32;
357 #endif
358
359 return value;
360 }
361
362 static inline void
363 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
364 uint64_t value)
365 {
366 #ifdef XHCI_USE_BUS_SPACE_8
367 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
368 #else
369 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
370 (value >> 0) & 0xffffffff);
371 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
372 (value >> 32) & 0xffffffff);
373 #endif
374 }
375
376 static inline uint32_t
377 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
378 {
379 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
380 }
381
382 static inline void
383 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
384 uint32_t value)
385 {
386 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
387 }
388
389 static inline uint64_t
390 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
391 {
392 uint64_t value;
393
394 #ifdef XHCI_USE_BUS_SPACE_8
395 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
396 #else
397 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
398 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
399 offset + 4) << 32;
400 #endif
401
402 return value;
403 }
404
405 static inline void
406 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
407 uint64_t value)
408 {
409 #ifdef XHCI_USE_BUS_SPACE_8
410 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
411 #else
412 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
413 (value >> 0) & 0xffffffff);
414 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
415 (value >> 32) & 0xffffffff);
416 #endif
417 }
418
419 #if 0 /* unused */
420 static inline uint32_t
421 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
422 {
423 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
424 }
425 #endif /* unused */
426
427 static inline void
428 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
429 uint32_t value)
430 {
431 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
432 }
433
434 /* --- */
435
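/*
 * Map a USB endpoint descriptor to the xHCI Endpoint Context EP Type
 * encoding: 1-3 for Isoch/Bulk/Interrupt OUT, and (with the 0x4 bit set)
 * 4-7 for Control and Isoch/Bulk/Interrupt IN.  E.g. a bulk OUT endpoint
 * yields 2 and a bulk IN endpoint yields 6.
 */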
436 static inline uint8_t
437 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
438 {
439 u_int eptype = 0;
440
441 switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
442 case UE_CONTROL:
443 eptype = 0x0;
444 break;
445 case UE_ISOCHRONOUS:
446 eptype = 0x1;
447 break;
448 case UE_BULK:
449 eptype = 0x2;
450 break;
451 case UE_INTERRUPT:
452 eptype = 0x3;
453 break;
454 }
455
456 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
457 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
458 return eptype | 0x4;
459 else
460 return eptype;
461 }
462
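/*
 * Compute the Device Context Index (DCI) for an endpoint:
 * DCI = endpoint number * 2, plus 1 for IN and control endpoints.
 * E.g. EP 0x81 (IN 1) -> DCI 3, EP 0x02 (OUT 2) -> DCI 4, EP0 -> DCI 1.
 */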
463 static u_int
464 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
465 {
466 /* xHCI 1.0 section 4.5.1 */
467 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
468 u_int in = 0;
469
470 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
471 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
472 in = 1;
473
474 return epaddr * 2 + in;
475 }
476
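/*
 * Convert a Device Context Index into an Input Context Index.  The input
 * context carries an Input Control Context at index 0, so every entry is
 * shifted up by one relative to the device context.
 */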
477 static inline u_int
478 xhci_dci_to_ici(const u_int i)
479 {
480 return i + 1;
481 }
482
483 static inline void *
484 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
485 const u_int dci)
486 {
487 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
488 }
489
490 #if 0 /* unused */
491 static inline bus_addr_t
492 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
493 const u_int dci)
494 {
495 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
496 }
497 #endif /* unused */
498
499 static inline void *
500 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
501 const u_int ici)
502 {
503 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
504 }
505
506 static inline bus_addr_t
507 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
508 const u_int ici)
509 {
510 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
511 }
512
513 static inline struct xhci_trb *
514 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
515 {
516 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
517 }
518
519 static inline bus_addr_t
520 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
521 {
522 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
523 }
524
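/*
 * Stash a TRB in the xfer's soft TRB array, in host byte order and
 * bounds-checked against xx_ntrb; the conversion to little-endian is
 * presumably done later when the TRBs are copied onto the ring
 * (cf. xhci_trb_put() below).
 */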
525 static inline void
526 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx,
527 uint64_t parameter, uint32_t status, uint32_t control)
528 {
529 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb);
530 xx->xx_trb[idx].trb_0 = parameter;
531 xx->xx_trb[idx].trb_2 = status;
532 xx->xx_trb[idx].trb_3 = control;
533 }
534
535 static inline void
536 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
537 uint32_t control)
538 {
539 trb->trb_0 = htole64(parameter);
540 trb->trb_2 = htole32(status);
541 trb->trb_3 = htole32(control);
542 }
543
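/*
 * Translate the TRB pointer (trb_0) reported by an event back into an
 * index into the ring, rejecting addresses that do not fall on a TRB
 * boundary within the ring.
 */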
544 static int
545 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
546 {
547 /* base address of TRBs */
548 bus_addr_t trbp = xhci_ring_trbp(xr, 0);
549
550 /* trb_0 range sanity check */
551 if (trb_0 == 0 || trb_0 < trbp ||
552 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
553 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
554 return 1;
555 }
556 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
557 return 0;
558 }
559
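/*
 * Read the EP State field (Running, Stopped, Halted, ...) of an
 * endpoint's output device context after syncing it from DMA memory.
 */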
560 static unsigned int
561 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
562 u_int dci)
563 {
564 uint32_t *cp;
565
566 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
567 cp = xhci_slot_get_dcv(sc, xs, dci);
568 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
569 }
570
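/*
 * Return the bus a controller port (1..n) is routed to: 0 for the USB 3
 * root hub, 1 for the USB 2 root hub, from the per-port bitmap built by
 * xhci_id_protocols().
 */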
571 static inline unsigned int
572 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
573 {
574 const unsigned int port = ctlrport - 1;
575 const uint8_t bit = __BIT(port % NBBY);
576
577 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
578 }
579
580 /*
581 * Return the roothub port for a controller port. Both are 1..n.
582 */
583 static inline unsigned int
584 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
585 {
586
587 return sc->sc_ctlrportmap[ctrlport - 1];
588 }
589
590 /*
591 * Return the controller port for a bus roothub port. Both are 1..n.
592 */
593 static inline unsigned int
594 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
595 unsigned int rhport)
596 {
597
598 return sc->sc_rhportmap[bn][rhport - 1];
599 }
600
601 /* --- */
602
603 void
604 xhci_childdet(device_t self, device_t child)
605 {
606 struct xhci_softc * const sc = device_private(self);
607
608 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
609 if (child == sc->sc_child2)
610 sc->sc_child2 = NULL;
611 else if (child == sc->sc_child)
612 sc->sc_child = NULL;
613 }
614
615 int
616 xhci_detach(struct xhci_softc *sc, int flags)
617 {
618 int rv = 0;
619
620 if (sc->sc_child2 != NULL) {
621 rv = config_detach(sc->sc_child2, flags);
622 if (rv != 0)
623 return rv;
624 KASSERT(sc->sc_child2 == NULL);
625 }
626
627 if (sc->sc_child != NULL) {
628 rv = config_detach(sc->sc_child, flags);
629 if (rv != 0)
630 return rv;
631 KASSERT(sc->sc_child == NULL);
632 }
633
634 /* XXX unconfigure/free slots */
635
636 /* verify: */
637 xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
638 xhci_op_write_4(sc, XHCI_USBCMD, 0);
639 /* do we need to wait for stop? */
640
641 xhci_op_write_8(sc, XHCI_CRCR, 0);
642 xhci_ring_free(sc, &sc->sc_cr);
643 cv_destroy(&sc->sc_command_cv);
644 cv_destroy(&sc->sc_cmdbusy_cv);
645
646 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
647 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
648 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY);
649 xhci_ring_free(sc, &sc->sc_er);
650
651 usb_freemem(&sc->sc_eventst_dma);
652
653 xhci_op_write_8(sc, XHCI_DCBAAP, 0);
654 usb_freemem(&sc->sc_dcbaa_dma);
655
656 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);
657
658 kmem_free(sc->sc_ctlrportbus,
659 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
660 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));
661
662 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
663 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
664 }
665
666 mutex_destroy(&sc->sc_rhlock);
667 mutex_destroy(&sc->sc_lock);
668 mutex_destroy(&sc->sc_intr_lock);
669
670 pool_cache_destroy(sc->sc_xferpool);
671
672 return rv;
673 }
674
675 int
676 xhci_activate(device_t self, enum devact act)
677 {
678 struct xhci_softc * const sc = device_private(self);
679
680 switch (act) {
681 case DVACT_DEACTIVATE:
682 sc->sc_dying = true;
683 return 0;
684 default:
685 return EOPNOTSUPP;
686 }
687 }
688
689 bool
690 xhci_suspend(device_t self, const pmf_qual_t *qual)
691 {
692 struct xhci_softc * const sc = device_private(self);
693 size_t i, j, bn, dci;
694 int port;
695 uint32_t v;
696 usbd_status err;
697 bool ok = false;
698
699 XHCIHIST_FUNC(); XHCIHIST_CALLED();
700
701 /*
702 * Block issuance of new commands, and wait for all pending
703 * commands to complete.
704 */
705 mutex_enter(&sc->sc_lock);
706 KASSERT(sc->sc_suspender == NULL);
707 sc->sc_suspender = curlwp;
708 while (sc->sc_command_addr != 0)
709 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
710 mutex_exit(&sc->sc_lock);
711
712 /*
713 * Block roothub xfers which might touch portsc registers until
714 * we're done suspending.
715 */
716 mutex_enter(&sc->sc_rhlock);
717
718 /*
719 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
720 * xHCI Power Management, p. 342
721 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342
722 */
723
724 /*
725 * `1. Stop all USB activity by issuing Stop Endpoint Commands
726 * for Busy endpoints in the Running state. If the Force
727 * Save Context Capability (FSC = ``0'') is not supported,
728 * then Stop Endpoint Commands shall be issued for all idle
729 * endpoints in the Running state as well. The Stop
730 * Endpoint Command causes the xHC to update the respective
731 * Endpoint or Stream Contexts in system memory, e.g. the
732 * TR Dequeue Pointer, DCS, etc. fields. Refer to
733 * Implementation Note "0".'
734 */
735 for (i = 0; i < sc->sc_maxslots; i++) {
736 struct xhci_slot *xs = &sc->sc_slots[i];
737
738 /* Skip if the slot is not in use. */
739 if (xs->xs_idx == 0)
740 continue;
741
742 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
743 /* Skip if the endpoint is not Running. */
744 /* XXX What about Busy? */
745 if (xhci_get_epstate(sc, xs, dci) !=
746 XHCI_EPSTATE_RUNNING)
747 continue;
748
749 /* Stop endpoint. */
750 mutex_enter(&sc->sc_lock);
751 err = xhci_stop_endpoint_cmd(sc, xs, dci,
752 XHCI_TRB_3_SUSP_EP_BIT);
753 mutex_exit(&sc->sc_lock);
754 if (err) {
755 device_printf(self, "failed to stop endpoint"
756 " slot %zu dci %zu err %d\n",
757 i, dci, err);
758 goto out;
759 }
760 }
761 }
762
763 /*
764 * Next, suspend all the ports:
765 *
766 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15:
767 * Suspend-Resume, pp. 276-283
768 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276
769 */
770 for (bn = 0; bn < 2; bn++) {
771 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
772 /* 4.15.1: Port Suspend. */
773 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
774
775 /*
776 * `System software places individual ports
777 * into suspend mode by writing a ``3'' into
778 * the appropriate PORTSC register Port Link
779 * State (PLS) field (refer to Section 5.4.8).
780 * Software should only set the PLS field to
781 * ``3'' when the port is in the Enabled
782 * state.'
783 *
784 * `Software should not attempt to suspend a
785 * port unless the port reports that it is in
786 * the enabled (PED = ``1''; PLS < ``3'')
787 * state (refer to Section 5.4.8 for more
788 * information about PED and PLS).'
789 */
790 v = xhci_op_read_4(sc, port);
791 if (((v & XHCI_PS_PED) == 0) ||
792 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3)
793 continue;
794 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
795 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3);
796 xhci_op_write_4(sc, port, v);
797
798 /*
799 * `When the PLS field is written with U3
800 * (``3''), the status of the PLS bit will not
801 * change to the target U state U3 until the
802 * suspend signaling has completed to the
803 * attached device (which may be as long as
804 * 10ms.).'
805 *
806 * `Software is required to wait for U3
807 * transitions to complete before it puts the
808 * xHC into a low power state, and before
809 * resuming the port.'
810 *
811 * XXX Take advantage of the technique to
812 * reduce polling on host controllers that
813 * support the U3C capability.
814 */
815 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) {
816 v = xhci_op_read_4(sc, port);
817 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3)
818 break;
819 usb_delay_ms(&sc->sc_bus, 1);
820 }
821 if (j == XHCI_WAIT_PLS_U3) {
822 device_printf(self,
823 "suspend timeout on bus %zu port %zu\n",
824 bn, i);
825 goto out;
826 }
827 }
828 }
829
830 /*
831 * `2. Ensure that the Command Ring is in the Stopped state
832 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is
833 * empty), and all Command Completion Events associated
834 * with them have been received.'
835 *
836 * XXX
837 */
838
839 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */
840 xhci_op_write_4(sc, XHCI_USBCMD,
841 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS);
842
843 /*
844 * `4. Read the Operational Runtime, and VTIO registers in the
845 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ,
846 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their
847 * state.'
848 *
849 * (We don't use VTIO here (XXX for now?).)
850 */
851 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
852 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL);
853 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP);
854 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG);
855 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0));
856 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0));
857 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0));
858 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0));
859 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0));
860
861 /*
862 * `5. Set the Controller Save State (CSS) flag in the USBCMD
863 * register (5.4.1)...'
864 */
865 xhci_op_write_4(sc, XHCI_USBCMD,
866 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS);
867
868 /*
869 * `...and wait for the Save State Status (SSS) flag in the
870 * USBSTS register (5.4.2) to transition to ``0''.'
871 */
872 for (i = 0; i < XHCI_WAIT_SSS; i++) {
873 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0)
874 break;
875 usb_delay_ms(&sc->sc_bus, 1);
876 }
877 if (i >= XHCI_WAIT_SSS) {
878 device_printf(self, "suspend timeout, USBSTS.SSS\n");
879 /*
880 * Just optimistically go on and check SRE anyway --
881 * what's the worst that could happen?
882 */
883 }
884
885 /*
886 * `Note: After a Save or Restore operation completes, the
887 * Save/Restore Error (SRE) flag in the USBSTS register should
888 * be checked to ensure that the operation completed
889 * successfully.'
890 */
891 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
892 device_printf(self, "suspend error, USBSTS.SRE\n");
893 goto out;
894 }
895
896 /* Success! */
897 ok = true;
898
899 out: mutex_exit(&sc->sc_rhlock);
900 if (!ok) {
901 /*
902 * If suspend failed, resume command issuance.
903 */
904 mutex_enter(&sc->sc_lock);
905 KASSERT(sc->sc_suspender == curlwp);
906 sc->sc_suspender = NULL;
907 cv_broadcast(&sc->sc_cmdbusy_cv);
908 mutex_exit(&sc->sc_lock);
909 }
910 return ok;
911 }
912
913 bool
914 xhci_resume(device_t self, const pmf_qual_t *qual)
915 {
916 struct xhci_softc * const sc = device_private(self);
917 size_t i, j, bn, dci;
918 int port;
919 uint32_t v;
920 bool ok = false;
921
922 XHCIHIST_FUNC(); XHCIHIST_CALLED();
923
924 KASSERT(sc->sc_suspender);
925
926 /*
927 * Block roothub xfers which might touch portsc registers until
928 * we're done resuming.
929 */
930 mutex_enter(&sc->sc_rhlock);
931
932 /*
933 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
934 * xHCI Power Management, p. 343
935 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343
936 */
937
938 /*
939 * `4. Restore the Operational Runtime, and VTIO registers with
940 * their previously saved state in the following order:
941 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN,
942 * IMOD, and VTIO.'
943 *
944 * (We don't use VTIO here (for now?).)
945 */
946 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd);
947 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl);
948 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap);
949 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config);
950 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0);
951 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0);
952 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0);
953 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0);
954 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0);
955
956 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */
957
958 /*
959 * `5. Set the Controller Restore State (CRS) flag in the
960 * USBCMD register (5.4.1) to ``1''...'
961 */
962 xhci_op_write_4(sc, XHCI_USBCMD,
963 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS);
964
965 /*
966 * `...and wait for the Restore State Status (RSS) in the
967 * USBSTS register (5.4.2) to transition to ``0''.'
968 */
969 for (i = 0; i < XHCI_WAIT_RSS; i++) {
970 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0)
971 break;
972 usb_delay_ms(&sc->sc_bus, 1);
973 }
974 if (i >= XHCI_WAIT_RSS) {
975 device_printf(self, "resume timeout, USBSTS.RSS\n");
976 goto out;
977 }
978
979 /*
980 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits
981 * are consistent with the RCS values to be written to the
982 * CRCR.'
983 *
984 * XXX Hope just zeroing it is good enough!
985 */
986 xhci_host_dequeue(sc->sc_cr);
987
988 /*
989 * `7. Write the CRCR with the address and RCS value of the
990 * reinitialized Command Ring. Note that this write will
991 * cause the Command Ring to restart at the address
992 * specified by the CRCR.'
993 */
994 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
995 sc->sc_cr->xr_cs);
996
997 /*
998 * `8. Enable the controller by setting Run/Stop (R/S) =
999 * ``1''.'
1000 */
1001 xhci_op_write_4(sc, XHCI_USBCMD,
1002 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS);
1003
1004 /*
1005 * `9. Software shall walk the USB topology and initialize each
1006 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and
1007 * external hub ports attached to USB devices.'
1008 *
1009 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2
1010 * `Port Resume', 4.15.2.2 `Host Initiated'.
1011 *
1012 * XXX We should maybe batch up initiating the state
1013 * transitions, and then wait for them to complete all at once.
1014 */
1015 for (bn = 0; bn < 2; bn++) {
1016 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
1017 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
1018
1019 /* `When a port is in the U3 state: ...' */
1020 v = xhci_op_read_4(sc, port);
1021 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3)
1022 continue;
1023
1024 /*
1025 * `For a USB2 protocol port, software shall
1026 * write a ``15'' (Resume) to the PLS field to
1027 * initiate resume signaling. The port shall
1028 * transition to the Resume substate and the
1029 * xHC shall transmit the resume signaling
1030 * within 1ms (T_URSM). Software shall ensure
1031 * that resume is signaled for at least 20ms
1032 * (T_DRSMDN). Software shall start timing
1033 * T_DRSMDN from the write of ``15'' (Resume)
1034 * to PLS.'
1035 */
1036 if (bn == 1) {
1037 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0);
1038 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1039 v |= XHCI_PS_LWS;
1040 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME);
1041 xhci_op_write_4(sc, port, v);
1042 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
1043 } else {
1044 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0);
1045 }
1046
1047 /*
1048 * `For a USB3 protocol port [and a USB2
1049 * protocol port after transitioning to
1050 * Resume], software shall write a ``0'' (U0)
1051 * to the PLS field...'
1052 */
1053 v = xhci_op_read_4(sc, port);
1054 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1055 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0);
1056 xhci_op_write_4(sc, port, v);
1057
1058 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) {
1059 v = xhci_op_read_4(sc, port);
1060 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0)
1061 break;
1062 usb_delay_ms(&sc->sc_bus, 1);
1063 }
1064 if (j == XHCI_WAIT_PLS_U0) {
1065 device_printf(self,
1066 "resume timeout on bus %zu port %zu\n",
1067 bn, i);
1068 goto out;
1069 }
1070 }
1071 }
1072
1073 /*
1074 * `10. Restart each of the previously Running endpoints by
1075 * ringing their doorbells.'
1076 */
1077 for (i = 0; i < sc->sc_maxslots; i++) {
1078 struct xhci_slot *xs = &sc->sc_slots[i];
1079
1080 /* Skip if the slot is not in use. */
1081 if (xs->xs_idx == 0)
1082 continue;
1083
1084 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
1085 /* Skip if the endpoint is not Running. */
1086 if (xhci_get_epstate(sc, xs, dci) !=
1087 XHCI_EPSTATE_RUNNING)
1088 continue;
1089
1090 /* Ring the doorbell. */
1091 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
1092 }
1093 }
1094
1095 /*
1096 * `Note: After a Save or Restore operation completes, the
1097 * Save/Restore Error (SRE) flag in the USBSTS register should
1098 * be checked to ensure that the operation completed
1099 * successfully.'
1100 */
1101 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
1102 device_printf(self, "resume error, USBSTS.SRE\n");
1103 goto out;
1104 }
1105
1106 /* Success! */
1107 ok = true;
1108
1109 out: /*
1110 * Resume command issuance. If the hardware failed to resume,
1111 * well, tough -- deadlocking because everything is held up on
1112 * the suspension, with no opportunity to detach, isn't better
1113 * than timing out waiting for dead hardware.
1114 */
1115 mutex_enter(&sc->sc_lock);
1116 KASSERT(sc->sc_suspender);
1117 sc->sc_suspender = NULL;
1118 cv_broadcast(&sc->sc_cmdbusy_cv);
1119 mutex_exit(&sc->sc_lock);
1120
1121 mutex_exit(&sc->sc_rhlock);
1122 return ok;
1123 }
1124
1125 bool
1126 xhci_shutdown(device_t self, int flags)
1127 {
1128 return false;
1129 }
1130
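/*
 * Halt and reset the host controller: wait for CNR to clear, clear
 * Run/Stop, set HCRST and wait for it to clear, then wait for CNR to
 * clear again before touching the controller further.
 */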
1131 static int
1132 xhci_hc_reset(struct xhci_softc * const sc)
1133 {
1134 uint32_t usbcmd, usbsts;
1135 int i;
1136
1137 /* Check controller not ready */
1138 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1139 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1140 if ((usbsts & XHCI_STS_CNR) == 0)
1141 break;
1142 usb_delay_ms(&sc->sc_bus, 1);
1143 }
1144 if (i >= XHCI_WAIT_CNR) {
1145 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
1146 return EIO;
1147 }
1148
1149 /* Halt controller */
1150 usbcmd = 0;
1151 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1152 usb_delay_ms(&sc->sc_bus, 1);
1153
1154 /* Reset controller */
1155 usbcmd = XHCI_CMD_HCRST;
1156 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1157 for (i = 0; i < XHCI_WAIT_HCRST; i++) {
1158 /*
1159 * Wait 1ms first. Existing Intel xHCI requires 1ms delay to
1160 * prevent system hang (Errata).
1161 */
1162 usb_delay_ms(&sc->sc_bus, 1);
1163 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
1164 if ((usbcmd & XHCI_CMD_HCRST) == 0)
1165 break;
1166 }
1167 if (i >= XHCI_WAIT_HCRST) {
1168 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
1169 return EIO;
1170 }
1171
1172 /* Check controller not ready */
1173 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1174 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1175 if ((usbsts & XHCI_STS_CNR) == 0)
1176 break;
1177 usb_delay_ms(&sc->sc_bus, 1);
1178 }
1179 if (i >= XHCI_WAIT_CNR) {
1180 aprint_error_dev(sc->sc_dev,
1181 "controller not ready timeout after reset\n");
1182 return EIO;
1183 }
1184
1185 return 0;
1186 }
1187
1188 /* 7.2 xHCI Support Protocol Capability */
1189 static void
1190 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
1191 {
1192 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1193
1194 /* XXX Cache this lot */
1195
1196 const uint32_t w0 = xhci_read_4(sc, ecp);
1197 const uint32_t w4 = xhci_read_4(sc, ecp + 4);
1198 const uint32_t w8 = xhci_read_4(sc, ecp + 8);
1199 const uint32_t wc = xhci_read_4(sc, ecp + 0xc);
1200
1201 aprint_debug_dev(sc->sc_dev,
1202 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc);
1203
1204 if (w4 != XHCI_XECP_USBID)
1205 return;
1206
1207 const int major = XHCI_XECP_SP_W0_MAJOR(w0);
1208 const int minor = XHCI_XECP_SP_W0_MINOR(w0);
1209 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
1210 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);
1211
1212 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
1213 switch (mm) {
1214 case 0x0200:
1215 case 0x0300:
1216 case 0x0301:
1217 case 0x0310:
1218 case 0x0320:
1219 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
1220 major == 3 ? "ss" : "hs", cpo, cpo + cpc -1);
1221 break;
1222 default:
1223 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
1224 major, minor);
1225 return;
1226 }
1227
1228 const size_t bus = (major == 3) ? 0 : 1;
1229
1230 /* Index arrays with 0..n-1 where ports are numbered 1..n */
1231 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
1232 if (sc->sc_ctlrportmap[cp] != 0) {
1233 aprint_error_dev(sc->sc_dev, "controller port %zu "
1234 "already assigned", cp);
1235 continue;
1236 }
1237
1238 sc->sc_ctlrportbus[cp / NBBY] |=
1239 bus == 0 ? 0 : __BIT(cp % NBBY);
1240
1241 const size_t rhp = sc->sc_rhportcount[bus]++;
1242
1243 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
1244 "bus %zu rhp %zu is %d", bus, rhp,
1245 sc->sc_rhportmap[bus][rhp]);
1246
1247 sc->sc_rhportmap[bus][rhp] = cp + 1;
1248 sc->sc_ctlrportmap[cp] = rhp + 1;
1249 }
1250 }
1251
1252 /* Process extended capabilities */
1253 static void
1254 xhci_ecp(struct xhci_softc *sc)
1255 {
1256 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1257
1258 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4;
1259 while (ecp != 0) {
1260 uint32_t ecr = xhci_read_4(sc, ecp);
1261 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
1262 switch (XHCI_XECP_ID(ecr)) {
1263 case XHCI_ID_PROTOCOLS: {
1264 xhci_id_protocols(sc, ecp);
1265 break;
1266 }
1267 case XHCI_ID_USB_LEGACY: {
1268 uint8_t bios_sem;
1269
1270 /* Take host controller ownership from BIOS */
1271 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
1272 if (bios_sem) {
1273 /* sets xHCI to be owned by OS */
1274 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
1275 aprint_debug_dev(sc->sc_dev,
1276 "waiting for BIOS to give up control\n");
1277 for (int i = 0; i < 5000; i++) {
1278 bios_sem = xhci_read_1(sc, ecp +
1279 XHCI_XECP_BIOS_SEM);
1280 if (bios_sem == 0)
1281 break;
1282 DELAY(1000);
1283 }
1284 if (bios_sem) {
1285 aprint_error_dev(sc->sc_dev,
1286 "timed out waiting for BIOS\n");
1287 }
1288 }
1289 break;
1290 }
1291 default:
1292 break;
1293 }
1294 ecr = xhci_read_4(sc, ecp);
1295 if (XHCI_XECP_NEXT(ecr) == 0) {
1296 ecp = 0;
1297 } else {
1298 ecp += XHCI_XECP_NEXT(ecr) * 4;
1299 }
1300 }
1301 }
1302
1303 #define XHCI_HCCPREV1_BITS \
1304 "\177\020" /* New bitmask */ \
1305 "f\020\020XECP\0" \
1306 "f\014\4MAXPSA\0" \
1307 "b\013CFC\0" \
1308 "b\012SEC\0" \
1309 "b\011SBD\0" \
1310 "b\010FSE\0" \
1311 "b\7NSS\0" \
1312 "b\6LTC\0" \
1313 "b\5LHRC\0" \
1314 "b\4PIND\0" \
1315 "b\3PPC\0" \
1316 "b\2CZC\0" \
1317 "b\1BNC\0" \
1318 "b\0AC64\0" \
1319 "\0"
1320 #define XHCI_HCCV1_x_BITS \
1321 "\177\020" /* New bitmask */ \
1322 "f\020\020XECP\0" \
1323 "f\014\4MAXPSA\0" \
1324 "b\013CFC\0" \
1325 "b\012SEC\0" \
1326 "b\011SPC\0" \
1327 "b\010PAE\0" \
1328 "b\7NSS\0" \
1329 "b\6LTC\0" \
1330 "b\5LHRC\0" \
1331 "b\4PIND\0" \
1332 "b\3PPC\0" \
1333 "b\2CSZ\0" \
1334 "b\1BNC\0" \
1335 "b\0AC64\0" \
1336 "\0"
1337
1338 #define XHCI_HCC2_BITS \
1339 "\177\020" /* New bitmask */ \
1340 "b\7ETC_TSC\0" \
1341 "b\6ETC\0" \
1342 "b\5CIC\0" \
1343 "b\4LEC\0" \
1344 "b\3CTC\0" \
1345 "b\2FSC\0" \
1346 "b\1CMC\0" \
1347 "b\0U3C\0" \
1348 "\0"
1349
1350 void
1351 xhci_start(struct xhci_softc *sc)
1352 {
1353 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
1354 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
1355 /* Intel xhci needs interrupt rate moderated. */
1356 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
1357 else
1358 xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
1359 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
1360 xhci_rt_read_4(sc, XHCI_IMOD(0)));
1361
1362 /* Go! */
1363 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
1364 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n",
1365 xhci_op_read_4(sc, XHCI_USBCMD));
1366 }
1367
1368 int
1369 xhci_init(struct xhci_softc *sc)
1370 {
1371 bus_size_t bsz;
1372 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff;
1373 uint32_t pagesize, config;
1374 int i = 0;
1375 uint16_t hciversion;
1376 uint8_t caplength;
1377
1378 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1379
1380 /* Set up the bus struct for the usb 3 and usb 2 buses */
1381 sc->sc_bus.ub_methods = &xhci_bus_methods;
1382 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
1383 sc->sc_bus.ub_usedma = true;
1384 sc->sc_bus.ub_hcpriv = sc;
1385
1386 sc->sc_bus2.ub_methods = &xhci_bus_methods;
1387 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
1388 sc->sc_bus2.ub_revision = USBREV_2_0;
1389 sc->sc_bus2.ub_usedma = true;
1390 sc->sc_bus2.ub_hcpriv = sc;
1391 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;
1392
1393 caplength = xhci_read_1(sc, XHCI_CAPLENGTH);
1394 hciversion = xhci_read_2(sc, XHCI_HCIVERSION);
1395
1396 if (hciversion < XHCI_HCIVERSION_0_96 ||
1397 hciversion >= 0x0200) {
1398 aprint_normal_dev(sc->sc_dev,
1399 "xHCI version %x.%x not known to be supported\n",
1400 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1401 } else {
1402 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
1403 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1404 }
1405
1406 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
1407 &sc->sc_cbh) != 0) {
1408 aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
1409 return ENOMEM;
1410 }
1411
1412 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
1413 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
1414 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
1415 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
1416 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
1417 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
1418 aprint_debug_dev(sc->sc_dev,
1419 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);
1420
1421 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
1422 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32;
1423
1424 char sbuf[128];
1425 if (hciversion < XHCI_HCIVERSION_1_0)
1426 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc);
1427 else
1428 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc);
1429 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
1430 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n",
1431 XHCI_HCC_XECP(sc->sc_hcc) * 4);
1432 if (hciversion >= XHCI_HCIVERSION_1_1) {
1433 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
1434 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2);
1435 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
1436 }
1437
1438 /* default all ports to bus 0, i.e. usb 3 */
1439 sc->sc_ctlrportbus = kmem_zalloc(
1440 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
1441 sc->sc_ctlrportmap = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1442
1443 /* controller port to bus roothub port map */
1444 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
1445 sc->sc_rhportmap[j] = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1446 }
1447
1448 /*
1449 * Process all Extended Capabilities
1450 */
1451 xhci_ecp(sc);
1452
1453 bsz = XHCI_PORTSC(sc->sc_maxports);
1454 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
1455 &sc->sc_obh) != 0) {
1456 aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
1457 return ENOMEM;
1458 }
1459
1460 dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
1461 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
1462 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
1463 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
1464 return ENOMEM;
1465 }
1466
1467 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
1468 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
1469 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
1470 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
1471 return ENOMEM;
1472 }
1473
1474 int rv;
1475 rv = xhci_hc_reset(sc);
1476 if (rv != 0) {
1477 return rv;
1478 }
1479
1480 if (sc->sc_vendor_init)
1481 sc->sc_vendor_init(sc);
1482
1483 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
1484 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
1485 pagesize = ffs(pagesize);
1486 if (pagesize == 0) {
1487 aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
1488 return EIO;
1489 }
1490 sc->sc_pgsz = 1 << (12 + (pagesize - 1));
1491 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
1492 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
1493 (uint32_t)sc->sc_maxslots);
1494 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);
1495
1496 int err;
1497 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
1498 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
1499 if (sc->sc_maxspbuf != 0) {
1500 err = usb_allocmem(sc->sc_bus.ub_dmatag,
1501 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
1502 USBMALLOC_COHERENT | USBMALLOC_ZERO,
1503 &sc->sc_spbufarray_dma);
1504 if (err) {
1505 aprint_error_dev(sc->sc_dev,
1506 "spbufarray init fail, err %d\n", err);
1507 return ENOMEM;
1508 }
1509
1510 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
1511 sc->sc_maxspbuf, KM_SLEEP);
1512 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
1513 for (i = 0; i < sc->sc_maxspbuf; i++) {
1514 usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
1515 /* allocate contexts */
1516 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz,
1517 sc->sc_pgsz, USBMALLOC_COHERENT | USBMALLOC_ZERO,
1518 dma);
1519 if (err) {
1520 aprint_error_dev(sc->sc_dev,
1521 "spbufarray_dma init fail, err %d\n", err);
1522 rv = ENOMEM;
1523 goto bad1;
1524 }
1525 spbufarray[i] = htole64(DMAADDR(dma, 0));
1526 usb_syncmem(dma, 0, sc->sc_pgsz,
1527 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1528 }
1529
1530 usb_syncmem(&sc->sc_spbufarray_dma, 0,
1531 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
1532 }
1533
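/* Program CONFIG.MaxSlotsEn (bits 7:0) with the number of slots we use. */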
1534 config = xhci_op_read_4(sc, XHCI_CONFIG);
1535 config &= ~0xFF;
1536 config |= sc->sc_maxslots & 0xFF;
1537 xhci_op_write_4(sc, XHCI_CONFIG, config);
1538
1539 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
1540 XHCI_COMMAND_RING_SEGMENTS_ALIGN);
1541 if (err) {
1542 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n",
1543 err);
1544 rv = ENOMEM;
1545 goto bad1;
1546 }
1547
1548 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
1549 XHCI_EVENT_RING_SEGMENTS_ALIGN);
1550 if (err) {
1551 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
1552 err);
1553 rv = ENOMEM;
1554 goto bad2;
1555 }
1556
1557 usb_dma_t *dma;
1558 size_t size;
1559 size_t align;
1560
1561 dma = &sc->sc_eventst_dma;
1562 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
1563 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
1564 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
1565 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
1566 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1567 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1568 if (err) {
1569 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
1570 err);
1571 rv = ENOMEM;
1572 goto bad3;
1573 }
1574
1575 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
1576 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
1577 KERNADDR(&sc->sc_eventst_dma, 0),
1578 sc->sc_eventst_dma.udma_block->size);
1579
1580 dma = &sc->sc_dcbaa_dma;
1581 size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
1582 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
1583 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
1584 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1585 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1586 if (err) {
1587 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
1588 rv = ENOMEM;
1589 goto bad4;
1590 }
1591 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
1592 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
1593 KERNADDR(&sc->sc_dcbaa_dma, 0),
1594 sc->sc_dcbaa_dma.udma_block->size);
1595
1596 if (sc->sc_maxspbuf != 0) {
1597 /*
1598 * DCBAA entry 0 holds the scratchpad buffer array pointer.
1599 */
1600 *(uint64_t *)KERNADDR(dma, 0) =
1601 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
1602 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1603 }
1604
1605 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
1606 KM_SLEEP);
1607 if (sc->sc_slots == NULL) {
1608 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
1609 rv = ENOMEM;
1610 goto bad;
1611 }
1612
1613 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
1614 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
1615 if (sc->sc_xferpool == NULL) {
1616 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
1617 err);
1618 rv = ENOMEM;
1619 goto bad;
1620 }
1621
1622 cv_init(&sc->sc_command_cv, "xhcicmd");
1623 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
1624 mutex_init(&sc->sc_rhlock, MUTEX_DEFAULT, IPL_NONE);
1625 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
1626 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
1627
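/*
 * Fill in the single event ring segment table entry: the segment's base
 * address and its size in TRBs.
 */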
1628 struct xhci_erste *erst;
1629 erst = KERNADDR(&sc->sc_eventst_dma, 0);
1630 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0));
1631 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb);
1632 erst[0].erste_3 = htole32(0);
1633 usb_syncmem(&sc->sc_eventst_dma, 0,
1634 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);
1635
1636 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
1637 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
1638 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) |
1639 XHCI_ERDP_BUSY);
1640
1641 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
1642 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1643 sc->sc_cr->xr_cs);
1644
1645 xhci_barrier(sc, BUS_SPACE_BARRIER_WRITE);
1646
1647 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
1648 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);
1649
1650 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
1651 xhci_start(sc);
1652
1653 return 0;
1654
1655 bad:
1656 if (sc->sc_xferpool) {
1657 pool_cache_destroy(sc->sc_xferpool);
1658 sc->sc_xferpool = NULL;
1659 }
1660
1661 if (sc->sc_slots) {
1662 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
1663 sc->sc_maxslots);
1664 sc->sc_slots = NULL;
1665 }
1666
1667 usb_freemem(&sc->sc_dcbaa_dma);
1668 bad4:
1669 usb_freemem(&sc->sc_eventst_dma);
1670 bad3:
1671 xhci_ring_free(sc, &sc->sc_er);
1672 bad2:
1673 xhci_ring_free(sc, &sc->sc_cr);
1674 i = sc->sc_maxspbuf;
1675 bad1:
1676 for (int j = 0; j < i; j++)
1677 usb_freemem(&sc->sc_spbuf_dma[j]);
1678 usb_freemem(&sc->sc_spbufarray_dma);
1679
1680 return rv;
1681 }
1682
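/* True if either bus is currently operated in polling mode (ub_usepolling). */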
1683 static inline bool
1684 xhci_polling_p(struct xhci_softc * const sc)
1685 {
1686 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
1687 }
1688
1689 int
1690 xhci_intr(void *v)
1691 {
1692 struct xhci_softc * const sc = v;
1693 int ret = 0;
1694
1695 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1696
1697 if (sc == NULL)
1698 return 0;
1699
1700 mutex_spin_enter(&sc->sc_intr_lock);
1701
1702 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1703 goto done;
1704
1705 /* If we get an interrupt while polling, then just ignore it. */
1706 if (xhci_polling_p(sc)) {
1707 #ifdef DIAGNOSTIC
1708 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1709 #endif
1710 goto done;
1711 }
1712
1713 ret = xhci_intr1(sc);
1714 if (ret) {
1715 KASSERT(sc->sc_child || sc->sc_child2);
1716
1717 /*
1718 * One of the child busses could already be detached. It doesn't
1719 * matter which of the two the softintr is scheduled on.
1720 */
1721 if (sc->sc_child)
1722 usb_schedsoftintr(&sc->sc_bus);
1723 else
1724 usb_schedsoftintr(&sc->sc_bus2);
1725 }
1726 done:
1727 mutex_spin_exit(&sc->sc_intr_lock);
1728 return ret;
1729 }
1730
1731 int
1732 xhci_intr1(struct xhci_softc * const sc)
1733 {
1734 uint32_t usbsts;
1735 uint32_t iman;
1736
1737 XHCIHIST_FUNC();
1738
1739 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1740 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
1741 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
1742 XHCI_STS_HCE)) == 0) {
1743 DPRINTFN(16, "ignored intr not for %jd",
1744 device_unit(sc->sc_dev), 0, 0, 0);
1745 return 0;
1746 }
1747
1748 /*
1749 * Clear EINT and other transient flags so that the next shared
1750 * interrupt is not misinterpreted. Also, to avoid a race, EINT must
1751 * be cleared before XHCI_IMAN_INTR_PEND is cleared.
1752 */
1753 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & XHCI_STS_RSVDP0);
1754
1755 #ifdef XHCI_DEBUG
1756 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1757 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1758 #endif
1759
1760 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1761 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1762 iman |= XHCI_IMAN_INTR_PEND;
1763 xhci_rt_write_4(sc, XHCI_IMAN(0), iman);
1764
1765 #ifdef XHCI_DEBUG
1766 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1767 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1768 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1769 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1770 #endif
1771
1772 return 1;
1773 }
1774
1775 /*
1776 * 3 port speed types used in USB stack
1777 *
1778 * usbdi speed
1779 * definition: USB_SPEED_* in usb.h
1780 * They are used in struct usbd_device in USB stack.
1781 * ioctl interface uses these values too.
1782 * port_status speed
1783 * definition: UPS_*_SPEED in usb.h
1784 * They are used in usb_port_status_t and valid only for USB 2.0.
1785 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus
1786 * of usb_port_status_ext_t indicates port speed.
1787 * Note that some 3.0 values overlap with 2.0 values.
1788 * (e.g. 0x200 means UPS_PORT_POWER_SS in SS and
1789 * means UPS_LOW_SPEED in HS.)
1790 * port status returned from hub also uses these values.
1791 * On NetBSD UPS_OTHER_SPEED indicates port speed is super speed
1792 * or more.
1793 * xspeed:
1794 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
1795 * They are used in only slot context and PORTSC reg of xhci.
1796 * The difference between usbdi speed and xspeed is
1797 * that FS and LS values are swapped.
1798 */
1799
1800 /* convert usbdi speed to xspeed */
1801 static int
1802 xhci_speed2xspeed(int speed)
1803 {
1804 switch (speed) {
1805 case USB_SPEED_LOW: return 2;
1806 case USB_SPEED_FULL: return 1;
1807 default: return speed;
1808 }
1809 }
1810
1811 #if 0
1812 /* convert xspeed to usbdi speed */
1813 static int
1814 xhci_xspeed2speed(int xspeed)
1815 {
1816 switch (xspeed) {
1817 case 1: return USB_SPEED_FULL;
1818 case 2: return USB_SPEED_LOW;
1819 default: return xspeed;
1820 }
1821 }
1822 #endif
1823
1824 /* convert xspeed to port status speed */
1825 static int
1826 xhci_xspeed2psspeed(int xspeed)
1827 {
1828 switch (xspeed) {
1829 case 0: return 0;
1830 case 1: return UPS_FULL_SPEED;
1831 case 2: return UPS_LOW_SPEED;
1832 case 3: return UPS_HIGH_SPEED;
1833 default: return UPS_OTHER_SPEED;
1834 }
1835 }
1836
1837 /*
1838 * Construct input contexts and issue TRB to open pipe.
1839 */
1840 static usbd_status
1841 xhci_configure_endpoint(struct usbd_pipe *pipe)
1842 {
1843 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1844 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1845 #ifdef USB_DEBUG
1846 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1847 #endif
1848 struct xhci_soft_trb trb;
1849 usbd_status err;
1850
1851 XHCIHIST_FUNC();
1852 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
1853 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
1854 pipe->up_endpoint->ue_edesc->bmAttributes);
1855
1856 /* XXX ensure input context is available? */
1857
1858 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz);
1859
1860 /* set up context */
1861 xhci_setup_ctx(pipe);
1862
1863 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0),
1864 sc->sc_ctxsz * 1);
1865 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs,
1866 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1);
1867
1868 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1869 trb.trb_2 = 0;
1870 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1871 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1872
1873 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
1874
1875 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1876 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci),
1877 sc->sc_ctxsz * 1);
1878
1879 return err;
1880 }
1881
1882 #if 0
1883 static usbd_status
1884 xhci_unconfigure_endpoint(struct usbd_pipe *pipe)
1885 {
1886 #ifdef USB_DEBUG
1887 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1888 #endif
1889
1890 XHCIHIST_FUNC();
1891 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
1892
1893 return USBD_NORMAL_COMPLETION;
1894 }
1895 #endif
1896
1897 /* 4.6.8, 6.4.3.7 */
1898 static void
1899 xhci_reset_endpoint(struct usbd_pipe *pipe)
1900 {
1901 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1902 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1903 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1904 struct xhci_soft_trb trb;
1905
1906 XHCIHIST_FUNC();
1907 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1908
1909 KASSERT(mutex_owned(&sc->sc_lock));
1910
1911 trb.trb_0 = 0;
1912 trb.trb_2 = 0;
1913 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1914 XHCI_TRB_3_EP_SET(dci) |
1915 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP);
1916
1917 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
1918 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
1919 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
1920 }
1921 }
1922
1923 /*
1924 * 4.6.9, 6.4.3.8
1925 * Stop execution of TDs on xfer ring.
1926 * Should be called with sc_lock held.
1927 */
1928 static usbd_status
1929 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci,
1930 uint32_t trb3flags)
1931 {
1932 struct xhci_soft_trb trb;
1933 usbd_status err;
1934
1935 XHCIHIST_FUNC();
1936 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1937
1938 KASSERT(mutex_owned(&sc->sc_lock));
1939
1940 trb.trb_0 = 0;
1941 trb.trb_2 = 0;
1942 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1943 XHCI_TRB_3_EP_SET(dci) |
1944 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) |
1945 trb3flags;
1946
1947 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1948
1949 return err;
1950 }
1951
1952 static usbd_status
1953 xhci_stop_endpoint(struct usbd_pipe *pipe)
1954 {
1955 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1956 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1957 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1958
1959 XHCIHIST_FUNC();
1960 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1961
1962 KASSERT(mutex_owned(&sc->sc_lock));
1963
1964 return xhci_stop_endpoint_cmd(sc, xs, dci, 0);
1965 }
1966
1967 /*
1968 * Set TR Dequeue Pointer.
1969 * xHCI 1.1 4.6.10 6.4.3.9
1970  * Purge all of the TRBs on the ring and reinitialize the ring.
1971  * Set the TR Dequeue Pointer to the start of the ring and Cycle State to 1.
1972  * The EPSTATE of the endpoint must be ERROR or STOPPED, otherwise a
1973  * CONTEXT_STATE error will be generated.
1974 */
1975 static void
1976 xhci_set_dequeue(struct usbd_pipe *pipe)
1977 {
1978 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1979 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1980 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1981 struct xhci_ring * const xr = xs->xs_xr[dci];
1982 struct xhci_soft_trb trb;
1983
1984 XHCIHIST_FUNC();
1985 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1986
1987 KASSERT(mutex_owned(&sc->sc_lock));
1988 KASSERT(xr != NULL);
1989
1990 xhci_host_dequeue(xr);
1991
1992 /* set DCS */
1993 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
1994 trb.trb_2 = 0;
1995 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1996 XHCI_TRB_3_EP_SET(dci) |
1997 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
1998
1999 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
2000 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
2001 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
2002 }
2003 }
2004
2005 /*
2006 * Open new pipe: called from usbd_setup_pipe_flags.
2007  * Fills in the pipe's methods.
2008  * If the pipe is not for ep0, calls xhci_configure_endpoint.
2009 */
2010 static usbd_status
2011 xhci_open(struct usbd_pipe *pipe)
2012 {
2013 struct usbd_device * const dev = pipe->up_dev;
2014 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
2015 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
2016 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2017 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2018 const u_int dci = xhci_ep_get_dci(ed);
2019 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
2020 usbd_status err;
2021
2022 XHCIHIST_FUNC();
2023 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr,
2024 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed);
2025 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx",
2026 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress,
2027 ed->bmAttributes);
2028 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize),
2029 ed->bInterval, 0, 0);
2030
2031 if (sc->sc_dying)
2032 return USBD_IOERROR;
2033
2034 /* Root Hub */
2035 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) {
2036 switch (ed->bEndpointAddress) {
2037 case USB_CONTROL_ENDPOINT:
2038 pipe->up_methods = &roothub_ctrl_methods;
2039 break;
2040 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
2041 pipe->up_methods = &xhci_root_intr_methods;
2042 break;
2043 default:
2044 pipe->up_methods = NULL;
2045 DPRINTFN(0, "bad bEndpointAddress 0x%02jx",
2046 ed->bEndpointAddress, 0, 0, 0);
2047 return USBD_INVAL;
2048 }
2049 return USBD_NORMAL_COMPLETION;
2050 }
2051
2052 usb_init_task(&xpipe->xp_async_task, xhci_pipe_restart_async_task,
2053 pipe, USB_TASKQ_MPSAFE);
2054
2055 switch (xfertype) {
2056 case UE_CONTROL:
2057 pipe->up_methods = &xhci_device_ctrl_methods;
2058 break;
2059 case UE_ISOCHRONOUS:
2060 pipe->up_methods = &xhci_device_isoc_methods;
2061 pipe->up_serialise = false;
2062 xpipe->xp_isoc_next = -1;
2063 break;
2064 case UE_BULK:
2065 pipe->up_methods = &xhci_device_bulk_methods;
2066 break;
2067 case UE_INTERRUPT:
2068 pipe->up_methods = &xhci_device_intr_methods;
2069 break;
2070 default:
2071 return USBD_IOERROR;
2072 break;
2073 }
2074
2075 KASSERT(xs != NULL);
2076 KASSERT(xs->xs_xr[dci] == NULL);
2077
2078 /* allocate transfer ring */
2079 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS,
2080 XHCI_TRB_ALIGN);
2081 if (err) {
2082 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0);
2083 return err;
2084 }
2085
2086 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT)
2087 return xhci_configure_endpoint(pipe);
2088
2089 return USBD_NORMAL_COMPLETION;
2090 }
2091
2092 /*
2093 * Closes pipe, called from usbd_kill_pipe via close methods.
2094  * If the endpoint to be closed is ep0, the slot is disabled.
2095 * Should be called with sc_lock held.
2096 */
2097 static void
2098 xhci_close_pipe(struct usbd_pipe *pipe)
2099 {
2100 struct xhci_pipe * const xp =
2101 container_of(pipe, struct xhci_pipe, xp_pipe);
2102 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2103 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2104 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2105 const u_int dci = xhci_ep_get_dci(ed);
2106 struct xhci_soft_trb trb;
2107 uint32_t *cp;
2108
2109 XHCIHIST_FUNC();
2110
2111 usb_rem_task_wait(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC,
2112 &sc->sc_lock);
2113
2114 if (sc->sc_dying)
2115 return;
2116
2117 /* xs is uninitialized before xhci_init_slot */
2118 if (xs == NULL || xs->xs_idx == 0)
2119 return;
2120
2121 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2122 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2123
2124 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
2125 KASSERT(mutex_owned(&sc->sc_lock));
2126
2127 if (pipe->up_dev->ud_depth == 0)
2128 return;
2129
2130 if (dci == XHCI_DCI_EP_CONTROL) {
2131 DPRINTFN(4, "closing ep0", 0, 0, 0, 0);
2132 /* This frees all rings */
2133 xhci_disable_slot(sc, xs->xs_idx);
2134 return;
2135 }
2136
2137 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED)
2138 (void)xhci_stop_endpoint(pipe);
2139
2140 /*
2141 	 * Set the Drop Context flag for this endpoint only.
2142 	 * Don't set the DC (Deconfigure) bit to 1, otherwise all endpoints
2143 	 * would be deconfigured.
2144 */
2145 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
2146 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));
2147 cp[1] = htole32(0);
2148
2149 /* XXX should be most significant one, not dci? */
2150 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
2151 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci));
2152
2153 /* configure ep context performs an implicit dequeue */
2154 xhci_host_dequeue(xs->xs_xr[dci]);
2155
2156 /* sync input contexts before they are read from memory */
2157 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
2158
2159 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
2160 trb.trb_2 = 0;
2161 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2162 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
2163
2164 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
2165 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2166
2167 xhci_ring_free(sc, &xs->xs_xr[dci]);
2168 xs->xs_xr[dci] = NULL;
2169 }
2170
2171 /*
2172 * Abort transfer. Must be called with sc_lock held. Releases and
2173 * reacquires sc_lock to sleep until hardware acknowledges abort.
2174 */
2175 static void
2176 xhci_abortx(struct usbd_xfer *xfer)
2177 {
2178 XHCIHIST_FUNC();
2179 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2180
2181 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
2182 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
2183
2184 KASSERT(mutex_owned(&sc->sc_lock));
2185 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2186 xfer->ux_status == USBD_TIMEOUT),
2187 "bad abort status: %d", xfer->ux_status);
2188
2189 xhci_pipe_restart(xfer->ux_pipe);
2190
2191 DPRINTFN(14, "end", 0, 0, 0, 0);
2192 }
2193
2194 static void
2195 xhci_host_dequeue(struct xhci_ring * const xr)
2196 {
2197 /* When dequeueing the controller, update our struct copy too */
2198 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE);
2199 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE,
2200 BUS_DMASYNC_PREWRITE);
2201 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies));
2202
2203 xr->xr_ep = 0;
2204 xr->xr_cs = 1;
2205 }
2206
2207 /*
2208 * Recover STALLed endpoint, or stop endpoint to abort a pipe.
2209 * xHCI 1.1 sect 4.10.2.1
2210  * Issue RESET_EP to recover from the halt condition and SET_TR_DEQUEUE to
2211  * remove all transfers from the transfer ring.
2212 */
2213 static void
2214 xhci_pipe_restart(struct usbd_pipe *pipe)
2215 {
2216 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2217 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2218 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2219
2220 XHCIHIST_FUNC();
2221 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2222 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2223
2224 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2225
2226 /*
2227 * - If the endpoint is halted, indicating a stall, reset it.
2228 * - If the endpoint is stopped, we're already good.
2229 * - Otherwise, someone wanted to abort the pipe, so stop the
2230 * endpoint.
2231 *
2232 * In any case, clear the ring.
2233 */
2234 switch (xhci_get_epstate(sc, xs, dci)) {
2235 case XHCI_EPSTATE_HALTED:
2236 xhci_reset_endpoint(pipe);
2237 break;
2238 case XHCI_EPSTATE_STOPPED:
2239 break;
2240 default:
2241 xhci_stop_endpoint(pipe);
2242 break;
2243 }
2244
2245 switch (xhci_get_epstate(sc, xs, dci)) {
2246 case XHCI_EPSTATE_STOPPED:
2247 break;
2248 case XHCI_EPSTATE_ERROR:
2249 device_printf(sc->sc_dev, "endpoint 0x%x error\n",
2250 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2251 break;
2252 default:
2253 device_printf(sc->sc_dev, "endpoint 0x%x failed to stop\n",
2254 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2255 }
2256
2257 xhci_set_dequeue(pipe);
2258
2259 DPRINTFN(4, "ends", 0, 0, 0, 0);
2260 }
2261
2262 static void
2263 xhci_pipe_restart_async_task(void *cookie)
2264 {
2265 struct usbd_pipe * const pipe = cookie;
2266 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2267 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2268 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2269 struct xhci_ring * const tr = xs->xs_xr[dci];
2270 struct usbd_xfer *xfer;
2271
2272 XHCIHIST_FUNC();
2273 XHCIHIST_CALLARGS("sc=%#jx pipe=%#jx",
2274 (uintptr_t)sc, (uintptr_t)pipe, 0, 0);
2275
2276 mutex_enter(&sc->sc_lock);
2277
2278 xhci_pipe_restart(pipe);
2279
2280 /*
2281 * We halted our own queue because it stalled. Mark it no
2282 * longer halted and start issuing queued transfers again.
2283 */
2284 tr->is_halted = false;
2285 xfer = SIMPLEQ_FIRST(&pipe->up_queue);
2286 if (xfer) {
2287 /*
2288 * If the first xfer of the queue is not in progress,
2289 * though, there may be a concurrent software abort
2290 * that has already cancelled it and is now in the
2291 * middle of a concurrent xhci_pipe_restart waiting to
2292 * reacquire the pipe (bus) lock. So only restart the
2293 * xfer if it's still USBD_IN_PROGRESS.
2294 *
2295 * Either way, xfers on the queue can't be in
2296 * USBD_NOT_STARTED.
2297 */
2298 KASSERT(xfer->ux_status != USBD_NOT_STARTED);
2299 if (xfer->ux_status == USBD_IN_PROGRESS) {
2300 (*pipe->up_methods->upm_start)(xfer);
2301 } else {
2302 DPRINTF("pipe restart race xfer=%#jx status=%jd",
2303 (uintptr_t)xfer, xfer->ux_status, 0, 0);
2304 }
2305 }
2306
2307 mutex_exit(&sc->sc_lock);
2308 }
2309
2310 static void
2311 xhci_pipe_restart_async(struct usbd_pipe *pipe)
2312 {
2313 struct xhci_pipe * const xp =
2314 container_of(pipe, struct xhci_pipe, xp_pipe);
2315 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2316 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2317 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2318 struct xhci_ring * const tr = xs->xs_xr[dci];
2319
2320 XHCIHIST_FUNC();
2321 XHCIHIST_CALLARGS("pipe %#jx", (uintptr_t)pipe, 0, 0, 0);
2322
2323 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2324
2325 tr->is_halted = true;
2326 usb_add_task(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC);
2327
2328 DPRINTFN(4, "ends", 0, 0, 0, 0);
2329 }
2330
2331 /* Process roothub port status/change events and notify uhub_intr. */
2332 static void
2333 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport)
2334 {
2335 XHCIHIST_FUNC();
2336 XHCIHIST_CALLARGS("xhci%jd: port %ju status change",
2337 device_unit(sc->sc_dev), ctlrport, 0, 0);
2338
2339 if (ctlrport > sc->sc_maxports)
2340 return;
2341
2342 const size_t bn = xhci_ctlrport2bus(sc, ctlrport);
2343 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport);
2344 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn];
2345
2346 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change",
2347 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer);
2348
2349 if (xfer == NULL)
2350 return;
2351 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
2352
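	/* Report the change by setting this root hub port's bit in the hub interrupt bitmap. */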
2353 uint8_t *p = xfer->ux_buf;
2354 memset(p, 0, xfer->ux_length);
2355 p[rhp / NBBY] |= 1 << (rhp % NBBY);
2356 xfer->ux_actlen = xfer->ux_length;
2357 xfer->ux_status = USBD_NORMAL_COMPLETION;
2358 usb_transfer_complete(xfer);
2359 }
2360
2361 /* Process Transfer Events */
2362 static void
2363 xhci_event_transfer(struct xhci_softc * const sc,
2364 const struct xhci_trb * const trb)
2365 {
2366 uint64_t trb_0;
2367 uint32_t trb_2, trb_3;
2368 uint8_t trbcode;
2369 u_int slot, dci;
2370 struct xhci_slot *xs;
2371 struct xhci_ring *xr;
2372 struct xhci_xfer *xx;
2373 struct usbd_xfer *xfer;
2374 usbd_status err;
2375
2376 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2377
2378 trb_0 = le64toh(trb->trb_0);
2379 trb_2 = le32toh(trb->trb_2);
2380 trb_3 = le32toh(trb->trb_3);
2381 trbcode = XHCI_TRB_2_ERROR_GET(trb_2);
2382 slot = XHCI_TRB_3_SLOT_GET(trb_3);
2383 dci = XHCI_TRB_3_EP_GET(trb_3);
2384 xs = &sc->sc_slots[slot];
2385 xr = xs->xs_xr[dci];
2386
2387 /* sanity check */
2388 KASSERT(xr != NULL);
2389 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots,
2390 "invalid xs_idx %u slot %u", xs->xs_idx, slot);
2391
2392 int idx = 0;
2393 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2394 if (xhci_trb_get_idx(xr, trb_0, &idx)) {
2395 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0);
2396 return;
2397 }
2398 xx = xr->xr_cookies[idx];
2399
2400 /* clear cookie of consumed TRB */
2401 xr->xr_cookies[idx] = NULL;
2402
2403 /*
2404 		 * xx is NULL if the pipe is open but no xfer has been started.
2405 		 * This happens when stopping an idle pipe.
2406 */
2407 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) {
2408 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju",
2409 idx, (uintptr_t)xx, trbcode, dci);
2410 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0,
2411 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)),
2412 0, 0);
2413 return;
2414 }
2415 } else {
2416 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. */
2417 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
2418 }
2419 /* XXX this may not happen */
2420 if (xx == NULL) {
2421 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
2422 return;
2423 }
2424 xfer = &xx->xx_xfer;
2425 /* XXX this may happen when detaching */
2426 if (xfer == NULL) {
2427 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2428 (uintptr_t)xx, trb_0, 0, 0);
2429 return;
2430 }
2431 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2432 /* XXX I dunno why this happens */
2433 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2434
2435 if (!xfer->ux_pipe->up_repeat &&
2436 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2437 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2438 0, 0, 0);
2439 return;
2440 }
2441
2442 const uint8_t xfertype =
2443 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2444
2445 /* 4.11.5.2 Event Data TRB */
2446 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2447 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2448 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
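		/*
		 * The low bits of the Event Data parameter are tags set by the
		 * driver when queueing (see xhci_device_ctrl_start); judging
		 * from the handling below, 0x3 marks the data stage and 0x0
		 * the status stage.
		 */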
2449 if ((trb_0 & 0x3) == 0x3) {
2450 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2451 }
2452 }
2453
2454 switch (trbcode) {
2455 case XHCI_TRB_ERROR_SHORT_PKT:
2456 case XHCI_TRB_ERROR_SUCCESS:
2457 /*
2458 * A ctrl transfer can generate two events if it has a Data
2459 * stage. A short data stage can be OK and should not
2460 * complete the transfer as the status stage needs to be
2461 * performed.
2462 *
2463 * Note: Data and Status stage events point at same xfer.
2464 * ux_actlen and ux_dmabuf will be passed to
2465 * usb_transfer_complete after the Status stage event.
2466 *
2467 		 * Which stage generated the event can be distinguished:
2468 		 * + by checking the least significant 3 bits of trb_0 if ED==1
2469 		 *   (see xhci_device_ctrl_start).
2470 		 * + by checking the type of the original TRB if ED==0.
2471 		 *
2472 		 * In addition, intr, bulk, and isoc transfers currently
2473 		 * consist of a single TD, so the "skip" is not needed.
2474 		 * Only ctrl xfers use EVENT_DATA TRBs, so the driver can
2475 		 * switch the flow by checking the ED bit.
2476 */
2477 if (xfertype == UE_ISOCHRONOUS) {
2478 xfer->ux_frlengths[xx->xx_isoc_done] -=
2479 XHCI_TRB_2_REM_GET(trb_2);
2480 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2481 if (++xx->xx_isoc_done < xfer->ux_nframes)
2482 return;
2483 } else
2484 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2485 if (xfer->ux_actlen == 0)
2486 xfer->ux_actlen = xfer->ux_length -
2487 XHCI_TRB_2_REM_GET(trb_2);
2488 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2489 == XHCI_TRB_TYPE_DATA_STAGE) {
2490 return;
2491 }
2492 } else if ((trb_0 & 0x3) == 0x3) {
2493 return;
2494 }
2495 err = USBD_NORMAL_COMPLETION;
2496 break;
2497 case XHCI_TRB_ERROR_STOPPED:
2498 case XHCI_TRB_ERROR_LENGTH:
2499 case XHCI_TRB_ERROR_STOPPED_SHORT:
2500 err = USBD_IOERROR;
2501 break;
2502 case XHCI_TRB_ERROR_STALL:
2503 case XHCI_TRB_ERROR_BABBLE:
2504 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2505 xhci_pipe_restart_async(xfer->ux_pipe);
2506 err = USBD_STALLED;
2507 break;
2508 default:
2509 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2510 err = USBD_IOERROR;
2511 break;
2512 }
2513
2514 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
2515 (trb_0 & 0x3) == 0x0) {
2516 /*
2517 * Try to claim this xfer for completion. If it has
2518 * already completed or aborted, drop it on the floor.
2519 */
2520 if (!usbd_xfer_trycomplete(xfer))
2521 return;
2522
2523 /* Set the status. */
2524 xfer->ux_status = err;
2525
2526 usb_transfer_complete(xfer);
2527 }
2528 }
2529
2530 /* Process Command complete events */
2531 static void
2532 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb)
2533 {
2534 uint64_t trb_0;
2535 uint32_t trb_2, trb_3;
2536
2537 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2538
2539 KASSERT(mutex_owned(&sc->sc_lock));
2540
2541 trb_0 = le64toh(trb->trb_0);
2542 trb_2 = le32toh(trb->trb_2);
2543 trb_3 = le32toh(trb->trb_3);
2544
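	/*
	 * trb_0 of a Command Completion Event is the address of the command
	 * TRB, which we recorded in sc_command_addr when queueing it in
	 * xhci_do_command_locked.
	 */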
2545 if (trb_0 == sc->sc_command_addr) {
2546 sc->sc_resultpending = false;
2547
2548 sc->sc_result_trb.trb_0 = trb_0;
2549 sc->sc_result_trb.trb_2 = trb_2;
2550 sc->sc_result_trb.trb_3 = trb_3;
2551 if (XHCI_TRB_2_ERROR_GET(trb_2) !=
2552 XHCI_TRB_ERROR_SUCCESS) {
2553 DPRINTFN(1, "command completion "
2554 "failure: 0x%016jx 0x%08jx 0x%08jx",
2555 trb_0, trb_2, trb_3, 0);
2556 }
2557 cv_signal(&sc->sc_command_cv);
2558 } else {
2559 DPRINTFN(1, "spurious event: %#jx 0x%016jx "
2560 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3);
2561 }
2562 }
2563
2564 /*
2565 * Process events.
2566 * called from xhci_softintr
2567 */
2568 static void
2569 xhci_handle_event(struct xhci_softc * const sc,
2570 const struct xhci_trb * const trb)
2571 {
2572 uint64_t trb_0;
2573 uint32_t trb_2, trb_3;
2574
2575 XHCIHIST_FUNC();
2576
2577 trb_0 = le64toh(trb->trb_0);
2578 trb_2 = le32toh(trb->trb_2);
2579 trb_3 = le32toh(trb->trb_3);
2580
2581 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx",
2582 (uintptr_t)trb, trb_0, trb_2, trb_3);
2583
2584 /*
2585 * 4.11.3.1, 6.4.2.1
2586 * TRB Pointer is invalid for these completion codes.
2587 */
2588 switch (XHCI_TRB_2_ERROR_GET(trb_2)) {
2589 case XHCI_TRB_ERROR_RING_UNDERRUN:
2590 case XHCI_TRB_ERROR_RING_OVERRUN:
2591 case XHCI_TRB_ERROR_VF_RING_FULL:
2592 return;
2593 default:
2594 if (trb_0 == 0) {
2595 return;
2596 }
2597 break;
2598 }
2599
2600 switch (XHCI_TRB_3_TYPE_GET(trb_3)) {
2601 case XHCI_TRB_EVENT_TRANSFER:
2602 xhci_event_transfer(sc, trb);
2603 break;
2604 case XHCI_TRB_EVENT_CMD_COMPLETE:
2605 xhci_event_cmd(sc, trb);
2606 break;
2607 case XHCI_TRB_EVENT_PORT_STS_CHANGE:
2608 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff));
2609 break;
2610 default:
2611 break;
2612 }
2613 }
2614
2615 static void
2616 xhci_softintr(void *v)
2617 {
2618 struct usbd_bus * const bus = v;
2619 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2620 struct xhci_ring * const er = sc->sc_er;
2621 struct xhci_trb *trb;
2622 int i, j, k;
2623
2624 XHCIHIST_FUNC();
2625
2626 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2627
2628 i = er->xr_ep;
2629 j = er->xr_cs;
2630
2631 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0);
2632
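	/*
	 * Consume event TRBs for as long as their cycle bit matches our
	 * consumer cycle state; a mismatch means the controller has not
	 * written that slot yet.
	 */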
2633 while (1) {
2634 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE,
2635 BUS_DMASYNC_POSTREAD);
2636 trb = &er->xr_trb[i];
2637 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0;
2638
2639 if (j != k)
2640 break;
2641
2642 xhci_handle_event(sc, trb);
2643
2644 i++;
2645 if (i == er->xr_ntrb) {
2646 i = 0;
2647 j ^= 1;
2648 }
2649 }
2650
2651 er->xr_ep = i;
2652 er->xr_cs = j;
2653
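	/* Advance the Event Ring Dequeue Pointer and clear the Event Handler Busy (EHB) flag. */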
2654 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) |
2655 XHCI_ERDP_BUSY);
2656
2657 DPRINTFN(16, "ends", 0, 0, 0, 0);
2658
2659 return;
2660 }
2661
2662 static void
2663 xhci_poll(struct usbd_bus *bus)
2664 {
2665 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2666
2667 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2668
2669 mutex_enter(&sc->sc_intr_lock);
2670 int ret = xhci_intr1(sc);
2671 if (ret) {
2672 xhci_softintr(bus);
2673 }
2674 mutex_exit(&sc->sc_intr_lock);
2675
2676 return;
2677 }
2678
2679 static struct usbd_xfer *
2680 xhci_allocx(struct usbd_bus *bus, unsigned int nframes)
2681 {
2682 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2683 struct xhci_xfer *xx;
2684 u_int ntrbs;
2685
2686 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2687
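	/* At least 3 TRBs; a control transfer needs setup, data and status TRBs. */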
2688 ntrbs = uimax(3, nframes);
2689 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs;
2690
2691 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
2692 if (xx != NULL) {
2693 memset(xx, 0, sizeof(*xx));
2694 if (ntrbs > 0) {
2695 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP);
2696 xx->xx_ntrb = ntrbs;
2697 }
2698 #ifdef DIAGNOSTIC
2699 xx->xx_xfer.ux_state = XFER_BUSY;
2700 #endif
2701 }
2702
2703 return &xx->xx_xfer;
2704 }
2705
2706 static void
2707 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
2708 {
2709 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2710 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
2711
2712 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2713
2714 #ifdef DIAGNOSTIC
2715 if (xfer->ux_state != XFER_BUSY &&
2716 xfer->ux_status != USBD_NOT_STARTED) {
2717 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx",
2718 (uintptr_t)xfer, xfer->ux_state, 0, 0);
2719 }
2720 xfer->ux_state = XFER_FREE;
2721 #endif
2722 if (xx->xx_ntrb > 0) {
2723 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb));
2724 xx->xx_trb = NULL;
2725 xx->xx_ntrb = 0;
2726 }
2727 pool_cache_put(sc->sc_xferpool, xx);
2728 }
2729
2730 static bool
2731 xhci_dying(struct usbd_bus *bus)
2732 {
2733 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2734
2735 return sc->sc_dying;
2736 }
2737
2738 static void
2739 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2740 {
2741 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2742
2743 *lock = &sc->sc_lock;
2744 }
2745
2746 extern uint32_t usb_cookie_no;
2747
2748 /*
2749 * xHCI 4.3
2750 * Called when uhub_explore finds a new device (via usbd_new_device).
2751 * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2752 * This function does:
2753 * Allocate and construct dev structure of default endpoint (ep0).
2754 * Allocate and open pipe of ep0.
2755 * Enable slot and initialize slot context.
2756 * Set Address.
2757 * Read initial device descriptor.
2758 * Determine initial MaxPacketSize (mps) by speed.
2759 * Read full device descriptor.
2760 * Register this device.
2761  * 	Finally, the device state transitions to ADDRESSED.
2762 */
2763 static usbd_status
2764 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2765 int speed, int port, struct usbd_port *up)
2766 {
2767 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2768 struct usbd_device *dev;
2769 usbd_status err;
2770 usb_device_descriptor_t *dd;
2771 struct xhci_slot *xs;
2772 uint32_t *cp;
2773
2774 XHCIHIST_FUNC();
2775 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2776 port, depth, speed, (uintptr_t)up);
2777
2778 KASSERT(KERNEL_LOCKED_P());
2779
2780 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2781 dev->ud_bus = bus;
2782 dev->ud_quirks = &usbd_no_quirk;
2783 dev->ud_addr = 0;
2784 dev->ud_ddesc.bMaxPacketSize = 0;
2785 dev->ud_depth = depth;
2786 dev->ud_powersrc = up;
2787 dev->ud_myhub = up->up_parent;
2788 dev->ud_speed = speed;
2789 dev->ud_langid = USBD_NOLANG;
2790 dev->ud_cookie.cookie = ++usb_cookie_no;
2791
2792 /* Set up default endpoint handle. */
2793 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2794 	/* doesn't matter, just don't leave it uninitialized */
2795 dev->ud_ep0.ue_toggle = 0;
2796
2797 /* Set up default endpoint descriptor. */
2798 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2799 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2800 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2801 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2802 dev->ud_ep0desc.bInterval = 0;
2803
2804 /* 4.3, 4.8.2.1 */
2805 switch (speed) {
2806 case USB_SPEED_SUPER:
2807 case USB_SPEED_SUPER_PLUS:
2808 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2809 break;
2810 case USB_SPEED_FULL:
2811 /* XXX using 64 as initial mps of ep0 in FS */
2812 case USB_SPEED_HIGH:
2813 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2814 break;
2815 case USB_SPEED_LOW:
2816 default:
2817 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2818 break;
2819 }
2820
2821 up->up_dev = dev;
2822
2823 dd = &dev->ud_ddesc;
2824
2825 if (depth == 0 && port == 0) {
2826 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2827 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2828
2829 /* Establish the default pipe. */
2830 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2831 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2832 if (err) {
2833 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0);
2834 goto bad;
2835 }
2836 err = usbd_get_initial_ddesc(dev, dd);
2837 if (err) {
2838 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2839 goto bad;
2840 }
2841 } else {
2842 uint8_t slot = 0;
2843
2844 /* 4.3.2 */
2845 err = xhci_enable_slot(sc, &slot);
2846 if (err) {
2847 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0);
2848 goto bad;
2849 }
2850
2851 xs = &sc->sc_slots[slot];
2852 dev->ud_hcpriv = xs;
2853
2854 /* 4.3.3 initialize slot structure */
2855 err = xhci_init_slot(dev, slot);
2856 if (err) {
2857 DPRINTFN(1, "init slot %ju", err, 0, 0, 0);
2858 dev->ud_hcpriv = NULL;
2859 /*
2860 			 * We have to disable_slot here because
2861 			 * xs->xs_idx == 0 when xhci_init_slot fails,
2862 			 * and in that case usbd_remove_device won't work.
2863 */
2864 mutex_enter(&sc->sc_lock);
2865 xhci_disable_slot(sc, slot);
2866 mutex_exit(&sc->sc_lock);
2867 goto bad;
2868 }
2869
2870 /*
2871 * We have to establish the default pipe _after_ slot
2872 * structure has been prepared.
2873 */
2874 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2875 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2876 if (err) {
2877 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0,
2878 0);
2879 goto bad;
2880 }
2881
2882 /* 4.3.4 Address Assignment */
2883 err = xhci_set_address(dev, slot, false);
2884 if (err) {
2885 			DPRINTFN(1, "failed to set address: %ju", err, 0, 0, 0);
2886 goto bad;
2887 }
2888
2889 /* Allow device time to set new address */
2890 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE);
2891
2892 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2893 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT);
2894 HEXDUMP("slot context", cp, sc->sc_ctxsz);
2895 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3]));
2896 DPRINTFN(4, "device address %ju", addr, 0, 0, 0);
2897 /*
2898 * XXX ensure we know when the hardware does something
2899 * we can't yet cope with
2900 */
2901 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr);
2902 dev->ud_addr = addr;
2903
2904 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL,
2905 "addr %d already allocated", dev->ud_addr);
2906 /*
2907 * The root hub is given its own slot
2908 */
2909 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev;
2910
2911 err = usbd_get_initial_ddesc(dev, dd);
2912 if (err) {
2913 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2914 goto bad;
2915 }
2916
2917 /* 4.8.2.1 */
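		/*
		 * For SuperSpeed, bMaxPacketSize of ep0 is an exponent:
		 * the actual maximum packet size is 2^bMaxPacketSize.
		 */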
2918 if (USB_IS_SS(speed)) {
2919 if (dd->bMaxPacketSize != 9) {
2920 printf("%s: invalid mps 2^%u for SS ep0,"
2921 " using 512\n",
2922 device_xname(sc->sc_dev),
2923 dd->bMaxPacketSize);
2924 dd->bMaxPacketSize = 9;
2925 }
2926 USETW(dev->ud_ep0desc.wMaxPacketSize,
2927 (1 << dd->bMaxPacketSize));
2928 } else
2929 USETW(dev->ud_ep0desc.wMaxPacketSize,
2930 dd->bMaxPacketSize);
2931 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0);
2932 err = xhci_update_ep0_mps(sc, xs,
2933 UGETW(dev->ud_ep0desc.wMaxPacketSize));
2934 if (err) {
2935 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0);
2936 goto bad;
2937 }
2938 }
2939
2940 err = usbd_reload_device_desc(dev);
2941 if (err) {
2942 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
2943 goto bad;
2944 }
2945
2946 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,",
2947 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0);
2948 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,",
2949 dd->bDeviceClass, dd->bDeviceSubClass,
2950 dd->bDeviceProtocol, 0);
2951 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd",
2952 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations,
2953 dev->ud_speed);
2954
2955 usbd_get_device_strings(dev);
2956
2957 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev);
2958
2959 if (depth == 0 && port == 0) {
2960 usbd_attach_roothub(parent, dev);
2961 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0);
2962 return USBD_NORMAL_COMPLETION;
2963 }
2964
2965 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr);
2966 bad:
2967 if (err != USBD_NORMAL_COMPLETION) {
2968 if (depth == 0 && port == 0 && dev->ud_pipe0)
2969 usbd_kill_pipe(dev->ud_pipe0);
2970 usbd_remove_device(dev, up);
2971 }
2972
2973 return err;
2974 }
2975
2976 static usbd_status
2977 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp,
2978 size_t ntrb, size_t align)
2979 {
2980 size_t size = ntrb * XHCI_TRB_SIZE;
2981 struct xhci_ring *xr;
2982
2983 XHCIHIST_FUNC();
2984 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx",
2985 (uintptr_t)*xrp, ntrb, align, 0);
2986
2987 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP);
2988 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0);
2989
2990 int err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
2991 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xr->xr_dma);
2992 if (err) {
2993 kmem_free(xr, sizeof(struct xhci_ring));
2994 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0);
2995 return err;
2996 }
2997 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
2998 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP);
2999 xr->xr_trb = xhci_ring_trbv(xr, 0);
3000 xr->xr_ntrb = ntrb;
3001 xr->is_halted = false;
3002 xhci_host_dequeue(xr);
3003 *xrp = xr;
3004
3005 return USBD_NORMAL_COMPLETION;
3006 }
3007
3008 static void
3009 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr)
3010 {
3011 if (*xr == NULL)
3012 return;
3013
3014 usb_freemem(&(*xr)->xr_dma);
3015 mutex_destroy(&(*xr)->xr_lock);
3016 kmem_free((*xr)->xr_cookies,
3017 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb);
3018 kmem_free(*xr, sizeof(struct xhci_ring));
3019 *xr = NULL;
3020 }
3021
3022 static void
3023 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr,
3024 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs)
3025 {
3026 size_t i;
3027 u_int ri;
3028 u_int cs;
3029 uint64_t parameter;
3030 uint32_t status;
3031 uint32_t control;
3032
3033 XHCIHIST_FUNC();
3034 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju",
3035 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0);
3036
3037 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u",
3038 ntrbs, xr->xr_ntrb);
3039 for (i = 0; i < ntrbs; i++) {
3040 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr,
3041 (uintptr_t)trbs, i, 0);
3042 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx",
3043 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0);
3044 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) !=
3045 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3);
3046 }
3047
3048 ri = xr->xr_ep;
3049 cs = xr->xr_cs;
3050
3051 /*
3052 * Although the xhci hardware can do scatter/gather dma from
3053 * arbitrary sized buffers, there is a non-obvious restriction
3054 * that a LINK trb is only allowed at the end of a burst of
3055 * transfers - which might be 16kB.
3056 	 * Arbitrarily aligned LINK TRBs definitely fail on Ivy Bridge.
3057 * The simple solution is not to allow a LINK trb in the middle
3058 * of anything - as here.
3059 * XXX: (dsl) There are xhci controllers out there (eg some made by
3060 * ASMedia) that seem to lock up if they process a LINK trb but
3061 * cannot process the linked-to trb yet.
3062 * The code should write the 'cycle' bit on the link trb AFTER
3063 * adding the other trb.
3064 */
3065 u_int firstep = xr->xr_ep;
3066 u_int firstcs = xr->xr_cs;
3067
3068 for (i = 0; i < ntrbs; ) {
3069 u_int oldri = ri;
3070 u_int oldcs = cs;
3071
3072 if (ri >= (xr->xr_ntrb - 1)) {
3073 /* Put Link TD at the end of ring */
3074 parameter = xhci_ring_trbp(xr, 0);
3075 status = 0;
3076 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) |
3077 XHCI_TRB_3_TC_BIT;
3078 xr->xr_cookies[ri] = NULL;
3079 xr->xr_ep = 0;
3080 xr->xr_cs ^= 1;
3081 ri = xr->xr_ep;
3082 cs = xr->xr_cs;
3083 } else {
3084 parameter = trbs[i].trb_0;
3085 status = trbs[i].trb_2;
3086 control = trbs[i].trb_3;
3087
3088 xr->xr_cookies[ri] = cookie;
3089 ri++;
3090 i++;
3091 }
3092 /*
3093 		 * If this is the first TRB, mark it invalid (wrong cycle bit)
3094 		 * to prevent the xHC from running it immediately.
3095 */
3096 if (oldri == firstep) {
3097 if (oldcs) {
3098 control &= ~XHCI_TRB_3_CYCLE_BIT;
3099 } else {
3100 control |= XHCI_TRB_3_CYCLE_BIT;
3101 }
3102 } else {
3103 if (oldcs) {
3104 control |= XHCI_TRB_3_CYCLE_BIT;
3105 } else {
3106 control &= ~XHCI_TRB_3_CYCLE_BIT;
3107 }
3108 }
3109 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control);
3110 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri,
3111 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3112 }
3113
3114 	/* Now invert the cycle bit of the first TRB, handing the chain to the xHC */
3115 if (firstcs) {
3116 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT);
3117 } else {
3118 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT);
3119 }
3120 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep,
3121 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3122
3123 xr->xr_ep = ri;
3124 xr->xr_cs = cs;
3125
3126 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep,
3127 xr->xr_cs, 0);
3128 }
3129
3130 static inline void
3131 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr,
3132 struct xhci_xfer *xx, u_int ntrb)
3133 {
3134 KASSERT(ntrb <= xx->xx_ntrb);
3135 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb);
3136 }
3137
3138 /*
3139  * Stop execution of commands, purge all commands on the command ring,
3140  * and rewind the dequeue pointer.
3141 */
3142 static void
3143 xhci_abort_command(struct xhci_softc *sc)
3144 {
3145 struct xhci_ring * const cr = sc->sc_cr;
3146 uint64_t crcr;
3147 int i;
3148
3149 XHCIHIST_FUNC();
3150 XHCIHIST_CALLARGS("command %#jx timeout, aborting",
3151 sc->sc_command_addr, 0, 0, 0);
3152
3153 mutex_enter(&cr->xr_lock);
3154
3155 /* 4.6.1.2 Aborting a Command */
3156 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3157 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA);
3158
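	/* Wait up to 500ms for the Command Ring Running (CRR) bit to clear. */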
3159 for (i = 0; i < 500; i++) {
3160 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3161 if ((crcr & XHCI_CRCR_LO_CRR) == 0)
3162 break;
3163 usb_delay_ms(&sc->sc_bus, 1);
3164 }
3165 if ((crcr & XHCI_CRCR_LO_CRR) != 0) {
3166 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0);
3167 /* reset HC here? */
3168 }
3169
3170 /* reset command ring dequeue pointer */
3171 cr->xr_ep = 0;
3172 cr->xr_cs = 1;
3173 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs);
3174
3175 mutex_exit(&cr->xr_lock);
3176 }
3177
3178 /*
3179  * Put a command on the command ring, ring the doorbell, and cv_timedwait
3180  * for completion.  Command completion is notified by cv_signal from
3181  * xhci_event_cmd() (called from xhci_softintr), or the wait times out.
3182  * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(),
3183  * then do_command_locked examines it.
3184 */
3185 static usbd_status
3186 xhci_do_command_locked(struct xhci_softc * const sc,
3187 struct xhci_soft_trb * const trb, int timeout)
3188 {
3189 struct xhci_ring * const cr = sc->sc_cr;
3190 usbd_status err;
3191
3192 XHCIHIST_FUNC();
3193 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx",
3194 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3195
3196 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
3197 KASSERT(mutex_owned(&sc->sc_lock));
3198
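	/* Only one command may be outstanding at a time; also defer to a suspending thread. */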
3199 while (sc->sc_command_addr != 0 ||
3200 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp))
3201 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
3202
3203 /*
3204 	 * If the enqueue pointer points at the last entry of the ring, that
3205 	 * entry is the Link TRB, and the command TRB will be stored at index 0.
3206 */
3207 if (cr->xr_ep == cr->xr_ntrb - 1)
3208 sc->sc_command_addr = xhci_ring_trbp(cr, 0);
3209 else
3210 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep);
3211
3212 sc->sc_resultpending = true;
3213
3214 mutex_enter(&cr->xr_lock);
3215 xhci_ring_put(sc, cr, NULL, trb, 1);
3216 mutex_exit(&cr->xr_lock);
3217
3218 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0);
3219
3220 while (sc->sc_resultpending) {
3221 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock,
3222 MAX(1, mstohz(timeout))) == EWOULDBLOCK) {
3223 xhci_abort_command(sc);
3224 err = USBD_TIMEOUT;
3225 goto timedout;
3226 }
3227 }
3228
3229 trb->trb_0 = sc->sc_result_trb.trb_0;
3230 trb->trb_2 = sc->sc_result_trb.trb_2;
3231 trb->trb_3 = sc->sc_result_trb.trb_3;
3232
3233 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx",
3234 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3235
3236 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) {
3237 case XHCI_TRB_ERROR_SUCCESS:
3238 err = USBD_NORMAL_COMPLETION;
3239 break;
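	/* xHCI completion codes 192-223 are vendor-defined errors, 224-255 vendor-defined info. */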
3240 default:
3241 case 192 ... 223:
3242 DPRINTFN(5, "error %#jx",
3243 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0);
3244 err = USBD_IOERROR;
3245 break;
3246 case 224 ... 255:
3247 err = USBD_NORMAL_COMPLETION;
3248 break;
3249 }
3250
3251 timedout:
3252 sc->sc_resultpending = false;
3253 sc->sc_command_addr = 0;
3254 cv_broadcast(&sc->sc_cmdbusy_cv);
3255
3256 return err;
3257 }
3258
3259 static usbd_status
3260 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb,
3261 int timeout)
3262 {
3263
3264 mutex_enter(&sc->sc_lock);
3265 usbd_status ret = xhci_do_command_locked(sc, trb, timeout);
3266 mutex_exit(&sc->sc_lock);
3267
3268 return ret;
3269 }
3270
3271 static usbd_status
3272 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp)
3273 {
3274 struct xhci_soft_trb trb;
3275 usbd_status err;
3276
3277 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3278
3279 trb.trb_0 = 0;
3280 trb.trb_2 = 0;
3281 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
3282
3283 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3284 if (err != USBD_NORMAL_COMPLETION) {
3285 return err;
3286 }
3287
3288 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3);
3289
3290 return err;
3291 }
3292
3293 /*
3294 * xHCI 4.6.4
3295  * Disable the slot and deallocate the rings and device/input context DMA buffers.
3296 * All endpoints in the slot should be stopped.
3297 * Should be called with sc_lock held.
3298 */
3299 static usbd_status
3300 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot)
3301 {
3302 struct xhci_soft_trb trb;
3303 struct xhci_slot *xs;
3304 usbd_status err;
3305
3306 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3307
3308 if (sc->sc_dying)
3309 return USBD_IOERROR;
3310
3311 trb.trb_0 = 0;
3312 trb.trb_2 = 0;
3313 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) |
3314 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT);
3315
3316 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
3317
3318 if (!err) {
3319 xs = &sc->sc_slots[slot];
3320 if (xs->xs_idx != 0) {
3321 xhci_free_slot(sc, xs);
3322 xhci_set_dcba(sc, 0, slot);
3323 memset(xs, 0, sizeof(*xs));
3324 }
3325 }
3326
3327 return err;
3328 }
3329
3330 /*
3331 * Set address of device and transition slot state from ENABLED to ADDRESSED
3332  * if Block Set Address Request (BSR) is false.
3333 * If BSR==true, transition slot state from ENABLED to DEFAULT.
3334 * see xHCI 1.1 4.5.3, 3.3.4
3335 * Should be called without sc_lock held.
3336 */
3337 static usbd_status
3338 xhci_address_device(struct xhci_softc * const sc,
3339 uint64_t icp, uint8_t slot_id, bool bsr)
3340 {
3341 struct xhci_soft_trb trb;
3342 usbd_status err;
3343
3344 XHCIHIST_FUNC();
3345 if (bsr) {
3346 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr",
3347 icp, slot_id, 0, 0);
3348 } else {
3349 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr",
3350 icp, slot_id, 0, 0);
3351 }
3352
3353 trb.trb_0 = icp;
3354 trb.trb_2 = 0;
3355 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) |
3356 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) |
3357 (bsr ? XHCI_TRB_3_BSR_BIT : 0);
3358
3359 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3360
3361 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS)
3362 err = USBD_NO_ADDR;
3363
3364 return err;
3365 }
3366
3367 static usbd_status
3368 xhci_update_ep0_mps(struct xhci_softc * const sc,
3369 struct xhci_slot * const xs, u_int mps)
3370 {
3371 struct xhci_soft_trb trb;
3372 usbd_status err;
3373 uint32_t * cp;
3374
3375 XHCIHIST_FUNC();
3376 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0);
3377
3378 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3379 cp[0] = htole32(0);
3380 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL));
3381
3382 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL));
3383 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps));
3384
3385 /* sync input contexts before they are read from memory */
3386 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3387 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3388 sc->sc_ctxsz * 4);
3389
3390 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
3391 trb.trb_2 = 0;
3392 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
3393 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX);
3394
3395 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3396 return err;
3397 }
3398
3399 static void
3400 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si)
3401 {
3402 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0);
3403
3404 XHCIHIST_FUNC();
3405 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd",
3406 (uintptr_t)&dcbaa[si], dcba, si, 0);
3407
3408 dcbaa[si] = htole64(dcba);
3409 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t),
3410 BUS_DMASYNC_PREWRITE);
3411 }
3412
3413 /*
3414 * Allocate device and input context DMA buffer, and
3415 * TRB DMA buffer for each endpoint.
3416 */
3417 static usbd_status
3418 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3419 {
3420 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3421 struct xhci_slot *xs;
3422
3423 XHCIHIST_FUNC();
3424 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3425
3426 xs = &sc->sc_slots[slot];
3427
3428 /* allocate contexts */
3429 int err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3430 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_dc_dma);
3431 if (err) {
3432 DPRINTFN(1, "failed to allocmem output device context %jd",
3433 err, 0, 0, 0);
3434 return USBD_NOMEM;
3435 }
3436
3437 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3438 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_ic_dma);
3439 if (err) {
3440 DPRINTFN(1, "failed to allocmem input device context %jd",
3441 err, 0, 0, 0);
3442 goto bad1;
3443 }
3444
3445 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr));
3446 xs->xs_idx = slot;
3447
3448 return USBD_NORMAL_COMPLETION;
3449
3450 bad1:
3451 usb_freemem(&xs->xs_dc_dma);
3452 xs->xs_idx = 0;
3453 return USBD_NOMEM;
3454 }
3455
3456 static void
3457 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs)
3458 {
3459 u_int dci;
3460
3461 XHCIHIST_FUNC();
3462 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
3463
3464 /* deallocate all allocated rings in the slot */
3465 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
3466 if (xs->xs_xr[dci] != NULL)
3467 xhci_ring_free(sc, &xs->xs_xr[dci]);
3468 }
3469 usb_freemem(&xs->xs_ic_dma);
3470 usb_freemem(&xs->xs_dc_dma);
3471 xs->xs_idx = 0;
3472 }
3473
3474 /*
3475 * Setup slot context, set Device Context Base Address, and issue
3476 * Set Address Device command.
3477 */
3478 static usbd_status
3479 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3480 {
3481 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3482 struct xhci_slot *xs;
3483 usbd_status err;
3484
3485 XHCIHIST_FUNC();
3486 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3487
3488 xs = &sc->sc_slots[slot];
3489
3490 xhci_setup_ctx(dev->ud_pipe0);
3491
3492 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3493 sc->sc_ctxsz * 3);
3494
3495 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3496
3497 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3498
3499 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3500 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3501 sc->sc_ctxsz * 2);
3502
3503 return err;
3504 }
3505
3506 /*
3507 * 4.8.2, 6.2.3.2
3508 * construct slot/endpoint context parameters and do syncmem
3509 */
3510 static void
3511 xhci_setup_ctx(struct usbd_pipe *pipe)
3512 {
3513 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3514 struct usbd_device *dev = pipe->up_dev;
3515 struct xhci_slot * const xs = dev->ud_hcpriv;
3516 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3517 const u_int dci = xhci_ep_get_dci(ed);
3518 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3519 uint32_t *cp;
3520 uint16_t mps = UGETW(ed->wMaxPacketSize);
3521 uint8_t speed = dev->ud_speed;
3522 uint8_t ival = ed->bInterval;
3523
3524 XHCIHIST_FUNC();
3525 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3526 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3527
3528 /* set up initial input control context */
3529 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3530 cp[0] = htole32(0);
3531 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3532 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT));
3533 cp[7] = htole32(0);
3534
3535 /* set up input slot context */
3536 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
3537 cp[0] =
3538 XHCI_SCTX_0_CTX_NUM_SET(dci) |
3539 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed));
3540 cp[1] = 0;
3541 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0);
3542 cp[3] = 0;
3543 xhci_setup_route(pipe, cp);
3544 xhci_setup_tthub(pipe, cp);
3545
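	/* The slot context fields above were built in host order; convert to little-endian here. */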
3546 cp[0] = htole32(cp[0]);
3547 cp[1] = htole32(cp[1]);
3548 cp[2] = htole32(cp[2]);
3549 cp[3] = htole32(cp[3]);
3550
3551 /* set up input endpoint context */
3552 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci));
3553 cp[0] =
3554 XHCI_EPCTX_0_EPSTATE_SET(0) |
3555 XHCI_EPCTX_0_MULT_SET(0) |
3556 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) |
3557 XHCI_EPCTX_0_LSA_SET(0) |
3558 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0);
3559 cp[1] =
3560 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) |
3561 XHCI_EPCTX_1_HID_SET(0) |
3562 XHCI_EPCTX_1_MAXB_SET(0);
3563
3564 if (xfertype != UE_ISOCHRONOUS)
3565 cp[1] |= XHCI_EPCTX_1_CERR_SET(3);
3566
3567 if (xfertype == UE_CONTROL)
3568 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); /* 6.2.3 */
3569 else if (USB_IS_SS(speed))
3570 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(mps);
3571 else
3572 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(UE_GET_SIZE(mps));
3573
3574 xhci_setup_maxburst(pipe, cp);
3575
3576 switch (xfertype) {
3577 case UE_CONTROL:
3578 break;
3579 case UE_BULK:
3580 /* XXX Set MaxPStreams, HID, and LSA if streams enabled */
3581 break;
3582 case UE_INTERRUPT:
3583 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3584 ival = pipe->up_interval;
3585
3586 ival = xhci_bival2ival(ival, speed);
3587 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3588 break;
3589 case UE_ISOCHRONOUS:
3590 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3591 ival = pipe->up_interval;
3592
3593 /* xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 */
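		/*
		 * bInterval is an exponent (period = 2^(bInterval-1)); FS isoc
		 * counts it in 1ms frames, so add 3 to convert to 125us units,
		 * then subtract 1 to get the 0-based Interval field.
		 */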
3594 if (speed == USB_SPEED_FULL)
3595 ival += 3; /* 1ms -> 125us */
3596 ival--;
3597 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3598 break;
3599 default:
3600 break;
3601 }
3602 DPRINTFN(4, "setting ival %ju MaxBurst %#jx",
3603 XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_1_MAXB_GET(cp[1]), 0, 0);
3604
3605 /* rewind TR dequeue pointer in xHC */
3606 /* can't use xhci_ep_get_dci() yet? */
3607 *(uint64_t *)(&cp[2]) = htole64(
3608 xhci_ring_trbp(xs->xs_xr[dci], 0) |
3609 XHCI_EPCTX_2_DCS_SET(1));
3610
3611 cp[0] = htole32(cp[0]);
3612 cp[1] = htole32(cp[1]);
3613 cp[4] = htole32(cp[4]);
3614
3615 /* rewind TR dequeue pointer in driver */
3616 struct xhci_ring *xr = xs->xs_xr[dci];
3617 mutex_enter(&xr->xr_lock);
3618 xhci_host_dequeue(xr);
3619 mutex_exit(&xr->xr_lock);
3620
3621 /* sync input contexts before they are read from memory */
3622 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3623 }
3624
3625 /*
3626 * Setup route string and roothub port of given device for slot context
3627 */
3628 static void
3629 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3630 {
3631 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3632 struct usbd_device *dev = pipe->up_dev;
3633 struct usbd_port *up = dev->ud_powersrc;
3634 struct usbd_device *hub;
3635 struct usbd_device *adev;
3636 uint8_t rhport = 0;
3637 uint32_t route = 0;
3638
3639 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3640
3641 	/* Locate the root hub port and determine the route string */
3642 /* 4.3.3 route string does not include roothub port */
3643 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3644 uint32_t dep;
3645
3646 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd",
3647 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3648 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3649 -1);
3650
3651 if (hub->ud_powersrc == NULL)
3652 break;
3653 dep = hub->ud_depth;
3654 if (dep == 0)
3655 break;
3656 rhport = hub->ud_powersrc->up_portno;
3657 if (dep > USB_HUB_MAX_DEPTH)
3658 continue;
3659
3660 route |=
3661 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3662 << ((dep - 1) * 4);
3663 }
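	/* Drop the lowest nibble: it holds the root hub port, which is not part of the route string. */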
3664 route = route >> 4;
3665 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
3666
3667 /* Locate port on upstream high speed hub */
3668 for (adev = dev, hub = up->up_parent;
3669 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3670 adev = hub, hub = hub->ud_myhub)
3671 ;
3672 if (hub) {
3673 int p;
3674 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3675 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) {
3676 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1];
3677 goto found;
3678 }
3679 }
3680 panic("%s: cannot find HS port", __func__);
3681 found:
3682 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3683 } else {
3684 dev->ud_myhsport = NULL;
3685 }
3686
3687 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3688
3689 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3690 ctlrport, route, (uintptr_t)hub);
3691
3692 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3693 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3694 }
3695
3696 /*
3697  * Set up whether the device is a hub, whether the device uses MTT, and
3698  * the TT information if it uses MTT.
3699 */
3700 static void
3701 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3702 {
3703 struct usbd_device *dev = pipe->up_dev;
3704 struct usbd_port *myhsport = dev->ud_myhsport;
3705 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3706 uint32_t speed = dev->ud_speed;
3707 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3708 uint8_t tthubslot, ttportnum;
3709 bool ishub;
3710 bool usemtt;
3711
3712 XHCIHIST_FUNC();
3713
3714 /*
3715 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3716 * tthubslot:
3717 * This is the slot ID of parent HS hub
3718 * if LS/FS device is connected && connected through HS hub.
3719 * This is 0 if device is not LS/FS device ||
3720 * parent hub is not HS hub ||
3721 * attached to root hub.
3722 * ttportnum:
3723 * This is the downstream facing port of parent HS hub
3724 * if LS/FS device is connected.
3725 * This is 0 if device is not LS/FS device ||
3726 * parent hub is not HS hub ||
3727 * attached to root hub.
3728 */
3729 if (myhsport &&
3730 myhsport->up_parent->ud_addr != rhaddr &&
3731 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3732 ttportnum = myhsport->up_portno;
3733 tthubslot = myhsport->up_parent->ud_addr;
3734 } else {
3735 ttportnum = 0;
3736 tthubslot = 0;
3737 }
3738 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd",
3739 (uintptr_t)myhsport, ttportnum, tthubslot, 0);
3740
3741 /* ishub is valid after reading UDESC_DEVICE */
3742 ishub = (dd->bDeviceClass == UDCLASS_HUB);
3743
3744 /* dev->ud_hub is valid after reading UDESC_HUB */
3745 if (ishub && dev->ud_hub) {
3746 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc;
3747 uint8_t ttt =
3748 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK);
3749
3750 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts);
3751 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt);
3752 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0);
3753 }
3754
3755 #define IS_MTTHUB(dd) \
3756 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT)
3757
3758 /*
3759 * MTT flag is set if
3760 * 1. this is HS hub && MTTs are supported and enabled; or
3761 * 2. this is LS or FS device && there is a parent HS hub where MTTs
3762 * are supported and enabled.
3763 *
3764 * XXX enabled is not tested yet
3765 */
3766 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd))
3767 usemtt = true;
3768 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) &&
3769 myhsport &&
3770 myhsport->up_parent->ud_addr != rhaddr &&
3771 IS_MTTHUB(&myhsport->up_parent->ud_ddesc))
3772 usemtt = true;
3773 else
3774 usemtt = false;
3775 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd",
3776 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt);
3777
3778 #undef IS_MTTHUB
3779
3780 cp[0] |=
3781 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) |
3782 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0);
3783 cp[2] |=
3784 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) |
3785 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum);
3786 }
3787
3788 /* set up params for periodic endpoint */
3789 static void
3790 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp)
3791 {
3792 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
3793 struct usbd_device *dev = pipe->up_dev;
3794 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3795 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3796 usbd_desc_iter_t iter;
3797 const usb_cdc_descriptor_t *cdcd;
3798 uint32_t maxb = 0;
3799 uint16_t mps = UGETW(ed->wMaxPacketSize);
3800 uint8_t speed = dev->ud_speed;
3801 uint8_t mult = 0;
3802 uint8_t ep;
3803
3804 /* config desc is NULL when opening ep0 */
3805 if (dev == NULL || dev->ud_cdesc == NULL)
3806 goto no_cdcd;
3807 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev,
3808 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY);
3809 if (cdcd == NULL)
3810 goto no_cdcd;
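	/*
	 * usb_find_desc() is used only to position the iterator at the
	 * first interface descriptor; the SuperSpeed endpoint companion
	 * descriptor, if present, immediately follows the matching
	 * endpoint descriptor.
	 */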
3811 usb_desc_iter_init(dev, &iter);
3812 iter.cur = (const void *)cdcd;
3813
3814 /* find endpoint_ss_comp desc for ep of this pipe */
3815 for (ep = 0;;) {
3816 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter);
3817 if (cdcd == NULL)
3818 break;
3819 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) {
3820 ep = ((const usb_endpoint_descriptor_t *)cdcd)->
3821 bEndpointAddress;
3822 if (UE_GET_ADDR(ep) ==
3823 UE_GET_ADDR(ed->bEndpointAddress)) {
3824 cdcd = (const usb_cdc_descriptor_t *)
3825 usb_desc_iter_next(&iter);
3826 break;
3827 }
3828 ep = 0;
3829 }
3830 }
3831 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
3832 const usb_endpoint_ss_comp_descriptor_t * esscd =
3833 (const usb_endpoint_ss_comp_descriptor_t *)cdcd;
3834 maxb = esscd->bMaxBurst;
3835 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes);
3836 }
3837
3838 no_cdcd:
3839 /* 6.2.3.4, 4.8.2.4 */
3840 if (USB_IS_SS(speed)) {
3841 /* USB 3.1 9.6.6 */
3842 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps);
3843 /* USB 3.1 9.6.7 */
3844 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3845 #ifdef notyet
3846 if (xfertype == UE_ISOCHRONOUS) {
3847 }
3848 if (XHCI_HCC2_LEC(sc->sc_hcc2) != 0) {
3849 /* use ESIT */
3850 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x);
3851 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(x);
3852
3853 /* XXX if LEC = 1, set ESIT instead */
3854 cp[0] |= XHCI_EPCTX_0_MULT_SET(0);
3855 } else {
3856 /* use ival */
3857 }
3858 #endif
3859 } else {
3860 /* USB 2.0 9.6.6 */
3861 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(UE_GET_SIZE(mps));
3862
3863 /* 6.2.3.4 */
3864 if (speed == USB_SPEED_HIGH &&
3865 (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)) {
3866 maxb = UE_GET_TRANS(mps);
3867 } else {
3868 /* LS/FS or HS CTRL or HS BULK */
3869 maxb = 0;
3870 }
3871 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3872 }
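	/*
	 * Store burst and mult in one-based form; the isochronous
	 * transfer path uses xp_maxb to compute TBC/TLBPC.
	 */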
3873 xpipe->xp_maxb = maxb + 1;
3874 xpipe->xp_mult = mult + 1;
3875 }
3876
3877 /*
3878  * Convert an endpoint bInterval value to the endpoint context interval
3879  * value for an interrupt pipe.
3880  * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
3881  */
3882 static uint32_t
3883 xhci_bival2ival(uint32_t ival, uint32_t speed)
3884 {
3885 if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
3886 int i;
3887
3888 /*
3889 * round ival down to "the nearest base 2 multiple of
3890 * bInterval * 8".
3891 * bInterval is at most 255 as its type is uByte.
3892 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
3893 */
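		/*
		 * e.g. bInterval = 10 (ms): 10 * 8 = 80 >= 2^6, so the
		 * resulting interval is 6, i.e. 2^6 * 125us = 8ms.
		 */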
3894 for (i = 10; i > 0; i--) {
3895 if ((ival * 8) >= (1 << i))
3896 break;
3897 }
3898 ival = i;
3899 } else {
3900 /* Interval = bInterval-1 for SS/HS */
3901 ival--;
3902 }
3903
3904 return ival;
3905 }
3906
3907 /* ----- */
3908
3909 static void
3910 xhci_noop(struct usbd_pipe *pipe)
3911 {
3912 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3913 }
3914
3915 /*
3916 * Process root hub request.
3917 */
3918 static int
3919 xhci_roothub_ctrl_locked(struct usbd_bus *bus, usb_device_request_t *req,
3920 void *buf, int buflen)
3921 {
3922 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
3923 usb_port_status_t ps;
3924 int l, totlen = 0;
3925 uint16_t len, value, index;
3926 int port, i;
3927 uint32_t v;
3928
3929 XHCIHIST_FUNC();
3930
3931 KASSERT(mutex_owned(&sc->sc_rhlock));
3932
3933 if (sc->sc_dying)
3934 return -1;
3935
3936 size_t bn = bus == &sc->sc_bus ? 0 : 1;
3937
3938 len = UGETW(req->wLength);
3939 value = UGETW(req->wValue);
3940 index = UGETW(req->wIndex);
3941
3942 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
3943 req->bmRequestType | (req->bRequest << 8), value, index, len);
3944
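/* Combine bRequest and bmRequestType into a single switch key. */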
3945 #define C(x,y) ((x) | ((y) << 8))
3946 switch (C(req->bRequest, req->bmRequestType)) {
3947 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3948 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
3949 if (len == 0)
3950 break;
3951 switch (value) {
3952 #define sd ((usb_string_descriptor_t *)buf)
3953 case C(2, UDESC_STRING):
3954 /* Product */
3955 totlen = usb_makestrdesc(sd, len, "xHCI root hub");
3956 break;
3957 #undef sd
3958 default:
3959 /* default from usbroothub */
3960 return buflen;
3961 }
3962 break;
3963
3964 /* Hub requests */
3965 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
3966 break;
3967 /* Clear Port Feature request */
3968 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
3969 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
3970
3971 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
3972 index, value, bn, cp);
3973 if (index < 1 || index > sc->sc_rhportcount[bn]) {
3974 return -1;
3975 }
3976 port = XHCI_PORTSC(cp);
3977 v = xhci_op_read_4(sc, port);
3978 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
3979 v &= ~XHCI_PS_CLEAR;
3980 switch (value) {
3981 case UHF_PORT_ENABLE:
3982 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
3983 break;
3984 case UHF_PORT_SUSPEND:
3985 return -1;
3986 case UHF_PORT_POWER:
3987 break;
3988 case UHF_PORT_TEST:
3989 case UHF_PORT_INDICATOR:
3990 return -1;
3991 case UHF_C_PORT_CONNECTION:
3992 xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
3993 break;
3994 case UHF_C_PORT_ENABLE:
3995 case UHF_C_PORT_SUSPEND:
3996 case UHF_C_PORT_OVER_CURRENT:
3997 return -1;
3998 case UHF_C_BH_PORT_RESET:
3999 xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
4000 break;
4001 case UHF_C_PORT_RESET:
4002 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4003 break;
4004 case UHF_C_PORT_LINK_STATE:
4005 xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
4006 break;
4007 case UHF_C_PORT_CONFIG_ERROR:
4008 xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
4009 break;
4010 default:
4011 return -1;
4012 }
4013 break;
4014 }
4015 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
4016 if (len == 0)
4017 break;
4018 if ((value & 0xff) != 0) {
4019 return -1;
4020 }
4021 usb_hub_descriptor_t hubd;
4022
4023 totlen = uimin(buflen, sizeof(hubd));
4024 memcpy(&hubd, buf, totlen);
4025 hubd.bNbrPorts = sc->sc_rhportcount[bn];
4026 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
4027 hubd.bPwrOn2PwrGood = 200;
4028 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
4029 /* XXX can't find out? */
4030 			hubd.DeviceRemovable[i] = 0;
4031 }
4032 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
4033 totlen = uimin(totlen, hubd.bDescLength);
4034 memcpy(buf, &hubd, totlen);
4035 break;
4036 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
4037 if (len != 4) {
4038 return -1;
4039 }
4040 memset(buf, 0, len); /* ? XXX */
4041 totlen = len;
4042 break;
4043 /* Get Port Status request */
4044 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
4045 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4046
4047 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
4048 bn, index, cp, 0);
4049 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4050 DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
4051 "portcount=%jd",
4052 index, bn, sc->sc_rhportcount[bn], 0);
4053 return -1;
4054 }
4055 if (len != 4) {
4056 DPRINTFN(5, "bad get port status: len %jd != 4",
4057 len, 0, 0, 0);
4058 return -1;
4059 }
4060 v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
4061 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
4062 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
4063 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
4064 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
4065 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
4066 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
4067 if (v & XHCI_PS_PR) i |= UPS_RESET;
4068 if (v & XHCI_PS_PP) {
4069 if (i & UPS_OTHER_SPEED)
4070 i |= UPS_PORT_POWER_SS;
4071 else
4072 i |= UPS_PORT_POWER;
4073 }
4074 if (i & UPS_OTHER_SPEED)
4075 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
4076 if (sc->sc_vendor_port_status)
4077 i = sc->sc_vendor_port_status(sc, v, i);
4078 USETW(ps.wPortStatus, i);
4079 i = 0;
4080 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
4081 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
4082 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
4083 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
4084 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
4085 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
4086 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
4087 USETW(ps.wPortChange, i);
4088 totlen = uimin(len, sizeof(ps));
4089 memcpy(buf, &ps, totlen);
4090 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
4091 " totlen %jd",
4092 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
4093 break;
4094 }
4095 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
4096 return -1;
4097 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
4098 break;
4099 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
4100 break;
4101 /* Set Port Feature request */
4102 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
4103 int optval = (index >> 8) & 0xff;
4104 index &= 0xff;
4105 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4106 return -1;
4107 }
4108
4109 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4110
4111 port = XHCI_PORTSC(cp);
4112 v = xhci_op_read_4(sc, port);
4113 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
4114 v &= ~XHCI_PS_CLEAR;
4115 switch (value) {
4116 case UHF_PORT_ENABLE:
4117 xhci_op_write_4(sc, port, v | XHCI_PS_PED);
4118 break;
4119 case UHF_PORT_SUSPEND:
4120 /* XXX suspend */
4121 break;
4122 case UHF_PORT_RESET:
4123 xhci_op_write_4(sc, port, v | XHCI_PS_PR);
4124 /* Wait for reset to complete. */
4125 for (i = 0; i < USB_PORT_ROOT_RESET_DELAY / 10; i++) {
4126 if (sc->sc_dying) {
4127 return -1;
4128 }
4129 v = xhci_op_read_4(sc, port);
4130 if ((v & XHCI_PS_PR) == 0) {
4131 break;
4132 }
4133 usb_delay_ms(&sc->sc_bus, 10);
4134 }
4135 break;
4136 case UHF_PORT_POWER:
4137 /* XXX power control */
4138 break;
4139 /* XXX more */
4140 case UHF_C_PORT_RESET:
4141 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4142 break;
4143 case UHF_PORT_U1_TIMEOUT:
4144 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4145 return -1;
4146 }
4147 port = XHCI_PORTPMSC(cp);
4148 v = xhci_op_read_4(sc, port);
4149 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4150 index, cp, v, 0);
4151 v &= ~XHCI_PM3_U1TO_SET(0xff);
4152 v |= XHCI_PM3_U1TO_SET(optval);
4153 xhci_op_write_4(sc, port, v);
4154 break;
4155 case UHF_PORT_U2_TIMEOUT:
4156 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4157 return -1;
4158 }
4159 port = XHCI_PORTPMSC(cp);
4160 v = xhci_op_read_4(sc, port);
4161 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4162 index, cp, v, 0);
4163 v &= ~XHCI_PM3_U2TO_SET(0xff);
4164 v |= XHCI_PM3_U2TO_SET(optval);
4165 xhci_op_write_4(sc, port, v);
4166 break;
4167 default:
4168 return -1;
4169 }
4170 }
4171 break;
4172 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
4173 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
4174 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
4175 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
4176 break;
4177 default:
4178 /* default from usbroothub */
4179 return buflen;
4180 }
4181
4182 return totlen;
4183 }
4184
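/*
 * Root hub control: serialize requests with the root hub lock and
 * dispatch to the locked handler above.
 */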
4185 static int
4186 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
4187 void *buf, int buflen)
4188 {
4189 struct xhci_softc *sc = XHCI_BUS2SC(bus);
4190 int actlen;
4191
4192 mutex_enter(&sc->sc_rhlock);
4193 actlen = xhci_roothub_ctrl_locked(bus, req, buf, buflen);
4194 mutex_exit(&sc->sc_rhlock);
4195
4196 return actlen;
4197 }
4198
4199 /* root hub interrupt */
4200
4201 static usbd_status
4202 xhci_root_intr_transfer(struct usbd_xfer *xfer)
4203 {
4204 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4205
4206 /* Pipe isn't running, start first */
4207 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4208 }
4209
4210 /* Wait for roothub port status/change */
4211 static usbd_status
4212 xhci_root_intr_start(struct usbd_xfer *xfer)
4213 {
4214 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4215 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4216
4217 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4218
4219 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4220
4221 if (sc->sc_dying)
4222 return USBD_IOERROR;
4223
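	/*
	 * Record this xfer as the pending root hub interrupt transfer;
	 * it is completed later from the port status change path.
	 */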
4224 KASSERT(sc->sc_intrxfer[bn] == NULL);
4225 sc->sc_intrxfer[bn] = xfer;
4226 xfer->ux_status = USBD_IN_PROGRESS;
4227
4228 return USBD_IN_PROGRESS;
4229 }
4230
4231 static void
4232 xhci_root_intr_abort(struct usbd_xfer *xfer)
4233 {
4234 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4235 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4236
4237 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4238
4239 KASSERT(mutex_owned(&sc->sc_lock));
4240 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
4241
4242 /* If xfer has already completed, nothing to do here. */
4243 if (sc->sc_intrxfer[bn] == NULL)
4244 return;
4245
4246 /*
4247 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
4248 * Cancel it.
4249 */
4250 KASSERT(sc->sc_intrxfer[bn] == xfer);
4251 xfer->ux_status = USBD_CANCELLED;
4252 usb_transfer_complete(xfer);
4253 }
4254
4255 static void
4256 xhci_root_intr_close(struct usbd_pipe *pipe)
4257 {
4258 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
4259 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
4260 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4261
4262 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4263
4264 KASSERT(mutex_owned(&sc->sc_lock));
4265
4266 /*
4267 * Caller must guarantee the xfer has completed first, by
4268 * closing the pipe only after normal completion or an abort.
4269 */
4270 KASSERT(sc->sc_intrxfer[bn] == NULL);
4271 }
4272
4273 static void
4274 xhci_root_intr_done(struct usbd_xfer *xfer)
4275 {
4276 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4277 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4278
4279 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4280
4281 KASSERT(mutex_owned(&sc->sc_lock));
4282
4283 /* Claim the xfer so it doesn't get completed again. */
4284 KASSERT(sc->sc_intrxfer[bn] == xfer);
4285 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
4286 sc->sc_intrxfer[bn] = NULL;
4287 }
4288
4289 /* -------------- */
4290 /* device control */
4291
4292 static usbd_status
4293 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
4294 {
4295 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4296
4297 /* Pipe isn't running, start first */
4298 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4299 }
4300
4301 static usbd_status
4302 xhci_device_ctrl_start(struct usbd_xfer *xfer)
4303 {
4304 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4305 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4306 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4307 struct xhci_ring * const tr = xs->xs_xr[dci];
4308 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4309 usb_device_request_t * const req = &xfer->ux_request;
4310 const bool isread = usbd_xfer_isread(xfer);
4311 const uint32_t len = UGETW(req->wLength);
4312 usb_dma_t * const dma = &xfer->ux_dmabuf;
4313 uint64_t parameter;
4314 uint32_t status;
4315 uint32_t control;
4316 u_int i;
4317 const bool polling = xhci_polling_p(sc);
4318
4319 XHCIHIST_FUNC();
4320 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
4321 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
4322 UGETW(req->wIndex), UGETW(req->wLength));
4323
4324 KASSERT(polling || mutex_owned(&sc->sc_lock));
4325
4326 /* we rely on the bottom bits for extra info */
4327 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %zx",
4328 (uintptr_t) xfer);
4329
4330 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);
4331
4332 if (tr->is_halted)
4333 goto out;
4334
4335 i = 0;
4336
4337 /* setup phase */
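	/*
	 * The 8-byte SETUP packet rides in the TRB parameter field itself
	 * (IDT is set below), so no separate DMA buffer is needed here.
	 */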
4338 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */
4339 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
4340 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
4341 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
4342 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
4343 XHCI_TRB_3_IDT_BIT;
4344 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4345
4346 if (len != 0) {
4347 /* data phase */
4348 parameter = DMAADDR(dma, 0);
4349 KASSERTMSG(len <= 0x10000, "len %d", len);
4350 status = XHCI_TRB_2_IRQ_SET(0) |
4351 XHCI_TRB_2_TDSZ_SET(0) |
4352 XHCI_TRB_2_BYTES_SET(len);
4353 control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
4354 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
4355 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4356 XHCI_TRB_3_IOC_BIT;
4357 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4358
4359 usb_syncmem(dma, 0, len,
4360 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4361 }
4362
4363 parameter = 0;
4364 status = XHCI_TRB_2_IRQ_SET(0);
4365 /* the status stage has inverted direction */
4366 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
4367 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
4368 XHCI_TRB_3_IOC_BIT;
4369 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4370
4371 if (!polling)
4372 mutex_enter(&tr->xr_lock);
4373 xhci_ring_put_xfer(sc, tr, xx, i);
4374 if (!polling)
4375 mutex_exit(&tr->xr_lock);
4376
4377 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4378
4379 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4380 usbd_xfer_schedule_timeout(xfer);
4381 xfer->ux_status = USBD_IN_PROGRESS;
4382 } else {
4383 /*
4384 * We must be coming from xhci_pipe_restart -- timeout
4385 * already set up, nothing to do.
4386 */
4387 }
4388 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4389
4390 return USBD_IN_PROGRESS;
4391 }
4392
4393 static void
4394 xhci_device_ctrl_done(struct usbd_xfer *xfer)
4395 {
4396 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4397 usb_device_request_t *req = &xfer->ux_request;
4398 int len = UGETW(req->wLength);
4399 int rd = req->bmRequestType & UT_READ;
4400
4401 if (len)
4402 usb_syncmem(&xfer->ux_dmabuf, 0, len,
4403 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4404 }
4405
4406 static void
4407 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
4408 {
4409 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4410
4411 usbd_xfer_abort(xfer);
4412 }
4413
4414 static void
4415 xhci_device_ctrl_close(struct usbd_pipe *pipe)
4416 {
4417 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4418
4419 xhci_close_pipe(pipe);
4420 }
4421
4422 /* ------------------ */
4423 /* device isochronous */
4424
4425 static usbd_status
4426 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
4427 {
4428 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4429
4430 return xhci_device_isoc_enter(xfer);
4431 }
4432
4433 static usbd_status
4434 xhci_device_isoc_enter(struct usbd_xfer *xfer)
4435 {
4436 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4437 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4438 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4439 struct xhci_ring * const tr = xs->xs_xr[dci];
4440 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4441 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
4442 uint32_t len = xfer->ux_length;
4443 usb_dma_t * const dma = &xfer->ux_dmabuf;
4444 uint64_t parameter;
4445 uint32_t status;
4446 uint32_t control;
4447 uint32_t mfindex;
4448 uint32_t offs;
4449 int i, ival;
4450 const bool polling = xhci_polling_p(sc);
4451 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
4452 const uint16_t mps = UE_GET_SIZE(MPS);
4453 const uint8_t maxb = xpipe->xp_maxb;
4454 u_int tdpc, tbc, tlbpc;
4455
4456 XHCIHIST_FUNC();
4457 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4458 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4459
4460 KASSERT(polling || mutex_owned(&sc->sc_lock));
4461
4462 if (sc->sc_dying)
4463 return USBD_IOERROR;
4464
4465 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
4466 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4467
4468 const bool isread = usbd_xfer_isread(xfer);
4469 if (xfer->ux_length)
4470 usb_syncmem(dma, 0, xfer->ux_length,
4471 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4472
4473 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
4474 if (ival >= 1 && ival <= 16)
4475 ival = 1 << (ival - 1);
4476 else
4477 ival = 1; /* fake something up */
4478
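	/*
	 * First transfer on this pipe: choose a start frame a safe
	 * distance ahead of the current microframe index (maximum
	 * isochronous scheduling threshold of 7 frames), aligned to the
	 * endpoint interval.
	 */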
4479 if (xpipe->xp_isoc_next == -1) {
4480 mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
4481 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
4482 mfindex = XHCI_MFINDEX_GET(mfindex + 1);
4483 mfindex /= USB_UFRAMES_PER_FRAME;
4484 mfindex += 7; /* 7 frames is max possible IST */
4485 xpipe->xp_isoc_next = roundup2(mfindex, ival);
4486 }
4487
4488 offs = 0;
4489 for (i = 0; i < xfer->ux_nframes; i++) {
4490 len = xfer->ux_frlengths[i];
4491
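		/*
		 * Per-frame burst bookkeeping: e.g. len = 3000, mps = 1024,
		 * xp_maxb = 2 gives tdpc = 3 packets, tbc = 1 (two bursts)
		 * and tlbpc = 0 (one packet in the final burst).
		 */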
4492 tdpc = howmany(len, mps);
4493 tbc = howmany(tdpc, maxb) - 1;
4494 tlbpc = tdpc % maxb;
4495 tlbpc = tlbpc ? tlbpc - 1 : maxb - 1;
4496
4497 KASSERTMSG(len <= 0x10000, "len %d", len);
4498 parameter = DMAADDR(dma, offs);
4499 status = XHCI_TRB_2_IRQ_SET(0) |
4500 XHCI_TRB_2_TDSZ_SET(0) |
4501 XHCI_TRB_2_BYTES_SET(len);
4502 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
4503 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4504 XHCI_TRB_3_TBC_SET(tbc) |
4505 XHCI_TRB_3_TLBPC_SET(tlbpc) |
4506 XHCI_TRB_3_IOC_BIT;
4507 if (XHCI_HCC_CFC(sc->sc_hcc)) {
4508 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4509 #if 0
4510 } else if (xpipe->xp_isoc_next == -1) {
4511 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4512 #endif
4513 } else {
4514 control |= XHCI_TRB_3_ISO_SIA_BIT;
4515 }
4516 #if 0
4517 if (i != xfer->ux_nframes - 1)
4518 control |= XHCI_TRB_3_BEI_BIT;
4519 #endif
4520 xhci_xfer_put_trb(xx, i, parameter, status, control);
4521
4522 xpipe->xp_isoc_next += ival;
4523 offs += len;
4524 }
4525
4526 xx->xx_isoc_done = 0;
4527
4528 if (!polling)
4529 mutex_enter(&tr->xr_lock);
4530 xhci_ring_put_xfer(sc, tr, xx, i);
4531 if (!polling)
4532 mutex_exit(&tr->xr_lock);
4533
4534 xfer->ux_status = USBD_IN_PROGRESS;
4535 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4536 usbd_xfer_schedule_timeout(xfer);
4537
4538 return USBD_IN_PROGRESS;
4539 }
4540
4541 static void
4542 xhci_device_isoc_abort(struct usbd_xfer *xfer)
4543 {
4544 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4545
4546 usbd_xfer_abort(xfer);
4547 }
4548
4549 static void
4550 xhci_device_isoc_close(struct usbd_pipe *pipe)
4551 {
4552 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4553
4554 xhci_close_pipe(pipe);
4555 }
4556
4557 static void
4558 xhci_device_isoc_done(struct usbd_xfer *xfer)
4559 {
4560 #ifdef USB_DEBUG
4561 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4562 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4563 #endif
4564 const bool isread = usbd_xfer_isread(xfer);
4565
4566 XHCIHIST_FUNC();
4567 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4568 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4569
4570 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4571 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4572 }
4573
4574 /* ----------- */
4575 /* device bulk */
4576
4577 static usbd_status
4578 xhci_device_bulk_transfer(struct usbd_xfer *xfer)
4579 {
4580 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4581
4582 /* Pipe isn't running, so start it first. */
4583 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4584 }
4585
4586 static usbd_status
4587 xhci_device_bulk_start(struct usbd_xfer *xfer)
4588 {
4589 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4590 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4591 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4592 struct xhci_ring * const tr = xs->xs_xr[dci];
4593 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4594 const uint32_t len = xfer->ux_length;
4595 usb_dma_t * const dma = &xfer->ux_dmabuf;
4596 uint64_t parameter;
4597 uint32_t status;
4598 uint32_t control;
4599 u_int i = 0;
4600 const bool polling = xhci_polling_p(sc);
4601
4602 XHCIHIST_FUNC();
4603 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4604 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4605
4606 KASSERT(polling || mutex_owned(&sc->sc_lock));
4607
4608 if (sc->sc_dying)
4609 return USBD_IOERROR;
4610
4611 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4612
4613 if (tr->is_halted)
4614 goto out;
4615
4616 parameter = DMAADDR(dma, 0);
4617 const bool isread = usbd_xfer_isread(xfer);
4618 if (len)
4619 usb_syncmem(dma, 0, len,
4620 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4621
4622 	/*
4623 	 * XXX: (dsl) The physical buffer must not cross a 64k boundary.
4624 	 * If the user-supplied buffer crosses such a boundary then two
4625 	 * (or more) TRBs should be used.
4626 	 * If multiple TRBs are used the td_size field must be set correctly.
4627 	 * For v1.0 devices (like ivy bridge) this is the number of usb data
4628 	 * blocks needed to complete the transfer.
4629 	 * Setting it to 1 in the last TRB causes an extra zero-length
4630 	 * data block to be sent.
4631 	 * The earlier documentation differs, I don't know how it behaves.
4632 	 */
4633 KASSERTMSG(len <= 0x10000, "len %d", len);
4634 status = XHCI_TRB_2_IRQ_SET(0) |
4635 XHCI_TRB_2_TDSZ_SET(0) |
4636 XHCI_TRB_2_BYTES_SET(len);
4637 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4638 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4639 XHCI_TRB_3_IOC_BIT;
4640 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4641
4642 if (!polling)
4643 mutex_enter(&tr->xr_lock);
4644 xhci_ring_put_xfer(sc, tr, xx, i);
4645 if (!polling)
4646 mutex_exit(&tr->xr_lock);
4647
4648 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4649
4650 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4651 xfer->ux_status = USBD_IN_PROGRESS;
4652 usbd_xfer_schedule_timeout(xfer);
4653 } else {
4654 /*
4655 * We must be coming from xhci_pipe_restart -- timeout
4656 * already set up, nothing to do.
4657 */
4658 }
4659 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4660
4661 return USBD_IN_PROGRESS;
4662 }
4663
4664 static void
4665 xhci_device_bulk_done(struct usbd_xfer *xfer)
4666 {
4667 #ifdef USB_DEBUG
4668 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4669 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4670 #endif
4671 const bool isread = usbd_xfer_isread(xfer);
4672
4673 XHCIHIST_FUNC();
4674 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4675 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4676
4677 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4678 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4679 }
4680
4681 static void
4682 xhci_device_bulk_abort(struct usbd_xfer *xfer)
4683 {
4684 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4685
4686 usbd_xfer_abort(xfer);
4687 }
4688
4689 static void
4690 xhci_device_bulk_close(struct usbd_pipe *pipe)
4691 {
4692 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4693
4694 xhci_close_pipe(pipe);
4695 }
4696
4697 /* ---------------- */
4698 /* device interrupt */
4699
4700 static usbd_status
4701 xhci_device_intr_transfer(struct usbd_xfer *xfer)
4702 {
4703 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4704
4705 /* Pipe isn't running, so start it first. */
4706 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4707 }
4708
4709 static usbd_status
4710 xhci_device_intr_start(struct usbd_xfer *xfer)
4711 {
4712 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4713 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4714 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4715 struct xhci_ring * const tr = xs->xs_xr[dci];
4716 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4717 const uint32_t len = xfer->ux_length;
4718 const bool polling = xhci_polling_p(sc);
4719 usb_dma_t * const dma = &xfer->ux_dmabuf;
4720 uint64_t parameter;
4721 uint32_t status;
4722 uint32_t control;
4723 u_int i = 0;
4724
4725 XHCIHIST_FUNC();
4726 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4727 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4728
4729 KASSERT(polling || mutex_owned(&sc->sc_lock));
4730
4731 if (sc->sc_dying)
4732 return USBD_IOERROR;
4733
4734 if (tr->is_halted)
4735 goto out;
4736
4737 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4738
4739 const bool isread = usbd_xfer_isread(xfer);
4740 if (len)
4741 usb_syncmem(dma, 0, len,
4742 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4743
4744 parameter = DMAADDR(dma, 0);
4745 KASSERTMSG(len <= 0x10000, "len %d", len);
4746 status = XHCI_TRB_2_IRQ_SET(0) |
4747 XHCI_TRB_2_TDSZ_SET(0) |
4748 XHCI_TRB_2_BYTES_SET(len);
4749 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4750 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
4751 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4752
4753 if (!polling)
4754 mutex_enter(&tr->xr_lock);
4755 xhci_ring_put_xfer(sc, tr, xx, i);
4756 if (!polling)
4757 mutex_exit(&tr->xr_lock);
4758
4759 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4760
4761 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4762 xfer->ux_status = USBD_IN_PROGRESS;
4763 usbd_xfer_schedule_timeout(xfer);
4764 } else {
4765 /*
4766 * We must be coming from xhci_pipe_restart -- timeout
4767 * already set up, nothing to do.
4768 */
4769 }
4770 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4771
4772 return USBD_IN_PROGRESS;
4773 }
4774
4775 static void
4776 xhci_device_intr_done(struct usbd_xfer *xfer)
4777 {
4778 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4779 #ifdef USB_DEBUG
4780 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4781 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4782 #endif
4783 const bool isread = usbd_xfer_isread(xfer);
4784
4785 XHCIHIST_FUNC();
4786 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4787 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4788
4789 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4790
4791 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4792 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4793 }
4794
4795 static void
4796 xhci_device_intr_abort(struct usbd_xfer *xfer)
4797 {
4798 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4799
4800 XHCIHIST_FUNC();
4801 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);
4802
4803 KASSERT(mutex_owned(&sc->sc_lock));
4804 usbd_xfer_abort(xfer);
4805 }
4806
4807 static void
4808 xhci_device_intr_close(struct usbd_pipe *pipe)
4809 {
4810 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
4811
4812 XHCIHIST_FUNC();
4813 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);
4814
4815 xhci_close_pipe(pipe);
4816 }
4817