1 /* $NetBSD: xhci.c,v 1.146 2021/06/12 13:58:05 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * USB rev 2.0 and rev 3.1 specification
31 * http://www.usb.org/developers/docs/
32 * xHCI rev 1.1 specification
33 * http://www.intel.com/technology/usb/spec.htm
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.146 2021/06/12 13:58:05 riastradh Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_usb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/kmem.h>
47 #include <sys/device.h>
48 #include <sys/select.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/sysctl.h>
56
57 #include <machine/endian.h>
58
59 #include <dev/usb/usb.h>
60 #include <dev/usb/usbdi.h>
61 #include <dev/usb/usbdivar.h>
62 #include <dev/usb/usbdi_util.h>
63 #include <dev/usb/usbhist.h>
64 #include <dev/usb/usb_mem.h>
65 #include <dev/usb/usb_quirks.h>
66
67 #include <dev/usb/xhcireg.h>
68 #include <dev/usb/xhcivar.h>
69 #include <dev/usb/usbroothub.h>
70
71
72 #ifdef USB_DEBUG
73 #ifndef XHCI_DEBUG
74 #define xhcidebug 0
75 #else /* !XHCI_DEBUG */
76 #define HEXDUMP(a, b, c) \
77 do { \
78 if (xhcidebug > 0) \
79 hexdump(printf, a, b, c); \
80 } while (/*CONSTCOND*/0)
81 static int xhcidebug = 0;
82
83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
84 {
85 int err;
86 const struct sysctlnode *rnode;
87 const struct sysctlnode *cnode;
88
89 err = sysctl_createv(clog, 0, NULL, &rnode,
90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
91 SYSCTL_DESCR("xhci global controls"),
92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
93
94 if (err)
95 goto fail;
96
97 /* control debugging printfs */
98 err = sysctl_createv(clog, 0, &rnode, &cnode,
99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
100 "debug", SYSCTL_DESCR("Enable debugging output"),
101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
102 if (err)
103 goto fail;
104
105 return;
106 fail:
107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
108 }
109
110 #endif /* !XHCI_DEBUG */
111 #endif /* USB_DEBUG */
112
113 #ifndef HEXDUMP
114 #define HEXDUMP(a, b, c)
115 #endif
116
117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
119 #define XHCIHIST_FUNC() USBHIST_FUNC()
120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug)
121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)
123
124 #define XHCI_DCI_SLOT 0
125 #define XHCI_DCI_EP_CONTROL 1
126
127 #define XHCI_ICI_INPUT_CONTROL 0
128
129 struct xhci_pipe {
130 struct usbd_pipe xp_pipe;
131 struct usb_task xp_async_task;
132 int16_t xp_isoc_next; /* next frame */
133 uint8_t xp_maxb; /* max burst */
134 uint8_t xp_mult;
135 };
136
137 #define XHCI_COMMAND_RING_TRBS 256
138 #define XHCI_EVENT_RING_TRBS 256
139 #define XHCI_EVENT_RING_SEGMENTS 1
140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT
141
142 static usbd_status xhci_open(struct usbd_pipe *);
143 static void xhci_close_pipe(struct usbd_pipe *);
144 static int xhci_intr1(struct xhci_softc * const);
145 static void xhci_softintr(void *);
146 static void xhci_poll(struct usbd_bus *);
147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
149 static void xhci_abortx(struct usbd_xfer *);
150 static bool xhci_dying(struct usbd_bus *);
151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
153 struct usbd_port *);
154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
155 void *, int);
156
157 static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
158 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
159 static usbd_status xhci_reset_endpoint(struct usbd_pipe *);
160 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *,
161 struct xhci_slot *, u_int, uint32_t);
162 static usbd_status xhci_stop_endpoint(struct usbd_pipe *);
163
164 static void xhci_host_dequeue(struct xhci_ring * const);
165 static usbd_status xhci_set_dequeue(struct usbd_pipe *);
166
167 static usbd_status xhci_do_command(struct xhci_softc * const,
168 struct xhci_soft_trb * const, int);
169 static usbd_status xhci_do_command_locked(struct xhci_softc * const,
170 struct xhci_soft_trb * const, int);
171 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
172 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *);
173 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
174 static usbd_status xhci_enable_slot(struct xhci_softc * const,
175 uint8_t * const);
176 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
177 static usbd_status xhci_address_device(struct xhci_softc * const,
178 uint64_t, uint8_t, bool);
179 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
180 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
181 struct xhci_slot * const, u_int);
182 static usbd_status xhci_ring_init(struct xhci_softc * const,
183 struct xhci_ring **, size_t, size_t);
184 static void xhci_ring_free(struct xhci_softc * const,
185 struct xhci_ring ** const);
186
187 static void xhci_setup_ctx(struct usbd_pipe *);
188 static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
189 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
190 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
191 static uint32_t xhci_bival2ival(uint32_t, uint32_t);
192
193 static void xhci_noop(struct usbd_pipe *);
194
195 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
196 static usbd_status xhci_root_intr_start(struct usbd_xfer *);
197 static void xhci_root_intr_abort(struct usbd_xfer *);
198 static void xhci_root_intr_close(struct usbd_pipe *);
199 static void xhci_root_intr_done(struct usbd_xfer *);
200
201 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
202 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
203 static void xhci_device_ctrl_abort(struct usbd_xfer *);
204 static void xhci_device_ctrl_close(struct usbd_pipe *);
205 static void xhci_device_ctrl_done(struct usbd_xfer *);
206
207 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
208 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *);
209 static void xhci_device_isoc_abort(struct usbd_xfer *);
210 static void xhci_device_isoc_close(struct usbd_pipe *);
211 static void xhci_device_isoc_done(struct usbd_xfer *);
212
213 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
214 static usbd_status xhci_device_intr_start(struct usbd_xfer *);
215 static void xhci_device_intr_abort(struct usbd_xfer *);
216 static void xhci_device_intr_close(struct usbd_pipe *);
217 static void xhci_device_intr_done(struct usbd_xfer *);
218
219 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
220 static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
221 static void xhci_device_bulk_abort(struct usbd_xfer *);
222 static void xhci_device_bulk_close(struct usbd_pipe *);
223 static void xhci_device_bulk_done(struct usbd_xfer *);
224
225 static const struct usbd_bus_methods xhci_bus_methods = {
226 .ubm_open = xhci_open,
227 .ubm_softint = xhci_softintr,
228 .ubm_dopoll = xhci_poll,
229 .ubm_allocx = xhci_allocx,
230 .ubm_freex = xhci_freex,
231 .ubm_abortx = xhci_abortx,
232 .ubm_dying = xhci_dying,
233 .ubm_getlock = xhci_get_lock,
234 .ubm_newdev = xhci_new_device,
235 .ubm_rhctrl = xhci_roothub_ctrl,
236 };
237
238 static const struct usbd_pipe_methods xhci_root_intr_methods = {
239 .upm_transfer = xhci_root_intr_transfer,
240 .upm_start = xhci_root_intr_start,
241 .upm_abort = xhci_root_intr_abort,
242 .upm_close = xhci_root_intr_close,
243 .upm_cleartoggle = xhci_noop,
244 .upm_done = xhci_root_intr_done,
245 };
246
247
248 static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
249 .upm_transfer = xhci_device_ctrl_transfer,
250 .upm_start = xhci_device_ctrl_start,
251 .upm_abort = xhci_device_ctrl_abort,
252 .upm_close = xhci_device_ctrl_close,
253 .upm_cleartoggle = xhci_noop,
254 .upm_done = xhci_device_ctrl_done,
255 };
256
257 static const struct usbd_pipe_methods xhci_device_isoc_methods = {
258 .upm_transfer = xhci_device_isoc_transfer,
259 .upm_abort = xhci_device_isoc_abort,
260 .upm_close = xhci_device_isoc_close,
261 .upm_cleartoggle = xhci_noop,
262 .upm_done = xhci_device_isoc_done,
263 };
264
265 static const struct usbd_pipe_methods xhci_device_bulk_methods = {
266 .upm_transfer = xhci_device_bulk_transfer,
267 .upm_start = xhci_device_bulk_start,
268 .upm_abort = xhci_device_bulk_abort,
269 .upm_close = xhci_device_bulk_close,
270 .upm_cleartoggle = xhci_noop,
271 .upm_done = xhci_device_bulk_done,
272 };
273
274 static const struct usbd_pipe_methods xhci_device_intr_methods = {
275 .upm_transfer = xhci_device_intr_transfer,
276 .upm_start = xhci_device_intr_start,
277 .upm_abort = xhci_device_intr_abort,
278 .upm_close = xhci_device_intr_close,
279 .upm_cleartoggle = xhci_noop,
280 .upm_done = xhci_device_intr_done,
281 };
282
283 static inline uint32_t
284 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
285 {
286 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
287 }
288
289 static inline uint32_t
290 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset)
291 {
292 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
293 }
294
295 static inline uint32_t
296 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
297 {
298 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
299 }
300
301 static inline void
302 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
303 uint32_t value)
304 {
305 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
306 }
307
308 #if 0 /* unused */
309 static inline void
310 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
311 uint32_t value)
312 {
313 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
314 }
315 #endif /* unused */
316
317 static inline void
318 xhci_barrier(const struct xhci_softc * const sc, int flags)
319 {
320 bus_space_barrier(sc->sc_iot, sc->sc_ioh, 0, sc->sc_ios, flags);
321 }
322
323 static inline uint32_t
324 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
325 {
326 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
327 }
328
329 static inline uint32_t
330 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
331 {
332 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
333 }
334
335 static inline void
336 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
337 uint32_t value)
338 {
339 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
340 }
341
342 static inline uint64_t
343 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
344 {
345 uint64_t value;
346
347 if (XHCI_HCC_AC64(sc->sc_hcc)) {
348 #ifdef XHCI_USE_BUS_SPACE_8
349 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
350 #else
351 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
352 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
353 offset + 4) << 32;
354 #endif
355 } else {
356 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
357 }
358
359 return value;
360 }
361
362 static inline void
363 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
364 uint64_t value)
365 {
366 if (XHCI_HCC_AC64(sc->sc_hcc)) {
367 #ifdef XHCI_USE_BUS_SPACE_8
368 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
369 #else
370 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
371 (value >> 0) & 0xffffffff);
372 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
373 (value >> 32) & 0xffffffff);
374 #endif
375 } else {
376 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
377 }
378 }
379
380 static inline uint32_t
381 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
382 {
383 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
384 }
385
386 static inline void
387 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
388 uint32_t value)
389 {
390 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
391 }
392
393 static inline uint64_t
394 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
395 {
396 uint64_t value;
397
398 if (XHCI_HCC_AC64(sc->sc_hcc)) {
399 #ifdef XHCI_USE_BUS_SPACE_8
400 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
401 #else
402 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
403 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
404 offset + 4) << 32;
405 #endif
406 } else {
407 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
408 }
409
410 return value;
411 }
412
413 static inline void
414 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
415 uint64_t value)
416 {
417 if (XHCI_HCC_AC64(sc->sc_hcc)) {
418 #ifdef XHCI_USE_BUS_SPACE_8
419 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
420 #else
421 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
422 (value >> 0) & 0xffffffff);
423 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
424 (value >> 32) & 0xffffffff);
425 #endif
426 } else {
427 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
428 }
429 }
430
431 #if 0 /* unused */
432 static inline uint32_t
433 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
434 {
435 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
436 }
437 #endif /* unused */
438
439 static inline void
440 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
441 uint32_t value)
442 {
443 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
444 }
445
446 /* --- */
447
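/*
 * Map an endpoint descriptor to the xHCI Endpoint Type field (xHCI 1.1
 * sect. 6.2.3): 1/5 isoch out/in, 2/6 bulk out/in, 3/7 interrupt out/in,
 * 4 control.  Control and IN endpoints set bit 2.
 */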
448 static inline uint8_t
449 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
450 {
451 u_int eptype = 0;
452
453 switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
454 case UE_CONTROL:
455 eptype = 0x0;
456 break;
457 case UE_ISOCHRONOUS:
458 eptype = 0x1;
459 break;
460 case UE_BULK:
461 eptype = 0x2;
462 break;
463 case UE_INTERRUPT:
464 eptype = 0x3;
465 break;
466 }
467
468 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
469 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
470 return eptype | 0x4;
471 else
472 return eptype;
473 }
474
475 static u_int
476 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
477 {
478 /* xHCI 1.0 section 4.5.1 */
479 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
480 u_int in = 0;
481
482 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
483 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
484 in = 1;
485
486 return epaddr * 2 + in;
487 }
488
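/*
 * Convert a Device Context Index (DCI) into an Input Context Index (ICI);
 * the input context carries an extra Input Control Context at index 0.
 */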
489 static inline u_int
490 xhci_dci_to_ici(const u_int i)
491 {
492 return i + 1;
493 }
494
495 static inline void *
496 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
497 const u_int dci)
498 {
499 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
500 }
501
502 #if 0 /* unused */
503 static inline bus_addr_t
504 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
505 const u_int dci)
506 {
507 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
508 }
509 #endif /* unused */
510
511 static inline void *
512 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
513 const u_int ici)
514 {
515 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
516 }
517
518 static inline bus_addr_t
519 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
520 const u_int ici)
521 {
522 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
523 }
524
525 static inline struct xhci_trb *
526 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
527 {
528 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
529 }
530
531 static inline bus_addr_t
532 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
533 {
534 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
535 }
536
537 static inline void
538 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx,
539 uint64_t parameter, uint32_t status, uint32_t control)
540 {
541 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb);
542 xx->xx_trb[idx].trb_0 = parameter;
543 xx->xx_trb[idx].trb_2 = status;
544 xx->xx_trb[idx].trb_3 = control;
545 }
546
547 static inline void
548 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
549 uint32_t control)
550 {
551 trb->trb_0 = htole64(parameter);
552 trb->trb_2 = htole32(status);
553 trb->trb_3 = htole32(control);
554 }
555
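/*
 * Convert a TRB bus address (e.g. trb_0 of an event TRB) back into an
 * index into the given ring; returns nonzero if the address is not a
 * valid TRB boundary within the ring.
 */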
556 static int
557 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
558 {
559 /* base address of TRBs */
560 bus_addr_t trbp = xhci_ring_trbp(xr, 0);
561
562 /* trb_0 range sanity check */
563 if (trb_0 == 0 || trb_0 < trbp ||
564 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
565 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
566 return 1;
567 }
568 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
569 return 0;
570 }
571
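/* Read the current endpoint state out of the device context for dci. */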
572 static unsigned int
573 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
574 u_int dci)
575 {
576 uint32_t *cp;
577
578 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
579 cp = xhci_slot_get_dcv(sc, xs, dci);
580 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
581 }
582
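/*
 * Return the bus index (0 for the USB 3 root hub, 1 for the USB 2 root
 * hub) that a controller port (1..n) is assigned to.
 */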
583 static inline unsigned int
584 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
585 {
586 const unsigned int port = ctlrport - 1;
587 const uint8_t bit = __BIT(port % NBBY);
588
589 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
590 }
591
592 /*
593 * Return the roothub port for a controller port. Both are 1..n.
594 */
595 static inline unsigned int
596 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
597 {
598
599 return sc->sc_ctlrportmap[ctrlport - 1];
600 }
601
602 /*
603 * Return the controller port for a bus roothub port. Both are 1..n.
604 */
605 static inline unsigned int
606 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
607 unsigned int rhport)
608 {
609
610 return sc->sc_rhportmap[bn][rhport - 1];
611 }
612
613 /* --- */
614
615 void
616 xhci_childdet(device_t self, device_t child)
617 {
618 struct xhci_softc * const sc = device_private(self);
619
620 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
621 if (child == sc->sc_child2)
622 sc->sc_child2 = NULL;
623 else if (child == sc->sc_child)
624 sc->sc_child = NULL;
625 }
626
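/* Detach both child busses and release the controller's resources. */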
627 int
628 xhci_detach(struct xhci_softc *sc, int flags)
629 {
630 int rv = 0;
631
632 if (sc->sc_child2 != NULL) {
633 rv = config_detach(sc->sc_child2, flags);
634 if (rv != 0)
635 return rv;
636 KASSERT(sc->sc_child2 == NULL);
637 }
638
639 if (sc->sc_child != NULL) {
640 rv = config_detach(sc->sc_child, flags);
641 if (rv != 0)
642 return rv;
643 KASSERT(sc->sc_child == NULL);
644 }
645
646 /* XXX unconfigure/free slots */
647
648 /* verify: */
649 xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
650 xhci_op_write_4(sc, XHCI_USBCMD, 0);
651 /* do we need to wait for stop? */
652
653 xhci_op_write_8(sc, XHCI_CRCR, 0);
654 xhci_ring_free(sc, &sc->sc_cr);
655 cv_destroy(&sc->sc_command_cv);
656 cv_destroy(&sc->sc_cmdbusy_cv);
657
658 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
659 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
660 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY);
661 xhci_ring_free(sc, &sc->sc_er);
662
663 usb_freemem(&sc->sc_bus, &sc->sc_eventst_dma);
664
665 xhci_op_write_8(sc, XHCI_DCBAAP, 0);
666 usb_freemem(&sc->sc_bus, &sc->sc_dcbaa_dma);
667
668 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);
669
670 kmem_free(sc->sc_ctlrportbus,
671 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
672 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));
673
674 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
675 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
676 }
677
678 mutex_destroy(&sc->sc_lock);
679 mutex_destroy(&sc->sc_intr_lock);
680
681 pool_cache_destroy(sc->sc_xferpool);
682
683 return rv;
684 }
685
686 int
687 xhci_activate(device_t self, enum devact act)
688 {
689 struct xhci_softc * const sc = device_private(self);
690
691 switch (act) {
692 case DVACT_DEACTIVATE:
693 sc->sc_dying = true;
694 return 0;
695 default:
696 return EOPNOTSUPP;
697 }
698 }
699
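/*
 * Save the controller state and suspend all ports, following the
 * procedure in xHCI 1.2 sect. 4.23.2 (xHCI Power Management).
 */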
700 bool
701 xhci_suspend(device_t self, const pmf_qual_t *qual)
702 {
703 struct xhci_softc * const sc = device_private(self);
704 size_t i, j, bn, dci;
705 int port;
706 uint32_t v;
707 usbd_status err;
708 bool ok = false;
709
710 XHCIHIST_FUNC(); XHCIHIST_CALLED();
711
712 mutex_enter(&sc->sc_lock);
713
714 /*
715 * Block issuance of new commands, and wait for all pending
716 * commands to complete.
717 */
718 KASSERT(sc->sc_suspender == NULL);
719 sc->sc_suspender = curlwp;
720 while (sc->sc_command_addr != 0)
721 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
722
723 /*
724 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
725 * xHCI Power Management, p. 342
726 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342
727 */
728
729 /*
730 * `1. Stop all USB activity by issuing Stop Endpoint Commands
731 * for Busy endpoints in the Running state. If the Force
732 * Save Context Capability (FSC = ``0'') is not supported,
733 * then Stop Endpoint Commands shall be issued for all idle
734 * endpoints in the Running state as well. The Stop
735 * Endpoint Command causes the xHC to update the respective
736 * Endpoint or Stream Contexts in system memory, e.g. the
737 * TR Dequeue Pointer, DCS, etc. fields. Refer to
738 * Implementation Note "0".'
739 */
740 for (i = 0; i < sc->sc_maxslots; i++) {
741 struct xhci_slot *xs = &sc->sc_slots[i];
742
743 /* Skip if the slot is not in use. */
744 if (xs->xs_idx == 0)
745 continue;
746
747 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
748 /* Skip if the endpoint is not Running. */
749 /* XXX What about Busy? */
750 if (xhci_get_epstate(sc, xs, dci) !=
751 XHCI_EPSTATE_RUNNING)
752 continue;
753
754 /* Stop endpoint. */
755 err = xhci_stop_endpoint_cmd(sc, xs, dci,
756 XHCI_TRB_3_SUSP_EP_BIT);
757 if (err) {
758 device_printf(self, "failed to stop endpoint"
759 " slot %zu dci %zu err %d\n",
760 i, dci, err);
761 goto out;
762 }
763 }
764 }
765
766 /*
767 * Next, suspend all the ports:
768 *
769 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15:
770 * Suspend-Resume, pp. 276-283
771 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276
772 */
773 for (bn = 0; bn < 2; bn++) {
774 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
775 /* 4.15.1: Port Suspend. */
776 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
777
778 /*
779 * `System software places individual ports
780 * into suspend mode by writing a ``3'' into
781 * the appropriate PORTSC register Port Link
782 * State (PLS) field (refer to Section 5.4.8).
783 * Software should only set the PLS field to
784 * ``3'' when the port is in the Enabled
785 * state.'
786 *
787 * `Software should not attempt to suspend a
788 * port unless the port reports that it is in
789 * the enabled (PED = ``1''; PLS < ``3'')
790 * state (refer to Section 5.4.8 for more
791 * information about PED and PLS).'
792 */
793 v = xhci_op_read_4(sc, port);
794 if (((v & XHCI_PS_PED) == 0) ||
795 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3)
796 continue;
797 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
798 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3);
799 xhci_op_write_4(sc, port, v);
800
801 /*
802 * `When the PLS field is written with U3
803 * (``3''), the status of the PLS bit will not
804 * change to the target U state U3 until the
805 * suspend signaling has completed to the
806 * attached device (which may be as long as
807 * 10ms.).'
808 *
809 * `Software is required to wait for U3
810 * transitions to complete before it puts the
811 * xHC into a low power state, and before
812 * resuming the port.'
813 *
814 * XXX Take advantage of the technique to
815 * reduce polling on host controllers that
816 * support the U3C capability.
817 */
818 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) {
819 v = xhci_op_read_4(sc, port);
820 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3)
821 break;
822 usb_delay_ms(&sc->sc_bus, 1);
823 }
824 if (j == XHCI_WAIT_PLS_U3) {
825 device_printf(self,
826 "suspend timeout on bus %zu port %zu\n",
827 bn, i);
828 goto out;
829 }
830 }
831 }
832
833 /*
834 * `2. Ensure that the Command Ring is in the Stopped state
835 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is
836 * empty), and all Command Completion Events associated
837 * with them have been received.'
838 *
839 * XXX
840 */
841
842 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */
843 xhci_op_write_4(sc, XHCI_USBCMD,
844 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS);
845
846 /*
847 * `4. Read the Operational Runtime, and VTIO registers in the
848 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ,
849 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their
850 * state.'
851 *
852 * (We don't use VTIO here (XXX for now?).)
853 */
854 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
855 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL);
856 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP);
857 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG);
858 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0));
859 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0));
860 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0));
861 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0));
862 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0));
863
864 /*
865 * `5. Set the Controller Save State (CSS) flag in the USBCMD
866 * register (5.4.1)...'
867 */
868 xhci_op_write_4(sc, XHCI_USBCMD,
869 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS);
870
871 /*
872 * `...and wait for the Save State Status (SSS) flag in the
873 * USBSTS register (5.4.2) to transition to ``0''.'
874 */
875 for (i = 0; i < XHCI_WAIT_SSS; i++) {
876 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0)
877 break;
878 usb_delay_ms(&sc->sc_bus, 1);
879 }
880 if (i >= XHCI_WAIT_SSS) {
881 device_printf(self, "suspend timeout, USBSTS.SSS\n");
882 /*
883 * Just optimistically go on and check SRE anyway --
884 * what's the worst that could happen?
885 */
886 }
887
888 /*
889 * `Note: After a Save or Restore operation completes, the
890 * Save/Restore Error (SRE) flag in the USBSTS register should
891 * be checked to ensure that the operation completed
892 * successfully.'
893 */
894 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
895 device_printf(self, "suspend error, USBSTS.SRE\n");
896 goto out;
897 }
898
899 /* Success! */
900 ok = true;
901
902 out: mutex_exit(&sc->sc_lock);
903 return ok;
904 }
905
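/*
 * Restore the controller state and resume all ports; the counterpart
 * of xhci_suspend().
 */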
906 bool
907 xhci_resume(device_t self, const pmf_qual_t *qual)
908 {
909 struct xhci_softc * const sc = device_private(self);
910 size_t i, j, bn, dci;
911 int port;
912 uint32_t v;
913 bool ok = false;
914
915 XHCIHIST_FUNC(); XHCIHIST_CALLED();
916
917 mutex_enter(&sc->sc_lock);
918 KASSERT(sc->sc_suspender);
919
920 /*
921 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
922 * xHCI Power Management, p. 343
923 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343
924 */
925
926 /*
927 * `4. Restore the Operational Runtime, and VTIO registers with
928 * their previously saved state in the following order:
929 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN,
930 * IMOD, and VTIO.'
931 *
932 * (We don't use VTIO here (for now?).)
933 */
934 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd);
935 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl);
936 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap);
937 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config);
938 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0);
939 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0);
940 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0);
941 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0);
942 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0);
943
944 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */
945
946 /*
947 * `5. Set the Controller Restore State (CRS) flag in the
948 * USBCMD register (5.4.1) to ``1''...'
949 */
950 xhci_op_write_4(sc, XHCI_USBCMD,
951 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS);
952
953 /*
954 * `...and wait for the Restore State Status (RSS) in the
955 * USBSTS register (5.4.2) to transition to ``0''.'
956 */
957 for (i = 0; i < XHCI_WAIT_RSS; i++) {
958 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0)
959 break;
960 usb_delay_ms(&sc->sc_bus, 1);
961 }
962 if (i >= XHCI_WAIT_RSS) {
963 device_printf(self, "resume timeout, USBSTS.RSS\n");
964 goto out;
965 }
966
967 /*
968 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits
969 * are consistent with the RCS values to be written to the
970 * CRCR.'
971 *
972 * XXX Hope just zeroing it is good enough!
973 */
974 xhci_host_dequeue(sc->sc_cr);
975
976 /*
977 * `7. Write the CRCR with the address and RCS value of the
978 * reinitialized Command Ring. Note that this write will
979 * cause the Command Ring to restart at the address
980 * specified by the CRCR.'
981 */
982 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
983 sc->sc_cr->xr_cs);
984
985 /*
986 * `8. Enable the controller by setting Run/Stop (R/S) =
987 * ``1''.'
988 */
989 xhci_op_write_4(sc, XHCI_USBCMD,
990 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS);
991
992 /*
993 * `9. Software shall walk the USB topology and initialize each
994 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and
995 * external hub ports attached to USB devices.'
996 *
997 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2
998 * `Port Resume', 4.15.2.1 `Host Initiated'.
999 *
1000 * XXX We should maybe batch up initiating the state
1001 * transitions, and then wait for them to complete all at once.
1002 */
1003 for (bn = 0; bn < 2; bn++) {
1004 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
1005 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
1006
1007 /* `When a port is in the U3 state: ...' */
1008 v = xhci_op_read_4(sc, port);
1009 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3)
1010 continue;
1011
1012 /*
1013 * `For a USB2 protocol port, software shall
1014 * write a ``15'' (Resume) to the PLS field to
1015 * initiate resume signaling. The port shall
1016 * transition to the Resume substate and the
1017 * xHC shall transmit the resume signaling
1018 * within 1ms (T_URSM). Software shall ensure
1019 * that resume is signaled for at least 20ms
1020 * (T_DRSMDN). Software shall start timing
1021 * T_DRSMDN from the write of ``15'' (Resume)
1022 * to PLS.'
1023 */
1024 if (bn == 1) {
1025 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0);
1026 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1027 v |= XHCI_PS_LWS;
1028 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME);
1029 xhci_op_write_4(sc, port, v);
1030 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
1031 } else {
1032 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0);
1033 }
1034
1035 /*
1036 * `For a USB3 protocol port [and a USB2
1037 * protocol port after transitioning to
1038 * Resume], software shall write a ``0'' (U0)
1039 * to the PLS field...'
1040 */
1041 v = xhci_op_read_4(sc, port);
1042 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1043 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0);
1044 xhci_op_write_4(sc, port, v);
1045
1046 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) {
1047 v = xhci_op_read_4(sc, port);
1048 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0)
1049 break;
1050 usb_delay_ms(&sc->sc_bus, 1);
1051 }
1052 if (j == XHCI_WAIT_PLS_U0) {
1053 device_printf(self,
1054 "resume timeout on bus %zu port %zu\n",
1055 bn, i);
1056 goto out;
1057 }
1058 }
1059 }
1060
1061 /*
1062 * `10. Restart each of the previously Running endpoints by
1063 * ringing their doorbells.'
1064 */
1065 for (i = 0; i < sc->sc_maxslots; i++) {
1066 struct xhci_slot *xs = &sc->sc_slots[i];
1067
1068 /* Skip if the slot is not in use. */
1069 if (xs->xs_idx == 0)
1070 continue;
1071
1072 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
1073 /* Skip if the endpoint is not Running. */
1074 if (xhci_get_epstate(sc, xs, dci) !=
1075 XHCI_EPSTATE_RUNNING)
1076 continue;
1077
1078 /* Ring the doorbell. */
1079 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
1080 }
1081 }
1082
1083 /*
1084 * `Note: After a Save or Restore operation completes, the
1085 * Save/Restore Error (SRE) flag in the USBSTS register should
1086 * be checked to ensure that the operation completed
1087 * successfully.'
1088 */
1089 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
1090 device_printf(self, "resume error, USBSTS.SRE\n");
1091 goto out;
1092 }
1093
1094 /* Resume command issuance. */
1095 sc->sc_suspender = NULL;
1096 cv_broadcast(&sc->sc_cmdbusy_cv);
1097
1098 /* Success! */
1099 ok = true;
1100
1101 out: mutex_exit(&sc->sc_lock);
1102 return ok;
1103 }
1104
1105 bool
1106 xhci_shutdown(device_t self, int flags)
1107 {
1108 return false;
1109 }
1110
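/*
 * Halt and reset the host controller, waiting for CNR (Controller Not
 * Ready) to clear before and after the reset.
 */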
1111 static int
1112 xhci_hc_reset(struct xhci_softc * const sc)
1113 {
1114 uint32_t usbcmd, usbsts;
1115 int i;
1116
1117 /* Wait for Controller Not Ready (CNR) to clear */
1118 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1119 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1120 if ((usbsts & XHCI_STS_CNR) == 0)
1121 break;
1122 usb_delay_ms(&sc->sc_bus, 1);
1123 }
1124 if (i >= XHCI_WAIT_CNR) {
1125 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
1126 return EIO;
1127 }
1128
1129 /* Halt controller */
1130 usbcmd = 0;
1131 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1132 usb_delay_ms(&sc->sc_bus, 1);
1133
1134 /* Reset controller */
1135 usbcmd = XHCI_CMD_HCRST;
1136 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1137 for (i = 0; i < XHCI_WAIT_HCRST; i++) {
1138 /*
1139 * Wait 1ms first. Existing Intel xHCI requires a 1ms delay to
1140 * prevent a system hang (Errata).
1141 */
1142 usb_delay_ms(&sc->sc_bus, 1);
1143 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
1144 if ((usbcmd & XHCI_CMD_HCRST) == 0)
1145 break;
1146 }
1147 if (i >= XHCI_WAIT_HCRST) {
1148 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
1149 return EIO;
1150 }
1151
1152 /* Wait for Controller Not Ready (CNR) to clear after reset */
1153 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1154 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1155 if ((usbsts & XHCI_STS_CNR) == 0)
1156 break;
1157 usb_delay_ms(&sc->sc_bus, 1);
1158 }
1159 if (i >= XHCI_WAIT_CNR) {
1160 aprint_error_dev(sc->sc_dev,
1161 "controller not ready timeout after reset\n");
1162 return EIO;
1163 }
1164
1165 return 0;
1166 }
1167
1168 /* 7.2 xHCI Support Protocol Capability */
1169 static void
1170 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
1171 {
1172 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1173
1174 /* XXX Cache this lot */
1175
1176 const uint32_t w0 = xhci_read_4(sc, ecp);
1177 const uint32_t w4 = xhci_read_4(sc, ecp + 4);
1178 const uint32_t w8 = xhci_read_4(sc, ecp + 8);
1179 const uint32_t wc = xhci_read_4(sc, ecp + 0xc);
1180
1181 aprint_debug_dev(sc->sc_dev,
1182 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc);
1183
1184 if (w4 != XHCI_XECP_USBID)
1185 return;
1186
1187 const int major = XHCI_XECP_SP_W0_MAJOR(w0);
1188 const int minor = XHCI_XECP_SP_W0_MINOR(w0);
1189 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
1190 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);
1191
1192 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
1193 switch (mm) {
1194 case 0x0200:
1195 case 0x0300:
1196 case 0x0301:
1197 case 0x0310:
1198 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
1199 major == 3 ? "ss" : "hs", cpo, cpo + cpc -1);
1200 break;
1201 default:
1202 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
1203 major, minor);
1204 return;
1205 }
1206
1207 const size_t bus = (major == 3) ? 0 : 1;
1208
1209 /* Index arrays with 0..n-1 where ports are numbered 1..n */
1210 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
1211 if (sc->sc_ctlrportmap[cp] != 0) {
1212 aprint_error_dev(sc->sc_dev, "controller port %zu "
1213 "already assigned", cp);
1214 continue;
1215 }
1216
1217 sc->sc_ctlrportbus[cp / NBBY] |=
1218 bus == 0 ? 0 : __BIT(cp % NBBY);
1219
1220 const size_t rhp = sc->sc_rhportcount[bus]++;
1221
1222 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
1223 "bus %zu rhp %zu is %d", bus, rhp,
1224 sc->sc_rhportmap[bus][rhp]);
1225
1226 sc->sc_rhportmap[bus][rhp] = cp + 1;
1227 sc->sc_ctlrportmap[cp] = rhp + 1;
1228 }
1229 }
1230
1231 /* Process extended capabilities */
1232 static void
1233 xhci_ecp(struct xhci_softc *sc)
1234 {
1235 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1236
1237 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4;
1238 while (ecp != 0) {
1239 uint32_t ecr = xhci_read_4(sc, ecp);
1240 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
1241 switch (XHCI_XECP_ID(ecr)) {
1242 case XHCI_ID_PROTOCOLS: {
1243 xhci_id_protocols(sc, ecp);
1244 break;
1245 }
1246 case XHCI_ID_USB_LEGACY: {
1247 uint8_t bios_sem;
1248
1249 /* Take host controller ownership from BIOS */
1250 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
1251 if (bios_sem) {
1252 /* sets xHCI to be owned by OS */
1253 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
1254 aprint_debug_dev(sc->sc_dev,
1255 "waiting for BIOS to give up control\n");
1256 for (int i = 0; i < 5000; i++) {
1257 bios_sem = xhci_read_1(sc, ecp +
1258 XHCI_XECP_BIOS_SEM);
1259 if (bios_sem == 0)
1260 break;
1261 DELAY(1000);
1262 }
1263 if (bios_sem) {
1264 aprint_error_dev(sc->sc_dev,
1265 "timed out waiting for BIOS\n");
1266 }
1267 }
1268 break;
1269 }
1270 default:
1271 break;
1272 }
1273 ecr = xhci_read_4(sc, ecp);
1274 if (XHCI_XECP_NEXT(ecr) == 0) {
1275 ecp = 0;
1276 } else {
1277 ecp += XHCI_XECP_NEXT(ecr) * 4;
1278 }
1279 }
1280 }
1281
1282 #define XHCI_HCCPREV1_BITS \
1283 "\177\020" /* New bitmask */ \
1284 "f\020\020XECP\0" \
1285 "f\014\4MAXPSA\0" \
1286 "b\013CFC\0" \
1287 "b\012SEC\0" \
1288 "b\011SBD\0" \
1289 "b\010FSE\0" \
1290 "b\7NSS\0" \
1291 "b\6LTC\0" \
1292 "b\5LHRC\0" \
1293 "b\4PIND\0" \
1294 "b\3PPC\0" \
1295 "b\2CZC\0" \
1296 "b\1BNC\0" \
1297 "b\0AC64\0" \
1298 "\0"
1299 #define XHCI_HCCV1_x_BITS \
1300 "\177\020" /* New bitmask */ \
1301 "f\020\020XECP\0" \
1302 "f\014\4MAXPSA\0" \
1303 "b\013CFC\0" \
1304 "b\012SEC\0" \
1305 "b\011SPC\0" \
1306 "b\010PAE\0" \
1307 "b\7NSS\0" \
1308 "b\6LTC\0" \
1309 "b\5LHRC\0" \
1310 "b\4PIND\0" \
1311 "b\3PPC\0" \
1312 "b\2CSZ\0" \
1313 "b\1BNC\0" \
1314 "b\0AC64\0" \
1315 "\0"
1316
1317 #define XHCI_HCC2_BITS \
1318 "\177\020" /* New bitmask */ \
1319 "b\7ETC_TSC\0" \
1320 "b\6ETC\0" \
1321 "b\5CIC\0" \
1322 "b\4LEC\0" \
1323 "b\3CTC\0" \
1324 "b\2FSC\0" \
1325 "b\1CMC\0" \
1326 "b\0U3C\0" \
1327 "\0"
1328
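/* Enable interrupts on interrupter 0 and set Run/Stop to start the xHC. */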
1329 void
1330 xhci_start(struct xhci_softc *sc)
1331 {
1332 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
1333 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
1334 /* Intel xhci needs interrupt rate moderated. */
1335 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
1336 else
1337 xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
1338 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
1339 xhci_rt_read_4(sc, XHCI_IMOD(0)));
1340
1341 /* Go! */
1342 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
1343 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n",
1344 xhci_op_read_4(sc, XHCI_USBCMD));
1345 }
1346
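/*
 * Map the capability, operational, runtime and doorbell register
 * subregions, reset the controller, allocate the scratchpad buffers,
 * DCBAA, command and event rings, and program the controller.
 */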
1347 int
1348 xhci_init(struct xhci_softc *sc)
1349 {
1350 bus_size_t bsz;
1351 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff;
1352 uint32_t pagesize, config;
1353 int i = 0;
1354 uint16_t hciversion;
1355 uint8_t caplength;
1356
1357 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1358
1359 /* Set up the bus struct for the usb 3 and usb 2 buses */
1360 sc->sc_bus.ub_methods = &xhci_bus_methods;
1361 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
1362 sc->sc_bus.ub_usedma = true;
1363 sc->sc_bus.ub_hcpriv = sc;
1364
1365 sc->sc_bus2.ub_methods = &xhci_bus_methods;
1366 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
1367 sc->sc_bus2.ub_revision = USBREV_2_0;
1368 sc->sc_bus2.ub_usedma = true;
1369 sc->sc_bus2.ub_hcpriv = sc;
1370 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;
1371
1372 caplength = xhci_read_1(sc, XHCI_CAPLENGTH);
1373 hciversion = xhci_read_2(sc, XHCI_HCIVERSION);
1374
1375 if (hciversion < XHCI_HCIVERSION_0_96 ||
1376 hciversion >= 0x0200) {
1377 aprint_normal_dev(sc->sc_dev,
1378 "xHCI version %x.%x not known to be supported\n",
1379 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1380 } else {
1381 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
1382 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1383 }
1384
1385 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
1386 &sc->sc_cbh) != 0) {
1387 aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
1388 return ENOMEM;
1389 }
1390
1391 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
1392 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
1393 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
1394 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
1395 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
1396 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
1397 aprint_debug_dev(sc->sc_dev,
1398 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);
1399
1400 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
1401 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32;
1402
1403 char sbuf[128];
1404 if (hciversion < XHCI_HCIVERSION_1_0)
1405 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc);
1406 else
1407 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc);
1408 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
1409 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n",
1410 XHCI_HCC_XECP(sc->sc_hcc) * 4);
1411 if (hciversion >= XHCI_HCIVERSION_1_1) {
1412 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
1413 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2);
1414 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
1415 }
1416
1417 /* default all ports to bus 0, i.e. usb 3 */
1418 sc->sc_ctlrportbus = kmem_zalloc(
1419 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
1420 sc->sc_ctlrportmap = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1421
1422 /* controller port to bus roothub port map */
1423 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
1424 sc->sc_rhportmap[j] = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1425 }
1426
1427 /*
1428 * Process all Extended Capabilities
1429 */
1430 xhci_ecp(sc);
1431
1432 bsz = XHCI_PORTSC(sc->sc_maxports);
1433 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
1434 &sc->sc_obh) != 0) {
1435 aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
1436 return ENOMEM;
1437 }
1438
1439 dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
1440 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
1441 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
1442 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
1443 return ENOMEM;
1444 }
1445
1446 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
1447 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
1448 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
1449 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
1450 return ENOMEM;
1451 }
1452
1453 int rv;
1454 rv = xhci_hc_reset(sc);
1455 if (rv != 0) {
1456 return rv;
1457 }
1458
1459 if (sc->sc_vendor_init)
1460 sc->sc_vendor_init(sc);
1461
1462 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
1463 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
1464 pagesize = ffs(pagesize);
1465 if (pagesize == 0) {
1466 aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
1467 return EIO;
1468 }
1469 sc->sc_pgsz = 1 << (12 + (pagesize - 1));
1470 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
1471 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
1472 (uint32_t)sc->sc_maxslots);
1473 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);
1474
1475 int err;
1476 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
1477 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
1478 if (sc->sc_maxspbuf != 0) {
1479 err = usb_allocmem(&sc->sc_bus,
1480 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
1481 USBMALLOC_COHERENT | USBMALLOC_ZERO,
1482 &sc->sc_spbufarray_dma);
1483 if (err) {
1484 aprint_error_dev(sc->sc_dev,
1485 "spbufarray init fail, err %d\n", err);
1486 return ENOMEM;
1487 }
1488
1489 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
1490 sc->sc_maxspbuf, KM_SLEEP);
1491 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
1492 for (i = 0; i < sc->sc_maxspbuf; i++) {
1493 usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
1494 /* allocate contexts */
1495 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz,
1496 sc->sc_pgsz, USBMALLOC_COHERENT | USBMALLOC_ZERO,
1497 dma);
1498 if (err) {
1499 aprint_error_dev(sc->sc_dev,
1500 "spbufarray_dma init fail, err %d\n", err);
1501 rv = ENOMEM;
1502 goto bad1;
1503 }
1504 spbufarray[i] = htole64(DMAADDR(dma, 0));
1505 usb_syncmem(dma, 0, sc->sc_pgsz,
1506 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1507 }
1508
1509 usb_syncmem(&sc->sc_spbufarray_dma, 0,
1510 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
1511 }
1512
1513 config = xhci_op_read_4(sc, XHCI_CONFIG);
1514 config &= ~0xFF;
1515 config |= sc->sc_maxslots & 0xFF;
1516 xhci_op_write_4(sc, XHCI_CONFIG, config);
1517
1518 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
1519 XHCI_COMMAND_RING_SEGMENTS_ALIGN);
1520 if (err) {
1521 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n",
1522 err);
1523 rv = ENOMEM;
1524 goto bad1;
1525 }
1526
1527 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
1528 XHCI_EVENT_RING_SEGMENTS_ALIGN);
1529 if (err) {
1530 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
1531 err);
1532 rv = ENOMEM;
1533 goto bad2;
1534 }
1535
1536 usb_dma_t *dma;
1537 size_t size;
1538 size_t align;
1539
1540 dma = &sc->sc_eventst_dma;
1541 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
1542 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
1543 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
1544 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
1545 err = usb_allocmem(&sc->sc_bus, size, align,
1546 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1547 if (err) {
1548 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
1549 err);
1550 rv = ENOMEM;
1551 goto bad3;
1552 }
1553
1554 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
1555 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
1556 KERNADDR(&sc->sc_eventst_dma, 0),
1557 sc->sc_eventst_dma.udma_block->size);
1558
1559 dma = &sc->sc_dcbaa_dma;
1560 size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
1561 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
1562 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
1563 err = usb_allocmem(&sc->sc_bus, size, align,
1564 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1565 if (err) {
1566 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
1567 rv = ENOMEM;
1568 goto bad4;
1569 }
1570 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
1571 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
1572 KERNADDR(&sc->sc_dcbaa_dma, 0),
1573 sc->sc_dcbaa_dma.udma_block->size);
1574
1575 if (sc->sc_maxspbuf != 0) {
1576 /*
1577 * DCBAA entry 0 holds the scratchpad buffer array pointer.
1578 */
1579 *(uint64_t *)KERNADDR(dma, 0) =
1580 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
1581 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1582 }
1583
1584 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
1585 KM_SLEEP);
1586 if (sc->sc_slots == NULL) {
1587 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
1588 rv = ENOMEM;
1589 goto bad;
1590 }
1591
1592 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
1593 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
1594 if (sc->sc_xferpool == NULL) {
1595 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
1596 err);
1597 rv = ENOMEM;
1598 goto bad;
1599 }
1600
1601 cv_init(&sc->sc_command_cv, "xhcicmd");
1602 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
1603 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
1604 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
1605
1606 struct xhci_erste *erst;
1607 erst = KERNADDR(&sc->sc_eventst_dma, 0);
1608 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0));
1609 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb);
1610 erst[0].erste_3 = htole32(0);
1611 usb_syncmem(&sc->sc_eventst_dma, 0,
1612 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);
1613
1614 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
1615 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
1616 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) |
1617 XHCI_ERDP_BUSY);
1618
1619 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
1620 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1621 sc->sc_cr->xr_cs);
1622
1623 xhci_barrier(sc, BUS_SPACE_BARRIER_WRITE);
1624
1625 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
1626 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);
1627
1628 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
1629 xhci_start(sc);
1630
1631 return 0;
1632
1633 bad:
1634 if (sc->sc_xferpool) {
1635 pool_cache_destroy(sc->sc_xferpool);
1636 sc->sc_xferpool = NULL;
1637 }
1638
1639 if (sc->sc_slots) {
1640 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
1641 sc->sc_maxslots);
1642 sc->sc_slots = NULL;
1643 }
1644
1645 usb_freemem(&sc->sc_bus, &sc->sc_dcbaa_dma);
1646 bad4:
1647 usb_freemem(&sc->sc_bus, &sc->sc_eventst_dma);
1648 bad3:
1649 xhci_ring_free(sc, &sc->sc_er);
1650 bad2:
1651 xhci_ring_free(sc, &sc->sc_cr);
1652 i = sc->sc_maxspbuf;
1653 bad1:
1654 for (int j = 0; j < i; j++)
1655 usb_freemem(&sc->sc_bus, &sc->sc_spbuf_dma[j]);
1656 usb_freemem(&sc->sc_bus, &sc->sc_spbufarray_dma);
1657
1658 return rv;
1659 }
1660
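/* True if either root hub bus is currently being operated by polling. */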
1661 static inline bool
1662 xhci_polling_p(struct xhci_softc * const sc)
1663 {
1664 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
1665 }
1666
1667 int
1668 xhci_intr(void *v)
1669 {
1670 struct xhci_softc * const sc = v;
1671 int ret = 0;
1672
1673 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1674
1675 if (sc == NULL)
1676 return 0;
1677
1678 mutex_spin_enter(&sc->sc_intr_lock);
1679
1680 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1681 goto done;
1682
1683 /* If we get an interrupt while polling, then just ignore it. */
1684 if (xhci_polling_p(sc)) {
1685 #ifdef DIAGNOSTIC
1686 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1687 #endif
1688 goto done;
1689 }
1690
1691 ret = xhci_intr1(sc);
1692 if (ret) {
1693 KASSERT(sc->sc_child || sc->sc_child2);
1694
1695 /*
1696 * One of the child busses could already be detached. It doesn't
1697 * matter on which of the two the softintr is scheduled.
1698 */
1699 if (sc->sc_child)
1700 usb_schedsoftintr(&sc->sc_bus);
1701 else
1702 usb_schedsoftintr(&sc->sc_bus2);
1703 }
1704 done:
1705 mutex_spin_exit(&sc->sc_intr_lock);
1706 return ret;
1707 }
1708
1709 int
1710 xhci_intr1(struct xhci_softc * const sc)
1711 {
1712 uint32_t usbsts;
1713 uint32_t iman;
1714
1715 XHCIHIST_FUNC();
1716
1717 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1718 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
1719 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
1720 XHCI_STS_HCE)) == 0) {
1721 DPRINTFN(16, "ignored intr not for %jd",
1722 device_unit(sc->sc_dev), 0, 0, 0);
1723 return 0;
1724 }
1725
1726 /*
1727 * Clear EINT and other transient flags, so as not to misinterpret
1728 * the next shared interrupt. Also, to avoid a race, EINT must be
1729 * cleared before XHCI_IMAN_INTR_PEND is cleared.
1730 */
1731 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & XHCI_STS_RSVDP0);
1732
1733 #ifdef XHCI_DEBUG
1734 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1735 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1736 #endif
1737
1738 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1739 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1740 iman |= XHCI_IMAN_INTR_PEND;
1741 xhci_rt_write_4(sc, XHCI_IMAN(0), iman);
1742
1743 #ifdef XHCI_DEBUG
1744 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1745 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1746 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1747 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1748 #endif
1749
1750 return 1;
1751 }
1752
1753 /*
1754 * 3 port speed types used in USB stack
1755 *
1756 * usbdi speed
1757 * definition: USB_SPEED_* in usb.h
1758 * They are used in struct usbd_device in USB stack.
1759 * ioctl interface uses these values too.
1760 * port_status speed
1761 * definition: UPS_*_SPEED in usb.h
1762 * They are used in usb_port_status_t and valid only for USB 2.0.
1763 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus
1764 * of usb_port_status_ext_t indicates port speed.
1765 * Note that some 3.0 values overlap with 2.0 values.
1766 * (e.g. 0x200 means UPS_PORT_POWER_SS in SS and
1767 * means UPS_LOW_SPEED in HS.)
1768 * port status returned from hub also uses these values.
1769 * On NetBSD UPS_OTHER_SPEED indicates port speed is super speed
1770 * or more.
1771 * xspeed:
1772 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
1773 * They are used in only slot context and PORTSC reg of xhci.
1774 * The difference between usbdi speed and xspeed is
1775 * that FS and LS values are swapped.
1776 */
1777
1778 /* convert usbdi speed to xspeed */
1779 static int
1780 xhci_speed2xspeed(int speed)
1781 {
1782 switch (speed) {
1783 case USB_SPEED_LOW: return 2;
1784 case USB_SPEED_FULL: return 1;
1785 default: return speed;
1786 }
1787 }
1788
1789 #if 0
1790 /* convert xspeed to usbdi speed */
1791 static int
1792 xhci_xspeed2speed(int xspeed)
1793 {
1794 switch (xspeed) {
1795 case 1: return USB_SPEED_FULL;
1796 case 2: return USB_SPEED_LOW;
1797 default: return xspeed;
1798 }
1799 }
1800 #endif
1801
1802 /* convert xspeed to port status speed */
1803 static int
1804 xhci_xspeed2psspeed(int xspeed)
1805 {
1806 switch (xspeed) {
1807 case 0: return 0;
1808 case 1: return UPS_FULL_SPEED;
1809 case 2: return UPS_LOW_SPEED;
1810 case 3: return UPS_HIGH_SPEED;
1811 default: return UPS_OTHER_SPEED;
1812 }
1813 }
1814
1815 /*
1816 * Construct input contexts and issue TRB to open pipe.
1817 */
1818 static usbd_status
1819 xhci_configure_endpoint(struct usbd_pipe *pipe)
1820 {
1821 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1822 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1823 #ifdef USB_DEBUG
1824 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1825 #endif
1826 struct xhci_soft_trb trb;
1827 usbd_status err;
1828
1829 XHCIHIST_FUNC();
1830 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
1831 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
1832 pipe->up_endpoint->ue_edesc->bmAttributes);
1833
1834 /* XXX ensure input context is available? */
1835
1836 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz);
1837
1838 /* set up context */
1839 xhci_setup_ctx(pipe);
1840
1841 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0),
1842 sc->sc_ctxsz * 1);
1843 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs,
1844 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1);
1845
1846 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1847 trb.trb_2 = 0;
1848 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1849 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1850
1851 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
1852
1853 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1854 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci),
1855 sc->sc_ctxsz * 1);
1856
1857 return err;
1858 }
1859
1860 #if 0
1861 static usbd_status
1862 xhci_unconfigure_endpoint(struct usbd_pipe *pipe)
1863 {
1864 #ifdef USB_DEBUG
1865 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1866 #endif
1867
1868 XHCIHIST_FUNC();
1869 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
1870
1871 return USBD_NORMAL_COMPLETION;
1872 }
1873 #endif
1874
1875 /* 4.6.8, 6.4.3.7 */
1876 static usbd_status
1877 xhci_reset_endpoint_locked(struct usbd_pipe *pipe)
1878 {
1879 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1880 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1881 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1882 struct xhci_soft_trb trb;
1883 usbd_status err;
1884
1885 XHCIHIST_FUNC();
1886 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1887
1888 KASSERT(mutex_owned(&sc->sc_lock));
1889
1890 trb.trb_0 = 0;
1891 trb.trb_2 = 0;
1892 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1893 XHCI_TRB_3_EP_SET(dci) |
1894 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP);
1895
1896 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1897
1898 return err;
1899 }
1900
1901 static usbd_status
1902 xhci_reset_endpoint(struct usbd_pipe *pipe)
1903 {
1904 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1905
1906 mutex_enter(&sc->sc_lock);
1907 usbd_status ret = xhci_reset_endpoint_locked(pipe);
1908 mutex_exit(&sc->sc_lock);
1909
1910 return ret;
1911 }
1912
1913 /*
1914 * 4.6.9, 6.4.3.8
1915 * Stop execution of TDs on xfer ring.
1916 * Should be called with sc_lock held.
1917 */
1918 static usbd_status
1919 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci,
1920 uint32_t trb3flags)
1921 {
1922 struct xhci_soft_trb trb;
1923 usbd_status err;
1924
1925 XHCIHIST_FUNC();
1926 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1927
1928 KASSERT(mutex_owned(&sc->sc_lock));
1929
1930 trb.trb_0 = 0;
1931 trb.trb_2 = 0;
1932 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1933 XHCI_TRB_3_EP_SET(dci) |
1934 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) |
1935 trb3flags;
1936
1937 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1938
1939 return err;
1940 }
1941
1942 static usbd_status
1943 xhci_stop_endpoint(struct usbd_pipe *pipe)
1944 {
1945 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1946 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1947 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1948
1949 XHCIHIST_FUNC();
1950 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1951
1952 KASSERT(mutex_owned(&sc->sc_lock));
1953
1954 return xhci_stop_endpoint_cmd(sc, xs, dci, 0);
1955 }
1956
1957 /*
1958 * Set TR Dequeue Pointer.
1959 * xHCI 1.1 4.6.10 6.4.3.9
1960  * Purge all of the TRBs on the ring and reinitialize the ring.
1961  * Set the TR Dequeue Pointer to 0 and the Cycle State to 1.
1962 * EPSTATE of endpoint must be ERROR or STOPPED, otherwise CONTEXT_STATE
1963 * error will be generated.
1964 */
1965 static usbd_status
1966 xhci_set_dequeue_locked(struct usbd_pipe *pipe)
1967 {
1968 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1969 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1970 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1971 struct xhci_ring * const xr = xs->xs_xr[dci];
1972 struct xhci_soft_trb trb;
1973 usbd_status err;
1974
1975 XHCIHIST_FUNC();
1976 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1977
1978 KASSERT(mutex_owned(&sc->sc_lock));
1979 KASSERT(xr != NULL);
1980
1981 xhci_host_dequeue(xr);
1982
1983 /* set DCS */
1984 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
1985 trb.trb_2 = 0;
1986 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1987 XHCI_TRB_3_EP_SET(dci) |
1988 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
1989
1990 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1991
1992 return err;
1993 }
1994
1995 static usbd_status
1996 xhci_set_dequeue(struct usbd_pipe *pipe)
1997 {
1998 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1999
2000 mutex_enter(&sc->sc_lock);
2001 usbd_status ret = xhci_set_dequeue_locked(pipe);
2002 mutex_exit(&sc->sc_lock);
2003
2004 return ret;
2005 }
2006
2007 /*
2008 * Open new pipe: called from usbd_setup_pipe_flags.
2009  * Fills in the pipe methods.
2010  * If the pipe is not for ep0, calls configure_endpoint.
2011 */
2012 static usbd_status
2013 xhci_open(struct usbd_pipe *pipe)
2014 {
2015 struct usbd_device * const dev = pipe->up_dev;
2016 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
2017 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
2018 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2019 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2020 const u_int dci = xhci_ep_get_dci(ed);
2021 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
2022 usbd_status err;
2023
2024 XHCIHIST_FUNC();
2025 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr,
2026 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed);
2027 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx",
2028 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress,
2029 ed->bmAttributes);
2030 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize),
2031 ed->bInterval, 0, 0);
2032
2033 if (sc->sc_dying)
2034 return USBD_IOERROR;
2035
2036 /* Root Hub */
2037 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) {
2038 switch (ed->bEndpointAddress) {
2039 case USB_CONTROL_ENDPOINT:
2040 pipe->up_methods = &roothub_ctrl_methods;
2041 break;
2042 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
2043 pipe->up_methods = &xhci_root_intr_methods;
2044 break;
2045 default:
2046 pipe->up_methods = NULL;
2047 DPRINTFN(0, "bad bEndpointAddress 0x%02jx",
2048 ed->bEndpointAddress, 0, 0, 0);
2049 return USBD_INVAL;
2050 }
2051 return USBD_NORMAL_COMPLETION;
2052 }
2053
2054 switch (xfertype) {
2055 case UE_CONTROL:
2056 pipe->up_methods = &xhci_device_ctrl_methods;
2057 break;
2058 case UE_ISOCHRONOUS:
2059 pipe->up_methods = &xhci_device_isoc_methods;
2060 pipe->up_serialise = false;
2061 xpipe->xp_isoc_next = -1;
2062 break;
2063 case UE_BULK:
2064 pipe->up_methods = &xhci_device_bulk_methods;
2065 break;
2066 case UE_INTERRUPT:
2067 pipe->up_methods = &xhci_device_intr_methods;
2068 break;
2069 default:
2070 return USBD_IOERROR;
2071 break;
2072 }
2073
2074 KASSERT(xs != NULL);
2075 KASSERT(xs->xs_xr[dci] == NULL);
2076
2077 /* allocate transfer ring */
2078 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS,
2079 XHCI_TRB_ALIGN);
2080 if (err) {
2081 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0);
2082 return err;
2083 }
2084
2085 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT)
2086 return xhci_configure_endpoint(pipe);
2087
2088 return USBD_NORMAL_COMPLETION;
2089 }
2090
2091 /*
2092 * Closes pipe, called from usbd_kill_pipe via close methods.
2093 * If the endpoint to be closed is ep0, disable_slot.
2094 * Should be called with sc_lock held.
2095 */
2096 static void
2097 xhci_close_pipe(struct usbd_pipe *pipe)
2098 {
2099 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2100 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2101 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2102 const u_int dci = xhci_ep_get_dci(ed);
2103 struct xhci_soft_trb trb;
2104 uint32_t *cp;
2105
2106 XHCIHIST_FUNC();
2107
2108 if (sc->sc_dying)
2109 return;
2110
2111 /* xs is uninitialized before xhci_init_slot */
2112 if (xs == NULL || xs->xs_idx == 0)
2113 return;
2114
2115 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2116 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2117
2118 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
2119 KASSERT(mutex_owned(&sc->sc_lock));
2120
2121 if (pipe->up_dev->ud_depth == 0)
2122 return;
2123
2124 if (dci == XHCI_DCI_EP_CONTROL) {
2125 DPRINTFN(4, "closing ep0", 0, 0, 0, 0);
2126 /* This frees all rings */
2127 xhci_disable_slot(sc, xs->xs_idx);
2128 return;
2129 }
2130
2131 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED)
2132 (void)xhci_stop_endpoint(pipe);
2133
2134 /*
2135 * set appropriate bit to be dropped.
2136 * don't set DC bit to 1, otherwise all endpoints
2137 * would be deconfigured.
2138 */
2139 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
2140 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));
2141 cp[1] = htole32(0);
2142
2143 /* XXX should be most significant one, not dci? */
2144 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
2145 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci));
2146
2147 /* configure ep context performs an implicit dequeue */
2148 xhci_host_dequeue(xs->xs_xr[dci]);
2149
2150 /* sync input contexts before they are read from memory */
2151 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
2152
2153 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
2154 trb.trb_2 = 0;
2155 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2156 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
2157
2158 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
2159 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2160
2161 xhci_ring_free(sc, &xs->xs_xr[dci]);
2162 xs->xs_xr[dci] = NULL;
2163 }
2164
2165 /*
2166 * Abort transfer.
2167 * Should be called with sc_lock held.
2168 */
2169 static void
2170 xhci_abortx(struct usbd_xfer *xfer)
2171 {
2172 XHCIHIST_FUNC();
2173 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2174 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
2175 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
2176
2177 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
2178 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
2179
2180 KASSERT(mutex_owned(&sc->sc_lock));
2181 ASSERT_SLEEPABLE();
2182
2183 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2184 xfer->ux_status == USBD_TIMEOUT),
2185 "bad abort status: %d", xfer->ux_status);
2186
2187 /*
2188 * If we're dying, skip the hardware action and just notify the
2189 * software that we're done.
2190 */
2191 if (sc->sc_dying) {
2192 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer,
2193 xfer->ux_status, 0, 0);
2194 goto dying;
2195 }
2196
2197 /*
2198 * HC Step 1: Stop execution of TD on the ring.
2199 */
2200 switch (xhci_get_epstate(sc, xs, dci)) {
2201 case XHCI_EPSTATE_HALTED:
2202 (void)xhci_reset_endpoint_locked(xfer->ux_pipe);
2203 break;
2204 case XHCI_EPSTATE_STOPPED:
2205 break;
2206 default:
2207 (void)xhci_stop_endpoint(xfer->ux_pipe);
2208 break;
2209 }
2210 #ifdef DIAGNOSTIC
2211 uint32_t epst = xhci_get_epstate(sc, xs, dci);
2212 if (epst != XHCI_EPSTATE_STOPPED)
2213 DPRINTFN(4, "dci %ju not stopped %ju", dci, epst, 0, 0);
2214 #endif
2215
2216 /*
2217 * HC Step 2: Remove any vestiges of the xfer from the ring.
2218 */
2219 xhci_set_dequeue_locked(xfer->ux_pipe);
2220
2221 /*
2222 * Final Step: Notify completion to waiting xfers.
2223 */
2224 dying:
2225 usb_transfer_complete(xfer);
2226 DPRINTFN(14, "end", 0, 0, 0, 0);
2227
2228 KASSERT(mutex_owned(&sc->sc_lock));
2229 }
2230
2231 static void
2232 xhci_host_dequeue(struct xhci_ring * const xr)
2233 {
2234 /* When dequeueing the controller, update our struct copy too */
2235 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE);
2236 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE,
2237 BUS_DMASYNC_PREWRITE);
2238 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies));
2239
2240 xr->xr_ep = 0;
2241 xr->xr_cs = 1;
2242 }
2243
2244 /*
2245 * Recover STALLed endpoint.
2246 * xHCI 1.1 sect 4.10.2.1
2247  * Issue RESET_EP to recover from the halt condition and SET_TR_DEQUEUE to
2248  * remove all transfers from the transfer ring.
2249 * These are done in thread context asynchronously.
2250 */
2251 static void
2252 xhci_clear_endpoint_stall_async_task(void *cookie)
2253 {
2254 struct usbd_xfer * const xfer = cookie;
2255 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2256 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
2257 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
2258 struct xhci_ring * const tr = xs->xs_xr[dci];
2259
2260 XHCIHIST_FUNC();
2261 XHCIHIST_CALLARGS("xfer %#jx slot %ju dci %ju", (uintptr_t)xfer, xs->xs_idx,
2262 dci, 0);
2263
2264 /*
2265 * XXXMRG: Stall task can run after slot is disabled when yanked.
2266 * This hack notices that the xs has been memset() in
2267 * xhci_disable_slot() and returns. Both xhci_reset_endpoint()
2268 * and xhci_set_dequeue() rely upon a valid ring setup for correct
2269 * operation, and the latter will fault, as would
2270 * usb_transfer_complete() if it got that far.
2271 */
2272 if (xs->xs_idx == 0) {
2273 DPRINTFN(4, "ends xs_idx is 0", 0, 0, 0, 0);
2274 return;
2275 }
2276
2277 KASSERT(tr != NULL);
2278
2279 xhci_reset_endpoint(xfer->ux_pipe);
2280 xhci_set_dequeue(xfer->ux_pipe);
2281
2282 mutex_enter(&sc->sc_lock);
2283 tr->is_halted = false;
2284 usb_transfer_complete(xfer);
2285 mutex_exit(&sc->sc_lock);
2286 DPRINTFN(4, "ends", 0, 0, 0, 0);
2287 }
2288
2289 static usbd_status
2290 xhci_clear_endpoint_stall_async(struct usbd_xfer *xfer)
2291 {
2292 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2293 struct xhci_pipe * const xp = (struct xhci_pipe *)xfer->ux_pipe;
2294
2295 XHCIHIST_FUNC();
2296 XHCIHIST_CALLARGS("xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2297
2298 if (sc->sc_dying) {
2299 return USBD_IOERROR;
2300 }
2301
2302 usb_init_task(&xp->xp_async_task,
2303 xhci_clear_endpoint_stall_async_task, xfer, USB_TASKQ_MPSAFE);
2304 usb_add_task(xfer->ux_pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC);
2305 DPRINTFN(4, "ends", 0, 0, 0, 0);
2306
2307 return USBD_NORMAL_COMPLETION;
2308 }
2309
2310 /* Process roothub port status/change events and notify uhub_intr. */
2311 static void
2312 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport)
2313 {
2314 XHCIHIST_FUNC();
2315 XHCIHIST_CALLARGS("xhci%jd: port %ju status change",
2316 device_unit(sc->sc_dev), ctlrport, 0, 0);
2317
2318 if (ctlrport > sc->sc_maxports)
2319 return;
2320
2321 const size_t bn = xhci_ctlrport2bus(sc, ctlrport);
2322 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport);
2323 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn];
2324
2325 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change",
2326 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer);
2327
2328 if (xfer == NULL)
2329 return;
2330 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
2331
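	/*
	 * Report the change the way an external hub would: set the bit
	 * corresponding to roothub port 'rhp' in the status-change bitmap
	 * carried by the pending root intr xfer.
	 */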
2332 uint8_t *p = xfer->ux_buf;
2333 memset(p, 0, xfer->ux_length);
2334 p[rhp / NBBY] |= 1 << (rhp % NBBY);
2335 xfer->ux_actlen = xfer->ux_length;
2336 xfer->ux_status = USBD_NORMAL_COMPLETION;
2337 usb_transfer_complete(xfer);
2338 }
2339
2340 /* Process Transfer Events */
2341 static void
2342 xhci_event_transfer(struct xhci_softc * const sc,
2343 const struct xhci_trb * const trb)
2344 {
2345 uint64_t trb_0;
2346 uint32_t trb_2, trb_3;
2347 uint8_t trbcode;
2348 u_int slot, dci;
2349 struct xhci_slot *xs;
2350 struct xhci_ring *xr;
2351 struct xhci_xfer *xx;
2352 struct usbd_xfer *xfer;
2353 usbd_status err;
2354
2355 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2356
2357 trb_0 = le64toh(trb->trb_0);
2358 trb_2 = le32toh(trb->trb_2);
2359 trb_3 = le32toh(trb->trb_3);
2360 trbcode = XHCI_TRB_2_ERROR_GET(trb_2);
2361 slot = XHCI_TRB_3_SLOT_GET(trb_3);
2362 dci = XHCI_TRB_3_EP_GET(trb_3);
2363 xs = &sc->sc_slots[slot];
2364 xr = xs->xs_xr[dci];
2365
2366 /* sanity check */
2367 KASSERT(xr != NULL);
2368 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots,
2369 "invalid xs_idx %u slot %u", xs->xs_idx, slot);
2370
2371 int idx = 0;
2372 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2373 if (xhci_trb_get_idx(xr, trb_0, &idx)) {
2374 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0);
2375 return;
2376 }
2377 xx = xr->xr_cookies[idx];
2378
2379 /* clear cookie of consumed TRB */
2380 xr->xr_cookies[idx] = NULL;
2381
2382 /*
2383 		 * xx is NULL if the pipe is open but no xfer has been started.
2384 		 * This happens when stopping an idle pipe.
2385 */
2386 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) {
2387 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju",
2388 idx, (uintptr_t)xx, trbcode, dci);
2389 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0,
2390 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)),
2391 0, 0);
2392 return;
2393 }
2394 } else {
2395 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. */
2396 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
2397 }
2398 /* XXX this may not happen */
2399 if (xx == NULL) {
2400 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
2401 return;
2402 }
2403 xfer = &xx->xx_xfer;
2404 /* XXX this may happen when detaching */
2405 if (xfer == NULL) {
2406 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2407 (uintptr_t)xx, trb_0, 0, 0);
2408 return;
2409 }
2410 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2411 /* XXX I dunno why this happens */
2412 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2413
2414 if (!xfer->ux_pipe->up_repeat &&
2415 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2416 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2417 0, 0, 0);
2418 return;
2419 }
2420
2421 const uint8_t xfertype =
2422 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2423
2424 /* 4.11.5.2 Event Data TRB */
2425 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2426 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2427 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2428 if ((trb_0 & 0x3) == 0x3) {
2429 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2430 }
2431 }
2432
2433 switch (trbcode) {
2434 case XHCI_TRB_ERROR_SHORT_PKT:
2435 case XHCI_TRB_ERROR_SUCCESS:
2436 /*
2437 * A ctrl transfer can generate two events if it has a Data
2438 * stage. A short data stage can be OK and should not
2439 * complete the transfer as the status stage needs to be
2440 * performed.
2441 *
2442 * Note: Data and Status stage events point at same xfer.
2443 * ux_actlen and ux_dmabuf will be passed to
2444 * usb_transfer_complete after the Status stage event.
2445 *
2446 		 * Which stage generated the event can be distinguished:
2447 		 * + by checking the least significant 3 bits of trb_0 if ED==1
2448 		 *   (see xhci_device_ctrl_start);
2449 		 * + by checking the type of the original TRB if ED==0.
2450 		 *
2451 		 * In addition, intr, bulk, and isoc transfers currently
2452 		 * consist of a single TD, so the "skip" is not needed.
2453 		 * Ctrl xfers use EVENT_DATA, and the others do not.
2454 		 * Thus the driver can switch the flow by checking the ED bit.
2455 */
2456 if (xfertype == UE_ISOCHRONOUS) {
2457 xfer->ux_frlengths[xx->xx_isoc_done] -=
2458 XHCI_TRB_2_REM_GET(trb_2);
2459 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2460 if (++xx->xx_isoc_done < xfer->ux_nframes)
2461 return;
2462 } else
2463 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2464 if (xfer->ux_actlen == 0)
2465 xfer->ux_actlen = xfer->ux_length -
2466 XHCI_TRB_2_REM_GET(trb_2);
2467 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2468 == XHCI_TRB_TYPE_DATA_STAGE) {
2469 return;
2470 }
2471 } else if ((trb_0 & 0x3) == 0x3) {
2472 return;
2473 }
2474 err = USBD_NORMAL_COMPLETION;
2475 break;
2476 case XHCI_TRB_ERROR_STOPPED:
2477 case XHCI_TRB_ERROR_LENGTH:
2478 case XHCI_TRB_ERROR_STOPPED_SHORT:
2479 err = USBD_IOERROR;
2480 break;
2481 case XHCI_TRB_ERROR_STALL:
2482 case XHCI_TRB_ERROR_BABBLE:
2483 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2484 xr->is_halted = true;
2485 /*
2486 * Try to claim this xfer for completion. If it has already
2487 * completed or aborted, drop it on the floor.
2488 */
2489 if (!usbd_xfer_trycomplete(xfer))
2490 return;
2491
2492 /*
2493 		 * Stalled endpoints can be recovered by issuing
2494 		 * a TYPE_RESET_EP command TRB to the xHCI instead of
2495 		 * issuing a CLEAR_FEATURE UF_ENDPOINT_HALT request
2496 		 * on the endpoint. However, this function may be
2497 		 * called from softint context (e.g. from umass);
2498 		 * in that case the driver would hit a KASSERT in
2499 		 * cv_timedwait in xhci_do_command.
2500 		 * To avoid this, run reset_endpoint and
2501 		 * usb_transfer_complete asynchronously in the usb
2502 		 * task thread (and then umass issues clear
2503 		 * UF_ENDPOINT_HALT).
2504 */
2505
2506 /* Override the status. */
2507 xfer->ux_status = USBD_STALLED;
2508
2509 xhci_clear_endpoint_stall_async(xfer);
2510 return;
2511 default:
2512 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2513 err = USBD_IOERROR;
2514 break;
2515 }
2516
2517 /*
2518 * Try to claim this xfer for completion. If it has already
2519 * completed or aborted, drop it on the floor.
2520 */
2521 if (!usbd_xfer_trycomplete(xfer))
2522 return;
2523
2524 /* Set the status. */
2525 xfer->ux_status = err;
2526
2527 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
2528 (trb_0 & 0x3) == 0x0) {
2529 usb_transfer_complete(xfer);
2530 }
2531 }
2532
2533 /* Process Command complete events */
2534 static void
2535 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb)
2536 {
2537 uint64_t trb_0;
2538 uint32_t trb_2, trb_3;
2539
2540 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2541
2542 KASSERT(mutex_owned(&sc->sc_lock));
2543
2544 trb_0 = le64toh(trb->trb_0);
2545 trb_2 = le32toh(trb->trb_2);
2546 trb_3 = le32toh(trb->trb_3);
2547
2548 if (trb_0 == sc->sc_command_addr) {
2549 sc->sc_resultpending = false;
2550
2551 sc->sc_result_trb.trb_0 = trb_0;
2552 sc->sc_result_trb.trb_2 = trb_2;
2553 sc->sc_result_trb.trb_3 = trb_3;
2554 if (XHCI_TRB_2_ERROR_GET(trb_2) !=
2555 XHCI_TRB_ERROR_SUCCESS) {
2556 DPRINTFN(1, "command completion "
2557 "failure: 0x%016jx 0x%08jx 0x%08jx",
2558 trb_0, trb_2, trb_3, 0);
2559 }
2560 cv_signal(&sc->sc_command_cv);
2561 } else {
2562 DPRINTFN(1, "spurious event: %#jx 0x%016jx "
2563 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3);
2564 }
2565 }
2566
2567 /*
2568 * Process events.
2569 * called from xhci_softintr
2570 */
2571 static void
2572 xhci_handle_event(struct xhci_softc * const sc,
2573 const struct xhci_trb * const trb)
2574 {
2575 uint64_t trb_0;
2576 uint32_t trb_2, trb_3;
2577
2578 XHCIHIST_FUNC();
2579
2580 trb_0 = le64toh(trb->trb_0);
2581 trb_2 = le32toh(trb->trb_2);
2582 trb_3 = le32toh(trb->trb_3);
2583
2584 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx",
2585 (uintptr_t)trb, trb_0, trb_2, trb_3);
2586
2587 /*
2588 * 4.11.3.1, 6.4.2.1
2589 * TRB Pointer is invalid for these completion codes.
2590 */
2591 switch (XHCI_TRB_2_ERROR_GET(trb_2)) {
2592 case XHCI_TRB_ERROR_RING_UNDERRUN:
2593 case XHCI_TRB_ERROR_RING_OVERRUN:
2594 case XHCI_TRB_ERROR_VF_RING_FULL:
2595 return;
2596 default:
2597 if (trb_0 == 0) {
2598 return;
2599 }
2600 break;
2601 }
2602
2603 switch (XHCI_TRB_3_TYPE_GET(trb_3)) {
2604 case XHCI_TRB_EVENT_TRANSFER:
2605 xhci_event_transfer(sc, trb);
2606 break;
2607 case XHCI_TRB_EVENT_CMD_COMPLETE:
2608 xhci_event_cmd(sc, trb);
2609 break;
2610 case XHCI_TRB_EVENT_PORT_STS_CHANGE:
2611 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff));
2612 break;
2613 default:
2614 break;
2615 }
2616 }
2617
2618 static void
2619 xhci_softintr(void *v)
2620 {
2621 struct usbd_bus * const bus = v;
2622 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2623 struct xhci_ring * const er = sc->sc_er;
2624 struct xhci_trb *trb;
2625 int i, j, k;
2626
2627 XHCIHIST_FUNC();
2628
2629 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2630
2631 i = er->xr_ep;
2632 j = er->xr_cs;
2633
2634 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0);
2635
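	/*
	 * Consume event TRBs for as long as their cycle bit matches our
	 * consumer cycle state (j); a mismatch means the xHC has not yet
	 * produced the next event.
	 */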
2636 while (1) {
2637 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE,
2638 BUS_DMASYNC_POSTREAD);
2639 trb = &er->xr_trb[i];
2640 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0;
2641
2642 if (j != k)
2643 break;
2644
2645 xhci_handle_event(sc, trb);
2646
2647 i++;
2648 if (i == er->xr_ntrb) {
2649 i = 0;
2650 j ^= 1;
2651 }
2652 }
2653
2654 er->xr_ep = i;
2655 er->xr_cs = j;
2656
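	/*
	 * Publish the new dequeue pointer to the xHC; the ERDP_BUSY
	 * (Event Handler Busy) bit is write-1-to-clear.
	 */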
2657 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) |
2658 XHCI_ERDP_BUSY);
2659
2660 DPRINTFN(16, "ends", 0, 0, 0, 0);
2661
2662 return;
2663 }
2664
2665 static void
2666 xhci_poll(struct usbd_bus *bus)
2667 {
2668 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2669
2670 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2671
2672 mutex_enter(&sc->sc_intr_lock);
2673 int ret = xhci_intr1(sc);
2674 if (ret) {
2675 xhci_softintr(bus);
2676 }
2677 mutex_exit(&sc->sc_intr_lock);
2678
2679 return;
2680 }
2681
2682 static struct usbd_xfer *
2683 xhci_allocx(struct usbd_bus *bus, unsigned int nframes)
2684 {
2685 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2686 struct xhci_xfer *xx;
2687 u_int ntrbs;
2688
2689 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2690
2691 ntrbs = uimax(3, nframes);
2692 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs;
2693
2694 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
2695 if (xx != NULL) {
2696 memset(xx, 0, sizeof(*xx));
2697 if (ntrbs > 0) {
2698 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP);
2699 xx->xx_ntrb = ntrbs;
2700 }
2701 #ifdef DIAGNOSTIC
2702 xx->xx_xfer.ux_state = XFER_BUSY;
2703 #endif
2704 }
2705
2706 return &xx->xx_xfer;
2707 }
2708
2709 static void
2710 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
2711 {
2712 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2713 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
2714
2715 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2716
2717 #ifdef DIAGNOSTIC
2718 if (xfer->ux_state != XFER_BUSY &&
2719 xfer->ux_status != USBD_NOT_STARTED) {
2720 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx",
2721 (uintptr_t)xfer, xfer->ux_state, 0, 0);
2722 }
2723 xfer->ux_state = XFER_FREE;
2724 #endif
2725 if (xx->xx_ntrb > 0) {
2726 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb));
2727 xx->xx_trb = NULL;
2728 xx->xx_ntrb = 0;
2729 }
2730 pool_cache_put(sc->sc_xferpool, xx);
2731 }
2732
2733 static bool
2734 xhci_dying(struct usbd_bus *bus)
2735 {
2736 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2737
2738 return sc->sc_dying;
2739 }
2740
2741 static void
2742 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2743 {
2744 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2745
2746 *lock = &sc->sc_lock;
2747 }
2748
2749 extern uint32_t usb_cookie_no;
2750
2751 /*
2752 * xHCI 4.3
2753 * Called when uhub_explore finds a new device (via usbd_new_device).
2754 * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2755 * This function does:
2756  *	Allocate and construct the dev structure and the default endpoint (ep0).
2757 * Allocate and open pipe of ep0.
2758 * Enable slot and initialize slot context.
2759 * Set Address.
2760 * Read initial device descriptor.
2761 * Determine initial MaxPacketSize (mps) by speed.
2762 * Read full device descriptor.
2763 * Register this device.
2764  *	Finally the device state transitions to ADDRESSED.
2765 */
2766 static usbd_status
2767 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2768 int speed, int port, struct usbd_port *up)
2769 {
2770 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2771 struct usbd_device *dev;
2772 usbd_status err;
2773 usb_device_descriptor_t *dd;
2774 struct xhci_slot *xs;
2775 uint32_t *cp;
2776
2777 XHCIHIST_FUNC();
2778 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2779 port, depth, speed, (uintptr_t)up);
2780
2781 KASSERT(KERNEL_LOCKED_P());
2782
2783 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2784 dev->ud_bus = bus;
2785 dev->ud_quirks = &usbd_no_quirk;
2786 dev->ud_addr = 0;
2787 dev->ud_ddesc.bMaxPacketSize = 0;
2788 dev->ud_depth = depth;
2789 dev->ud_powersrc = up;
2790 dev->ud_myhub = up->up_parent;
2791 dev->ud_speed = speed;
2792 dev->ud_langid = USBD_NOLANG;
2793 dev->ud_cookie.cookie = ++usb_cookie_no;
2794
2795 /* Set up default endpoint handle. */
2796 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2797 	/* doesn't matter, just don't leave it uninitialized */
2798 dev->ud_ep0.ue_toggle = 0;
2799
2800 /* Set up default endpoint descriptor. */
2801 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2802 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2803 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2804 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2805 dev->ud_ep0desc.bInterval = 0;
2806
2807 /* 4.3, 4.8.2.1 */
2808 switch (speed) {
2809 case USB_SPEED_SUPER:
2810 case USB_SPEED_SUPER_PLUS:
2811 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2812 break;
2813 case USB_SPEED_FULL:
2814 /* XXX using 64 as initial mps of ep0 in FS */
2815 case USB_SPEED_HIGH:
2816 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2817 break;
2818 case USB_SPEED_LOW:
2819 default:
2820 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2821 break;
2822 }
2823
2824 up->up_dev = dev;
2825
2826 dd = &dev->ud_ddesc;
2827
2828 if (depth == 0 && port == 0) {
2829 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2830 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2831
2832 /* Establish the default pipe. */
2833 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2834 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2835 if (err) {
2836 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0);
2837 goto bad;
2838 }
2839 err = usbd_get_initial_ddesc(dev, dd);
2840 if (err) {
2841 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2842 goto bad;
2843 }
2844 } else {
2845 uint8_t slot = 0;
2846
2847 /* 4.3.2 */
2848 err = xhci_enable_slot(sc, &slot);
2849 if (err) {
2850 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0);
2851 goto bad;
2852 }
2853
2854 xs = &sc->sc_slots[slot];
2855 dev->ud_hcpriv = xs;
2856
2857 /* 4.3.3 initialize slot structure */
2858 err = xhci_init_slot(dev, slot);
2859 if (err) {
2860 DPRINTFN(1, "init slot %ju", err, 0, 0, 0);
2861 dev->ud_hcpriv = NULL;
2862 /*
2863 * We have to disable_slot here because
2864 * xs->xs_idx == 0 when xhci_init_slot fails,
2865 * in that case usbd_remove_dev won't work.
2866 */
2867 mutex_enter(&sc->sc_lock);
2868 xhci_disable_slot(sc, slot);
2869 mutex_exit(&sc->sc_lock);
2870 goto bad;
2871 }
2872
2873 /*
2874 * We have to establish the default pipe _after_ slot
2875 * structure has been prepared.
2876 */
2877 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2878 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2879 if (err) {
2880 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0,
2881 0);
2882 goto bad;
2883 }
2884
2885 /* 4.3.4 Address Assignment */
2886 err = xhci_set_address(dev, slot, false);
2887 if (err) {
2888 			DPRINTFN(1, "failed to set address: %ju", err, 0, 0, 0);
2889 goto bad;
2890 }
2891
2892 /* Allow device time to set new address */
2893 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE);
2894
2895 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2896 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT);
2897 HEXDUMP("slot context", cp, sc->sc_ctxsz);
2898 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3]));
2899 DPRINTFN(4, "device address %ju", addr, 0, 0, 0);
2900 /*
2901 * XXX ensure we know when the hardware does something
2902 * we can't yet cope with
2903 */
2904 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr);
2905 dev->ud_addr = addr;
2906
2907 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL,
2908 "addr %d already allocated", dev->ud_addr);
2909 /*
2910 * The root hub is given its own slot
2911 */
2912 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev;
2913
2914 err = usbd_get_initial_ddesc(dev, dd);
2915 if (err) {
2916 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2917 goto bad;
2918 }
2919
2920 /* 4.8.2.1 */
2921 if (USB_IS_SS(speed)) {
2922 if (dd->bMaxPacketSize != 9) {
2923 printf("%s: invalid mps 2^%u for SS ep0,"
2924 " using 512\n",
2925 device_xname(sc->sc_dev),
2926 dd->bMaxPacketSize);
2927 dd->bMaxPacketSize = 9;
2928 }
2929 USETW(dev->ud_ep0desc.wMaxPacketSize,
2930 (1 << dd->bMaxPacketSize));
2931 } else
2932 USETW(dev->ud_ep0desc.wMaxPacketSize,
2933 dd->bMaxPacketSize);
2934 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0);
2935 err = xhci_update_ep0_mps(sc, xs,
2936 UGETW(dev->ud_ep0desc.wMaxPacketSize));
2937 if (err) {
2938 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0);
2939 goto bad;
2940 }
2941 }
2942
2943 err = usbd_reload_device_desc(dev);
2944 if (err) {
2945 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
2946 goto bad;
2947 }
2948
2949 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,",
2950 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0);
2951 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,",
2952 dd->bDeviceClass, dd->bDeviceSubClass,
2953 dd->bDeviceProtocol, 0);
2954 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd",
2955 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations,
2956 dev->ud_speed);
2957
2958 usbd_get_device_strings(dev);
2959
2960 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev);
2961
2962 if (depth == 0 && port == 0) {
2963 usbd_attach_roothub(parent, dev);
2964 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0);
2965 return USBD_NORMAL_COMPLETION;
2966 }
2967
2968 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr);
2969 bad:
2970 if (err != USBD_NORMAL_COMPLETION) {
2971 if (depth == 0 && port == 0 && dev->ud_pipe0)
2972 usbd_kill_pipe(dev->ud_pipe0);
2973 usbd_remove_device(dev, up);
2974 }
2975
2976 return err;
2977 }
2978
2979 static usbd_status
2980 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp,
2981 size_t ntrb, size_t align)
2982 {
2983 size_t size = ntrb * XHCI_TRB_SIZE;
2984 struct xhci_ring *xr;
2985
2986 XHCIHIST_FUNC();
2987 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx",
2988 (uintptr_t)*xrp, ntrb, align, 0);
2989
2990 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP);
2991 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0);
2992
2993 int err = usb_allocmem(&sc->sc_bus, size, align,
2994 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xr->xr_dma);
2995 if (err) {
2996 kmem_free(xr, sizeof(struct xhci_ring));
2997 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0);
2998 return err;
2999 }
3000 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
3001 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP);
3002 xr->xr_trb = xhci_ring_trbv(xr, 0);
3003 xr->xr_ntrb = ntrb;
3004 xr->is_halted = false;
3005 xhci_host_dequeue(xr);
3006 *xrp = xr;
3007
3008 return USBD_NORMAL_COMPLETION;
3009 }
3010
3011 static void
3012 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr)
3013 {
3014 if (*xr == NULL)
3015 return;
3016
3017 usb_freemem(&sc->sc_bus, &(*xr)->xr_dma);
3018 mutex_destroy(&(*xr)->xr_lock);
3019 kmem_free((*xr)->xr_cookies,
3020 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb);
3021 kmem_free(*xr, sizeof(struct xhci_ring));
3022 *xr = NULL;
3023 }
3024
3025 static void
3026 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr,
3027 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs)
3028 {
3029 size_t i;
3030 u_int ri;
3031 u_int cs;
3032 uint64_t parameter;
3033 uint32_t status;
3034 uint32_t control;
3035
3036 XHCIHIST_FUNC();
3037 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju",
3038 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0);
3039
3040 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u",
3041 ntrbs, xr->xr_ntrb);
3042 for (i = 0; i < ntrbs; i++) {
3043 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr,
3044 (uintptr_t)trbs, i, 0);
3045 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx",
3046 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0);
3047 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) !=
3048 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3);
3049 }
3050
3051 ri = xr->xr_ep;
3052 cs = xr->xr_cs;
3053
3054 /*
3055 	 * Although the xhci hardware can do scatter/gather dma from
3056 	 * arbitrarily sized buffers, there is a non-obvious restriction
3057 	 * that a LINK trb is only allowed at the end of a burst of
3058 	 * transfers - which might be 16kB.
3059 	 * Arbitrarily aligned LINK trbs definitely fail on Ivy Bridge.
3060 * The simple solution is not to allow a LINK trb in the middle
3061 * of anything - as here.
3062 * XXX: (dsl) There are xhci controllers out there (eg some made by
3063 * ASMedia) that seem to lock up if they process a LINK trb but
3064 * cannot process the linked-to trb yet.
3065 * The code should write the 'cycle' bit on the link trb AFTER
3066 * adding the other trb.
3067 */
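	/*
	 * Hand-over scheme used below: every TRB except the first one is
	 * written with a valid cycle bit, while the first TRB is written
	 * with an inverted (invalid) cycle bit.  Only after all TRBs are in
	 * place is the cycle bit of the first TRB flipped, so the xHC sees
	 * the whole chain become valid at once.
	 */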
3068 u_int firstep = xr->xr_ep;
3069 u_int firstcs = xr->xr_cs;
3070
3071 for (i = 0; i < ntrbs; ) {
3072 u_int oldri = ri;
3073 u_int oldcs = cs;
3074
3075 if (ri >= (xr->xr_ntrb - 1)) {
3076 /* Put Link TD at the end of ring */
3077 parameter = xhci_ring_trbp(xr, 0);
3078 status = 0;
3079 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) |
3080 XHCI_TRB_3_TC_BIT;
3081 xr->xr_cookies[ri] = NULL;
3082 xr->xr_ep = 0;
3083 xr->xr_cs ^= 1;
3084 ri = xr->xr_ep;
3085 cs = xr->xr_cs;
3086 } else {
3087 parameter = trbs[i].trb_0;
3088 status = trbs[i].trb_2;
3089 control = trbs[i].trb_3;
3090
3091 xr->xr_cookies[ri] = cookie;
3092 ri++;
3093 i++;
3094 }
3095 /*
3096 		 * If this is the first TRB, mark it invalid to prevent
3097 		 * the xHC from executing it immediately.
3098 */
3099 if (oldri == firstep) {
3100 if (oldcs) {
3101 control &= ~XHCI_TRB_3_CYCLE_BIT;
3102 } else {
3103 control |= XHCI_TRB_3_CYCLE_BIT;
3104 }
3105 } else {
3106 if (oldcs) {
3107 control |= XHCI_TRB_3_CYCLE_BIT;
3108 } else {
3109 control &= ~XHCI_TRB_3_CYCLE_BIT;
3110 }
3111 }
3112 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control);
3113 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri,
3114 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3115 }
3116
3117 /* Now invert cycle bit of first TRB */
3118 if (firstcs) {
3119 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT);
3120 } else {
3121 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT);
3122 }
3123 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep,
3124 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3125
3126 xr->xr_ep = ri;
3127 xr->xr_cs = cs;
3128
3129 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep,
3130 xr->xr_cs, 0);
3131 }
3132
3133 static inline void
3134 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr,
3135 struct xhci_xfer *xx, u_int ntrb)
3136 {
3137 KASSERT(ntrb <= xx->xx_ntrb);
3138 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb);
3139 }
3140
3141 /*
3142  * Stop executing commands, purge all commands on the command ring, and
3143  * rewind the dequeue pointer.
3144 */
3145 static void
3146 xhci_abort_command(struct xhci_softc *sc)
3147 {
3148 struct xhci_ring * const cr = sc->sc_cr;
3149 uint64_t crcr;
3150 int i;
3151
3152 XHCIHIST_FUNC();
3153 XHCIHIST_CALLARGS("command %#jx timeout, aborting",
3154 sc->sc_command_addr, 0, 0, 0);
3155
3156 mutex_enter(&cr->xr_lock);
3157
3158 /* 4.6.1.2 Aborting a Command */
3159 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3160 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA);
3161
3162 for (i = 0; i < 500; i++) {
3163 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3164 if ((crcr & XHCI_CRCR_LO_CRR) == 0)
3165 break;
3166 usb_delay_ms(&sc->sc_bus, 1);
3167 }
3168 if ((crcr & XHCI_CRCR_LO_CRR) != 0) {
3169 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0);
3170 /* reset HC here? */
3171 }
3172
3173 /* reset command ring dequeue pointer */
3174 cr->xr_ep = 0;
3175 cr->xr_cs = 1;
3176 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs);
3177
3178 mutex_exit(&cr->xr_lock);
3179 }
3180
3181 /*
3182  * Put a command on the command ring, ring the doorbell, and cv_timedwait
3183  * with a timeout. Command completion is notified by cv_signal from
3184  * xhci_event_cmd() (called from xhci_softintr), or the wait times out.
3185 * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(),
3186 * then do_command examines it.
3187 */
3188 static usbd_status
3189 xhci_do_command_locked(struct xhci_softc * const sc,
3190 struct xhci_soft_trb * const trb, int timeout)
3191 {
3192 struct xhci_ring * const cr = sc->sc_cr;
3193 usbd_status err;
3194
3195 XHCIHIST_FUNC();
3196 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx",
3197 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3198
3199 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
3200 KASSERT(mutex_owned(&sc->sc_lock));
3201
3202 while (sc->sc_command_addr != 0 ||
3203 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp))
3204 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
3205
3206 /*
3207 	 * If the enqueue pointer points at the last TRB of the ring, that
3208 	 * slot holds the Link TRB, so the command TRB will be stored in TRB 0.
3209 */
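	/*
	 * Record the physical address of that TRB in sc_command_addr;
	 * xhci_event_cmd() matches the completion event's TRB pointer
	 * against it to wake us up.
	 */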
3210 if (cr->xr_ep == cr->xr_ntrb - 1)
3211 sc->sc_command_addr = xhci_ring_trbp(cr, 0);
3212 else
3213 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep);
3214
3215 sc->sc_resultpending = true;
3216
3217 mutex_enter(&cr->xr_lock);
3218 xhci_ring_put(sc, cr, NULL, trb, 1);
3219 mutex_exit(&cr->xr_lock);
3220
3221 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0);
3222
3223 while (sc->sc_resultpending) {
3224 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock,
3225 MAX(1, mstohz(timeout))) == EWOULDBLOCK) {
3226 xhci_abort_command(sc);
3227 err = USBD_TIMEOUT;
3228 goto timedout;
3229 }
3230 }
3231
3232 trb->trb_0 = sc->sc_result_trb.trb_0;
3233 trb->trb_2 = sc->sc_result_trb.trb_2;
3234 trb->trb_3 = sc->sc_result_trb.trb_3;
3235
3236 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx",
3237 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3238
3239 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) {
3240 case XHCI_TRB_ERROR_SUCCESS:
3241 err = USBD_NORMAL_COMPLETION;
3242 break;
3243 default:
3244 case 192 ... 223:
3245 DPRINTFN(5, "error %#jx",
3246 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0);
3247 err = USBD_IOERROR;
3248 break;
3249 case 224 ... 255:
3250 err = USBD_NORMAL_COMPLETION;
3251 break;
3252 }
3253
3254 timedout:
3255 sc->sc_resultpending = false;
3256 sc->sc_command_addr = 0;
3257 cv_broadcast(&sc->sc_cmdbusy_cv);
3258
3259 return err;
3260 }
3261
3262 static usbd_status
3263 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb,
3264 int timeout)
3265 {
3266
3267 mutex_enter(&sc->sc_lock);
3268 usbd_status ret = xhci_do_command_locked(sc, trb, timeout);
3269 mutex_exit(&sc->sc_lock);
3270
3271 return ret;
3272 }
3273
3274 static usbd_status
3275 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp)
3276 {
3277 struct xhci_soft_trb trb;
3278 usbd_status err;
3279
3280 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3281
3282 trb.trb_0 = 0;
3283 trb.trb_2 = 0;
3284 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
3285
3286 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3287 if (err != USBD_NORMAL_COMPLETION) {
3288 return err;
3289 }
3290
3291 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3);
3292
3293 return err;
3294 }
3295
3296 /*
3297 * xHCI 4.6.4
3298 * Deallocate ring and device/input context DMA buffers, and disable_slot.
3299 * All endpoints in the slot should be stopped.
3300 * Should be called with sc_lock held.
3301 */
3302 static usbd_status
3303 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot)
3304 {
3305 struct xhci_soft_trb trb;
3306 struct xhci_slot *xs;
3307 usbd_status err;
3308
3309 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3310
3311 if (sc->sc_dying)
3312 return USBD_IOERROR;
3313
3314 trb.trb_0 = 0;
3315 trb.trb_2 = 0;
3316 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) |
3317 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT);
3318
3319 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
3320
3321 if (!err) {
3322 xs = &sc->sc_slots[slot];
3323 if (xs->xs_idx != 0) {
3324 xhci_free_slot(sc, xs);
3325 xhci_set_dcba(sc, 0, slot);
3326 memset(xs, 0, sizeof(*xs));
3327 }
3328 }
3329
3330 return err;
3331 }
3332
3333 /*
3334  * Set the address of the device and transition the slot state from ENABLED
3335  * to ADDRESSED if the Block Set Address Request (BSR) flag is false.
3336  * If BSR==true, transition the slot state from ENABLED to DEFAULT.
3337 * see xHCI 1.1 4.5.3, 3.3.4
3338 * Should be called without sc_lock held.
3339 */
3340 static usbd_status
3341 xhci_address_device(struct xhci_softc * const sc,
3342 uint64_t icp, uint8_t slot_id, bool bsr)
3343 {
3344 struct xhci_soft_trb trb;
3345 usbd_status err;
3346
3347 XHCIHIST_FUNC();
3348 if (bsr) {
3349 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr",
3350 icp, slot_id, 0, 0);
3351 } else {
3352 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr",
3353 icp, slot_id, 0, 0);
3354 }
3355
3356 trb.trb_0 = icp;
3357 trb.trb_2 = 0;
3358 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) |
3359 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) |
3360 (bsr ? XHCI_TRB_3_BSR_BIT : 0);
3361
3362 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3363
3364 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS)
3365 err = USBD_NO_ADDR;
3366
3367 return err;
3368 }
3369
3370 static usbd_status
3371 xhci_update_ep0_mps(struct xhci_softc * const sc,
3372 struct xhci_slot * const xs, u_int mps)
3373 {
3374 struct xhci_soft_trb trb;
3375 usbd_status err;
3376 uint32_t * cp;
3377
3378 XHCIHIST_FUNC();
3379 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0);
3380
3381 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3382 cp[0] = htole32(0);
3383 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL));
3384
3385 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL));
3386 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps));
3387
3388 /* sync input contexts before they are read from memory */
3389 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3390 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3391 sc->sc_ctxsz * 4);
3392
3393 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
3394 trb.trb_2 = 0;
3395 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
3396 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX);
3397
3398 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3399 return err;
3400 }
3401
3402 static void
3403 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si)
3404 {
3405 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0);
3406
3407 XHCIHIST_FUNC();
3408 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd",
3409 (uintptr_t)&dcbaa[si], dcba, si, 0);
3410
3411 dcbaa[si] = htole64(dcba);
3412 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t),
3413 BUS_DMASYNC_PREWRITE);
3414 }
3415
3416 /*
3417 * Allocate device and input context DMA buffer, and
3418 * TRB DMA buffer for each endpoint.
3419 */
3420 static usbd_status
3421 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3422 {
3423 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3424 struct xhci_slot *xs;
3425
3426 XHCIHIST_FUNC();
3427 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3428
3429 xs = &sc->sc_slots[slot];
3430
3431 /* allocate contexts */
3432 int err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, sc->sc_pgsz,
3433 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_dc_dma);
3434 if (err) {
3435 DPRINTFN(1, "failed to allocmem output device context %jd",
3436 err, 0, 0, 0);
3437 return USBD_NOMEM;
3438 }
3439
3440 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, sc->sc_pgsz,
3441 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_ic_dma);
3442 if (err) {
3443 DPRINTFN(1, "failed to allocmem input device context %jd",
3444 err, 0, 0, 0);
3445 goto bad1;
3446 }
3447
3448 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr));
3449 xs->xs_idx = slot;
3450
3451 return USBD_NORMAL_COMPLETION;
3452
3453 bad1:
3454 usb_freemem(&sc->sc_bus, &xs->xs_dc_dma);
3455 xs->xs_idx = 0;
3456 return USBD_NOMEM;
3457 }
3458
3459 static void
3460 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs)
3461 {
3462 u_int dci;
3463
3464 XHCIHIST_FUNC();
3465 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
3466
3467 /* deallocate all allocated rings in the slot */
3468 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
3469 if (xs->xs_xr[dci] != NULL)
3470 xhci_ring_free(sc, &xs->xs_xr[dci]);
3471 }
3472 usb_freemem(&sc->sc_bus, &xs->xs_ic_dma);
3473 usb_freemem(&sc->sc_bus, &xs->xs_dc_dma);
3474 xs->xs_idx = 0;
3475 }
3476
3477 /*
3478 * Setup slot context, set Device Context Base Address, and issue
3479 * Set Address Device command.
3480 */
3481 static usbd_status
3482 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3483 {
3484 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3485 struct xhci_slot *xs;
3486 usbd_status err;
3487
3488 XHCIHIST_FUNC();
3489 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3490
3491 xs = &sc->sc_slots[slot];
3492
3493 xhci_setup_ctx(dev->ud_pipe0);
3494
3495 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3496 sc->sc_ctxsz * 3);
3497
3498 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3499
3500 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3501
3502 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3503 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3504 sc->sc_ctxsz * 2);
3505
3506 return err;
3507 }
3508
3509 /*
3510 * 4.8.2, 6.2.3.2
3511 * construct slot/endpoint context parameters and do syncmem
3512 */
3513 static void
3514 xhci_setup_ctx(struct usbd_pipe *pipe)
3515 {
3516 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3517 struct usbd_device *dev = pipe->up_dev;
3518 struct xhci_slot * const xs = dev->ud_hcpriv;
3519 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3520 const u_int dci = xhci_ep_get_dci(ed);
3521 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3522 uint32_t *cp;
3523 uint16_t mps = UGETW(ed->wMaxPacketSize);
3524 uint8_t speed = dev->ud_speed;
3525 uint8_t ival = ed->bInterval;
3526
3527 XHCIHIST_FUNC();
3528 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3529 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3530
3531 /* set up initial input control context */
3532 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3533 cp[0] = htole32(0);
3534 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3535 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT));
3536 cp[7] = htole32(0);
3537
3538 /* set up input slot context */
3539 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
3540 cp[0] =
3541 XHCI_SCTX_0_CTX_NUM_SET(dci) |
3542 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed));
3543 cp[1] = 0;
3544 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0);
3545 cp[3] = 0;
3546 xhci_setup_route(pipe, cp);
3547 xhci_setup_tthub(pipe, cp);
3548
3549 cp[0] = htole32(cp[0]);
3550 cp[1] = htole32(cp[1]);
3551 cp[2] = htole32(cp[2]);
3552 cp[3] = htole32(cp[3]);
3553
3554 /* set up input endpoint context */
3555 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci));
3556 cp[0] =
3557 XHCI_EPCTX_0_EPSTATE_SET(0) |
3558 XHCI_EPCTX_0_MULT_SET(0) |
3559 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) |
3560 XHCI_EPCTX_0_LSA_SET(0) |
3561 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0);
3562 cp[1] =
3563 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) |
3564 XHCI_EPCTX_1_HID_SET(0) |
3565 XHCI_EPCTX_1_MAXB_SET(0);
3566
3567 if (xfertype != UE_ISOCHRONOUS)
3568 cp[1] |= XHCI_EPCTX_1_CERR_SET(3);
3569
3570 if (xfertype == UE_CONTROL)
3571 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); /* 6.2.3 */
3572 else if (USB_IS_SS(speed))
3573 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(mps);
3574 else
3575 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(UE_GET_SIZE(mps));
3576
3577 xhci_setup_maxburst(pipe, cp);
3578
3579 switch (xfertype) {
3580 case UE_CONTROL:
3581 break;
3582 case UE_BULK:
3583 /* XXX Set MaxPStreams, HID, and LSA if streams enabled */
3584 break;
3585 case UE_INTERRUPT:
3586 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3587 ival = pipe->up_interval;
3588
3589 ival = xhci_bival2ival(ival, speed);
3590 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3591 break;
3592 case UE_ISOCHRONOUS:
3593 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3594 ival = pipe->up_interval;
3595
3596 /* xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 */
3597 if (speed == USB_SPEED_FULL)
3598 ival += 3; /* 1ms -> 125us */
3599 ival--;
3600 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
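		/*
		 * Worked example: a full-speed isoc endpoint with
		 * bInterval 1 (one 1ms frame) yields ival = 1 + 3 - 1 = 3,
		 * i.e. an xHC interval of 2^3 * 125us = 1ms.
		 */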
3601 break;
3602 default:
3603 break;
3604 }
3605 DPRINTFN(4, "setting ival %ju MaxBurst %#jx",
3606 XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_1_MAXB_GET(cp[1]), 0, 0);
3607
3608 /* rewind TR dequeue pointer in xHC */
3609 /* can't use xhci_ep_get_dci() yet? */
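	/*
	 * The TR Dequeue Pointer occupies dwords 2-3 of the endpoint
	 * context; DCS=1 matches the initial cycle state programmed by
	 * xhci_host_dequeue() below.
	 */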
3610 *(uint64_t *)(&cp[2]) = htole64(
3611 xhci_ring_trbp(xs->xs_xr[dci], 0) |
3612 XHCI_EPCTX_2_DCS_SET(1));
3613
3614 cp[0] = htole32(cp[0]);
3615 cp[1] = htole32(cp[1]);
3616 cp[4] = htole32(cp[4]);
3617
3618 /* rewind TR dequeue pointer in driver */
3619 struct xhci_ring *xr = xs->xs_xr[dci];
3620 mutex_enter(&xr->xr_lock);
3621 xhci_host_dequeue(xr);
3622 mutex_exit(&xr->xr_lock);
3623
3624 /* sync input contexts before they are read from memory */
3625 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3626 }
3627
3628 /*
3629  * Set up the route string and roothub port of the given device for the slot context
3630 */
3631 static void
3632 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3633 {
3634 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3635 struct usbd_device *dev = pipe->up_dev;
3636 struct usbd_port *up = dev->ud_powersrc;
3637 struct usbd_device *hub;
3638 struct usbd_device *adev;
3639 uint8_t rhport = 0;
3640 uint32_t route = 0;
3641
3642 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3643
3644 	/* Locate root hub port and determine route string */
3645 /* 4.3.3 route string does not include roothub port */
3646 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3647 uint32_t dep;
3648
3649 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd",
3650 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3651 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3652 -1);
3653
3654 if (hub->ud_powersrc == NULL)
3655 break;
3656 dep = hub->ud_depth;
3657 if (dep == 0)
3658 break;
3659 rhport = hub->ud_powersrc->up_portno;
3660 if (dep > USB_HUB_MAX_DEPTH)
3661 continue;
3662
3663 route |=
3664 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3665 << ((dep - 1) * 4);
3666 }
3667 route = route >> 4;
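	/*
	 * Illustrative example: a device on port 3 of a hub that itself
	 * sits on roothub port 2 accumulates route 0x32 in the loop above;
	 * the shift drops the roothub port nibble, leaving route 0x3 with
	 * rhport 2.
	 */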
3668 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
3669
3670 /* Locate port on upstream high speed hub */
3671 for (adev = dev, hub = up->up_parent;
3672 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3673 adev = hub, hub = hub->ud_myhub)
3674 ;
3675 if (hub) {
3676 int p;
3677 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3678 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) {
3679 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1];
3680 goto found;
3681 }
3682 }
3683 panic("%s: cannot find HS port", __func__);
3684 found:
3685 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3686 } else {
3687 dev->ud_myhsport = NULL;
3688 }
3689
3690 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3691
3692 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3693 ctlrport, route, (uintptr_t)hub);
3694
3695 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3696 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3697 }
3698
3699 /*
3700  * Set up whether the device is a hub, whether the device uses MTT, and
3701  * the TT information if it uses MTT.
3702 */
3703 static void
3704 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3705 {
3706 struct usbd_device *dev = pipe->up_dev;
3707 struct usbd_port *myhsport = dev->ud_myhsport;
3708 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3709 uint32_t speed = dev->ud_speed;
3710 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3711 uint8_t tthubslot, ttportnum;
3712 bool ishub;
3713 bool usemtt;
3714
3715 XHCIHIST_FUNC();
3716
3717 /*
3718 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3719 * tthubslot:
3720 * This is the slot ID of parent HS hub
3721 * if LS/FS device is connected && connected through HS hub.
3722 * This is 0 if device is not LS/FS device ||
3723 * parent hub is not HS hub ||
3724 * attached to root hub.
3725 * ttportnum:
3726 * This is the downstream facing port of parent HS hub
3727 * if LS/FS device is connected.
3728 * This is 0 if device is not LS/FS device ||
3729 * parent hub is not HS hub ||
3730 * attached to root hub.
3731 */
3732 if (myhsport &&
3733 myhsport->up_parent->ud_addr != rhaddr &&
3734 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3735 ttportnum = myhsport->up_portno;
3736 tthubslot = myhsport->up_parent->ud_addr;
3737 } else {
3738 ttportnum = 0;
3739 tthubslot = 0;
3740 }
3741 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd",
3742 (uintptr_t)myhsport, ttportnum, tthubslot, 0);
3743
3744 /* ishub is valid after reading UDESC_DEVICE */
3745 ishub = (dd->bDeviceClass == UDCLASS_HUB);
3746
3747 /* dev->ud_hub is valid after reading UDESC_HUB */
3748 if (ishub && dev->ud_hub) {
3749 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc;
3750 uint8_t ttt =
3751 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK);
3752
3753 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts);
3754 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt);
3755 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0);
3756 }
3757
3758 #define IS_MTTHUB(dd) \
3759 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT)
3760
3761 	/*
3762 	 * The MTT flag is set if
3763 	 * 1. this is a HS hub and MTTs are supported and enabled; or
3764 	 * 2. this is a LS or FS device and its parent HS hub has MTTs
3765 	 * supported and enabled.
3766 	 *
3767 	 * XXX enabled is not tested yet
3768 	 */
3769 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd))
3770 usemtt = true;
3771 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) &&
3772 myhsport &&
3773 myhsport->up_parent->ud_addr != rhaddr &&
3774 IS_MTTHUB(&myhsport->up_parent->ud_ddesc))
3775 usemtt = true;
3776 else
3777 usemtt = false;
3778 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd",
3779 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt);
3780
3781 #undef IS_MTTHUB
3782
3783 cp[0] |=
3784 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) |
3785 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0);
3786 cp[2] |=
3787 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) |
3788 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum);
3789 }
3790
3791 /* set up params for periodic endpoint */
3792 static void
3793 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp)
3794 {
3795 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
3796 struct usbd_device *dev = pipe->up_dev;
3797 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3798 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3799 usbd_desc_iter_t iter;
3800 const usb_cdc_descriptor_t *cdcd;
3801 uint32_t maxb = 0;
3802 uint16_t mps = UGETW(ed->wMaxPacketSize);
3803 uint8_t speed = dev->ud_speed;
3804 uint8_t mult = 0;
3805 uint8_t ep;
3806
3807 /* config desc is NULL when opening ep0 */
3808 if (dev == NULL || dev->ud_cdesc == NULL)
3809 goto no_cdcd;
3810 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev,
3811 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY);
3812 if (cdcd == NULL)
3813 goto no_cdcd;
3814 usb_desc_iter_init(dev, &iter);
3815 iter.cur = (const void *)cdcd;
3816
3817 /* find endpoint_ss_comp desc for ep of this pipe */
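	/*
	 * Note: usb_cdc_descriptor_t is used here only as a generic
	 * descriptor header (bLength/bDescriptorType) while iterating;
	 * no CDC-specific fields are consulted.
	 */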
3818 for (ep = 0;;) {
3819 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter);
3820 if (cdcd == NULL)
3821 break;
3822 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) {
3823 ep = ((const usb_endpoint_descriptor_t *)cdcd)->
3824 bEndpointAddress;
3825 if (UE_GET_ADDR(ep) ==
3826 UE_GET_ADDR(ed->bEndpointAddress)) {
3827 cdcd = (const usb_cdc_descriptor_t *)
3828 usb_desc_iter_next(&iter);
3829 break;
3830 }
3831 ep = 0;
3832 }
3833 }
3834 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
3835 const usb_endpoint_ss_comp_descriptor_t * esscd =
3836 (const usb_endpoint_ss_comp_descriptor_t *)cdcd;
3837 maxb = esscd->bMaxBurst;
3838 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes);
3839 }
3840
3841 no_cdcd:
3842 /* 6.2.3.4, 4.8.2.4 */
3843 if (USB_IS_SS(speed)) {
3844 /* USB 3.1 9.6.6 */
3845 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps);
3846 /* USB 3.1 9.6.7 */
3847 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3848 #ifdef notyet
3849 if (xfertype == UE_ISOCHRONOUS) {
3850 }
3851 if (XHCI_HCC2_LEC(sc->sc_hcc2) != 0) {
3852 /* use ESIT */
3853 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x);
3854 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(x);
3855
3856 /* XXX if LEC = 1, set ESIT instead */
3857 cp[0] |= XHCI_EPCTX_0_MULT_SET(0);
3858 } else {
3859 /* use ival */
3860 }
3861 #endif
3862 } else {
3863 /* USB 2.0 9.6.6 */
3864 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(UE_GET_SIZE(mps));
3865
3866 /* 6.2.3.4 */
3867 if (speed == USB_SPEED_HIGH &&
3868 (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)) {
3869 maxb = UE_GET_TRANS(mps);
3870 } else {
3871 /* LS/FS or HS CTRL or HS BULK */
3872 maxb = 0;
3873 }
3874 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3875 }
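	/*
	 * Illustrative example: a HS isochronous endpoint advertising
	 * wMaxPacketSize 0x1400 has a 1024-byte MPS in bits 10..0 and 2
	 * additional transactions per microframe in bits 12..11, so maxb
	 * ends up as 2 and xp_maxb below becomes 3.
	 */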
3876 xpipe->xp_maxb = maxb + 1;
3877 xpipe->xp_mult = mult + 1;
3878 }
3879
3880 /*
3881 * Convert endpoint bInterval value to endpoint context interval value
3882 * for Interrupt pipe.
3883 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
3884 */
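/*
 * For example (illustrative values): a FS interrupt endpoint with
 * bInterval 10 (10 ms = 80 units of 125 us) rounds down to 2^6 = 64,
 * giving a context interval of 6 (8 ms); a HS endpoint with bInterval 4
 * simply becomes 3 (2^3 microframes = 1 ms).
 */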
3885 static uint32_t
3886 xhci_bival2ival(uint32_t ival, uint32_t speed)
3887 {
3888 if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
3889 int i;
3890
3891 /*
3892 * round ival down to "the nearest base 2 multiple of
3893 * bInterval * 8".
3894 * bInterval is at most 255 as its type is uByte.
3895 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
3896 */
3897 for (i = 10; i > 0; i--) {
3898 if ((ival * 8) >= (1 << i))
3899 break;
3900 }
3901 ival = i;
3902 } else {
3903 /* Interval = bInterval-1 for SS/HS */
3904 ival--;
3905 }
3906
3907 return ival;
3908 }
3909
3910 /* ----- */
3911
3912 static void
3913 xhci_noop(struct usbd_pipe *pipe)
3914 {
3915 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3916 }
3917
3918 /*
3919 * Process root hub request.
3920 */
3921 static int
3922 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
3923 void *buf, int buflen)
3924 {
3925 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
3926 usb_port_status_t ps;
3927 int l, totlen = 0;
3928 uint16_t len, value, index;
3929 int port, i;
3930 uint32_t v;
3931
3932 XHCIHIST_FUNC();
3933
3934 if (sc->sc_dying)
3935 return -1;
3936
3937 size_t bn = bus == &sc->sc_bus ? 0 : 1;
3938
3939 len = UGETW(req->wLength);
3940 value = UGETW(req->wValue);
3941 index = UGETW(req->wIndex);
3942
3943 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
3944 req->bmRequestType | (req->bRequest << 8), value, index, len);
3945
3946 #define C(x,y) ((x) | ((y) << 8))
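	/*
	 * C() packs bRequest into the low byte and bmRequestType into the
	 * high byte so the switch below can match on both at once.
	 */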
3947 switch (C(req->bRequest, req->bmRequestType)) {
3948 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3949 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
3950 if (len == 0)
3951 break;
3952 switch (value) {
3953 #define sd ((usb_string_descriptor_t *)buf)
3954 case C(2, UDESC_STRING):
3955 /* Product */
3956 totlen = usb_makestrdesc(sd, len, "xHCI root hub");
3957 break;
3958 #undef sd
3959 default:
3960 /* default from usbroothub */
3961 return buflen;
3962 }
3963 break;
3964
3965 /* Hub requests */
3966 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
3967 break;
3968 /* Clear Port Feature request */
3969 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
3970 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
3971
3972 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
3973 index, value, bn, cp);
3974 if (index < 1 || index > sc->sc_rhportcount[bn]) {
3975 return -1;
3976 }
3977 port = XHCI_PORTSC(cp);
3978 v = xhci_op_read_4(sc, port);
3979 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
3980 v &= ~XHCI_PS_CLEAR;
3981 switch (value) {
3982 case UHF_PORT_ENABLE:
3983 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
3984 break;
3985 case UHF_PORT_SUSPEND:
3986 return -1;
3987 case UHF_PORT_POWER:
3988 break;
3989 case UHF_PORT_TEST:
3990 case UHF_PORT_INDICATOR:
3991 return -1;
3992 case UHF_C_PORT_CONNECTION:
3993 xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
3994 break;
3995 case UHF_C_PORT_ENABLE:
3996 case UHF_C_PORT_SUSPEND:
3997 case UHF_C_PORT_OVER_CURRENT:
3998 return -1;
3999 case UHF_C_BH_PORT_RESET:
4000 xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
4001 break;
4002 case UHF_C_PORT_RESET:
4003 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4004 break;
4005 case UHF_C_PORT_LINK_STATE:
4006 xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
4007 break;
4008 case UHF_C_PORT_CONFIG_ERROR:
4009 xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
4010 break;
4011 default:
4012 return -1;
4013 }
4014 break;
4015 }
4016 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
4017 if (len == 0)
4018 break;
4019 if ((value & 0xff) != 0) {
4020 return -1;
4021 }
4022 usb_hub_descriptor_t hubd;
4023
4024 totlen = uimin(buflen, sizeof(hubd));
4025 memcpy(&hubd, buf, totlen);
4026 hubd.bNbrPorts = sc->sc_rhportcount[bn];
4027 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
4028 hubd.bPwrOn2PwrGood = 200;
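		/* bPwrOn2PwrGood is in units of 2 ms, so 200 means 400 ms. */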
4029 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
4030 /* XXX can't find out? */
4031 hubd.DeviceRemovable[i++] = 0;
4032 }
4033 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
4034 totlen = uimin(totlen, hubd.bDescLength);
4035 memcpy(buf, &hubd, totlen);
4036 break;
4037 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
4038 if (len != 4) {
4039 return -1;
4040 }
4041 memset(buf, 0, len); /* ? XXX */
4042 totlen = len;
4043 break;
4044 /* Get Port Status request */
4045 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
4046 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4047
4048 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
4049 bn, index, cp, 0);
4050 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4051 DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
4052 "portcount=%jd",
4053 index, bn, sc->sc_rhportcount[bn], 0);
4054 return -1;
4055 }
4056 if (len != 4) {
4057 DPRINTFN(5, "bad get port status: len %jd != 4",
4058 len, 0, 0, 0);
4059 return -1;
4060 }
4061 v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
4062 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
4063 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
4064 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
4065 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
4066 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
4067 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
4068 if (v & XHCI_PS_PR) i |= UPS_RESET;
4069 if (v & XHCI_PS_PP) {
4070 if (i & UPS_OTHER_SPEED)
4071 i |= UPS_PORT_POWER_SS;
4072 else
4073 i |= UPS_PORT_POWER;
4074 }
4075 if (i & UPS_OTHER_SPEED)
4076 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
4077 if (sc->sc_vendor_port_status)
4078 i = sc->sc_vendor_port_status(sc, v, i);
4079 USETW(ps.wPortStatus, i);
4080 i = 0;
4081 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
4082 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
4083 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
4084 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
4085 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
4086 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
4087 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
4088 USETW(ps.wPortChange, i);
4089 totlen = uimin(len, sizeof(ps));
4090 memcpy(buf, &ps, totlen);
4091 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
4092 " totlen %jd",
4093 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
4094 break;
4095 }
4096 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
4097 return -1;
4098 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
4099 break;
4100 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
4101 break;
4102 /* Set Port Feature request */
4103 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
4104 int optval = (index >> 8) & 0xff;
4105 index &= 0xff;
4106 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4107 return -1;
4108 }
4109
4110 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4111
4112 port = XHCI_PORTSC(cp);
4113 v = xhci_op_read_4(sc, port);
4114 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
4115 v &= ~XHCI_PS_CLEAR;
4116 switch (value) {
4117 case UHF_PORT_ENABLE:
4118 xhci_op_write_4(sc, port, v | XHCI_PS_PED);
4119 break;
4120 case UHF_PORT_SUSPEND:
4121 /* XXX suspend */
4122 break;
4123 case UHF_PORT_RESET:
4124 v &= ~(XHCI_PS_PED | XHCI_PS_PR);
4125 xhci_op_write_4(sc, port, v | XHCI_PS_PR);
4126 /* Wait for reset to complete. */
4127 usb_delay_ms(&sc->sc_bus, USB_PORT_ROOT_RESET_DELAY);
4128 if (sc->sc_dying) {
4129 return -1;
4130 }
4131 v = xhci_op_read_4(sc, port);
4132 if (v & XHCI_PS_PR) {
4133 xhci_op_write_4(sc, port, v & ~XHCI_PS_PR);
4134 usb_delay_ms(&sc->sc_bus, 10);
4135 /* XXX */
4136 }
4137 break;
4138 case UHF_PORT_POWER:
4139 /* XXX power control */
4140 break;
4141 /* XXX more */
4142 case UHF_C_PORT_RESET:
4143 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4144 break;
4145 case UHF_PORT_U1_TIMEOUT:
4146 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4147 return -1;
4148 }
4149 port = XHCI_PORTPMSC(cp);
4150 v = xhci_op_read_4(sc, port);
4151 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4152 index, cp, v, 0);
4153 v &= ~XHCI_PM3_U1TO_SET(0xff);
4154 v |= XHCI_PM3_U1TO_SET(optval);
4155 xhci_op_write_4(sc, port, v);
4156 break;
4157 case UHF_PORT_U2_TIMEOUT:
4158 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4159 return -1;
4160 }
4161 port = XHCI_PORTPMSC(cp);
4162 v = xhci_op_read_4(sc, port);
4163 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4164 index, cp, v, 0);
4165 v &= ~XHCI_PM3_U2TO_SET(0xff);
4166 v |= XHCI_PM3_U2TO_SET(optval);
4167 xhci_op_write_4(sc, port, v);
4168 break;
4169 default:
4170 return -1;
4171 }
4172 }
4173 break;
4174 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
4175 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
4176 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
4177 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
4178 break;
4179 default:
4180 /* default from usbroothub */
4181 return buflen;
4182 }
4183
4184 return totlen;
4185 }
4186
4187 /* root hub interrupt */
4188
4189 static usbd_status
4190 xhci_root_intr_transfer(struct usbd_xfer *xfer)
4191 {
4192 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4193 usbd_status err;
4194
4195 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4196
4197 /* Insert last in queue. */
4198 mutex_enter(&sc->sc_lock);
4199 err = usb_insert_transfer(xfer);
4200 mutex_exit(&sc->sc_lock);
4201 if (err)
4202 return err;
4203
4204 	/* Pipe isn't running, so start it first */
4205 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4206 }
4207
4208 /* Wait for roothub port status/change */
4209 static usbd_status
4210 xhci_root_intr_start(struct usbd_xfer *xfer)
4211 {
4212 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4213 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4214 const bool polling = xhci_polling_p(sc);
4215
4216 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4217
4218 if (sc->sc_dying)
4219 return USBD_IOERROR;
4220
4221 if (!polling)
4222 mutex_enter(&sc->sc_lock);
4223 KASSERT(sc->sc_intrxfer[bn] == NULL);
4224 sc->sc_intrxfer[bn] = xfer;
4225 xfer->ux_status = USBD_IN_PROGRESS;
4226 if (!polling)
4227 mutex_exit(&sc->sc_lock);
4228
4229 return USBD_IN_PROGRESS;
4230 }
4231
4232 static void
4233 xhci_root_intr_abort(struct usbd_xfer *xfer)
4234 {
4235 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4236 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4237
4238 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4239
4240 KASSERT(mutex_owned(&sc->sc_lock));
4241 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
4242
4243 /* If xfer has already completed, nothing to do here. */
4244 if (sc->sc_intrxfer[bn] == NULL)
4245 return;
4246
4247 /*
4248 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
4249 * Cancel it.
4250 */
4251 KASSERT(sc->sc_intrxfer[bn] == xfer);
4252 xfer->ux_status = USBD_CANCELLED;
4253 usb_transfer_complete(xfer);
4254 }
4255
4256 static void
4257 xhci_root_intr_close(struct usbd_pipe *pipe)
4258 {
4259 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
4260 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
4261 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4262
4263 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4264
4265 KASSERT(mutex_owned(&sc->sc_lock));
4266
4267 /*
4268 * Caller must guarantee the xfer has completed first, by
4269 * closing the pipe only after normal completion or an abort.
4270 */
4271 KASSERT(sc->sc_intrxfer[bn] == NULL);
4272 }
4273
4274 static void
4275 xhci_root_intr_done(struct usbd_xfer *xfer)
4276 {
4277 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4278 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4279
4280 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4281
4282 KASSERT(mutex_owned(&sc->sc_lock));
4283
4284 /* Claim the xfer so it doesn't get completed again. */
4285 KASSERT(sc->sc_intrxfer[bn] == xfer);
4286 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
4287 sc->sc_intrxfer[bn] = NULL;
4288 }
4289
4290 /* -------------- */
4291 /* device control */
4292
4293 static usbd_status
4294 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
4295 {
4296 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4297 usbd_status err;
4298
4299 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4300
4301 /* Insert last in queue. */
4302 mutex_enter(&sc->sc_lock);
4303 err = usb_insert_transfer(xfer);
4304 mutex_exit(&sc->sc_lock);
4305 if (err)
4306 return err;
4307
4308 	/* Pipe isn't running, so start it first */
4309 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4310 }
4311
4312 static usbd_status
4313 xhci_device_ctrl_start(struct usbd_xfer *xfer)
4314 {
4315 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4316 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4317 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4318 struct xhci_ring * const tr = xs->xs_xr[dci];
4319 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4320 usb_device_request_t * const req = &xfer->ux_request;
4321 const bool isread = usbd_xfer_isread(xfer);
4322 const uint32_t len = UGETW(req->wLength);
4323 usb_dma_t * const dma = &xfer->ux_dmabuf;
4324 uint64_t parameter;
4325 uint32_t status;
4326 uint32_t control;
4327 u_int i;
4328 const bool polling = xhci_polling_p(sc);
4329
4330 XHCIHIST_FUNC();
4331 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
4332 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
4333 UGETW(req->wIndex), UGETW(req->wLength));
4334
4335 /* we rely on the bottom bits for extra info */
4336 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %zx",
4337 (uintptr_t) xfer);
4338
4339 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);
4340
4341 i = 0;
4342
4343 /* setup phase */
4344 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */
4345 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
4346 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
4347 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
4348 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
4349 XHCI_TRB_3_IDT_BIT;
4350 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4351
4352 if (len != 0) {
4353 /* data phase */
4354 parameter = DMAADDR(dma, 0);
4355 KASSERTMSG(len <= 0x10000, "len %d", len);
4356 status = XHCI_TRB_2_IRQ_SET(0) |
4357 XHCI_TRB_2_TDSZ_SET(0) |
4358 XHCI_TRB_2_BYTES_SET(len);
4359 control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
4360 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
4361 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4362 XHCI_TRB_3_IOC_BIT;
4363 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4364
4365 usb_syncmem(dma, 0, len,
4366 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4367 }
4368
4369 parameter = 0;
4370 status = XHCI_TRB_2_IRQ_SET(0);
4371 /* the status stage has inverted direction */
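	/* e.g. a control read (IN data stage) ends with an OUT status TRB */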
4372 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
4373 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
4374 XHCI_TRB_3_IOC_BIT;
4375 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4376
4377 if (!polling)
4378 mutex_enter(&tr->xr_lock);
4379 xhci_ring_put_xfer(sc, tr, xx, i);
4380 if (!polling)
4381 mutex_exit(&tr->xr_lock);
4382
4383 if (!polling)
4384 mutex_enter(&sc->sc_lock);
4385 xfer->ux_status = USBD_IN_PROGRESS;
4386 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4387 usbd_xfer_schedule_timeout(xfer);
4388 if (!polling)
4389 mutex_exit(&sc->sc_lock);
4390
4391 return USBD_IN_PROGRESS;
4392 }
4393
4394 static void
4395 xhci_device_ctrl_done(struct usbd_xfer *xfer)
4396 {
4397 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4398 usb_device_request_t *req = &xfer->ux_request;
4399 int len = UGETW(req->wLength);
4400 int rd = req->bmRequestType & UT_READ;
4401
4402 if (len)
4403 usb_syncmem(&xfer->ux_dmabuf, 0, len,
4404 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4405 }
4406
4407 static void
4408 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
4409 {
4410 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4411
4412 usbd_xfer_abort(xfer);
4413 }
4414
4415 static void
4416 xhci_device_ctrl_close(struct usbd_pipe *pipe)
4417 {
4418 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4419
4420 xhci_close_pipe(pipe);
4421 }
4422
4423 /* ------------------ */
4424 /* device isochronous */
4425
4426 static usbd_status
4427 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
4428 {
4429 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4430 usbd_status err;
4431
4432 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4433
4434 /* Insert last in queue. */
4435 mutex_enter(&sc->sc_lock);
4436 err = usb_insert_transfer(xfer);
4437 mutex_exit(&sc->sc_lock);
4438 if (err)
4439 return err;
4440
4441 return xhci_device_isoc_enter(xfer);
4442 }
4443
4444 static usbd_status
4445 xhci_device_isoc_enter(struct usbd_xfer *xfer)
4446 {
4447 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4448 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4449 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4450 struct xhci_ring * const tr = xs->xs_xr[dci];
4451 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4452 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
4453 uint32_t len = xfer->ux_length;
4454 usb_dma_t * const dma = &xfer->ux_dmabuf;
4455 uint64_t parameter;
4456 uint32_t status;
4457 uint32_t control;
4458 uint32_t mfindex;
4459 uint32_t offs;
4460 int i, ival;
4461 const bool polling = xhci_polling_p(sc);
4462 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
4463 const uint16_t mps = UE_GET_SIZE(MPS);
4464 const uint8_t maxb = xpipe->xp_maxb;
4465 u_int tdpc, tbc, tlbpc;
4466
4467 XHCIHIST_FUNC();
4468 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4469 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4470
4471 if (sc->sc_dying)
4472 return USBD_IOERROR;
4473
4474 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
4475 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4476
4477 const bool isread = usbd_xfer_isread(xfer);
4478 if (xfer->ux_length)
4479 usb_syncmem(dma, 0, xfer->ux_length,
4480 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4481
4482 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
4483 if (ival >= 1 && ival <= 16)
4484 ival = 1 << (ival - 1);
4485 else
4486 ival = 1; /* fake something up */
4487
4488 if (xpipe->xp_isoc_next == -1) {
4489 mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
4490 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
4491 mfindex = XHCI_MFINDEX_GET(mfindex + 1);
4492 mfindex /= USB_UFRAMES_PER_FRAME;
4493 mfindex += 7; /* 7 frames is max possible IST */
4494 xpipe->xp_isoc_next = roundup2(mfindex, ival);
4495 }
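	/*
	 * xp_isoc_next now holds the first frame index this TD may be
	 * scheduled in (at least one IST past the current microframe,
	 * rounded up to the interval); it advances by ival per frame below.
	 */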
4496
4497 offs = 0;
4498 for (i = 0; i < xfer->ux_nframes; i++) {
4499 len = xfer->ux_frlengths[i];
4500
4501 tdpc = howmany(len, mps);
4502 tbc = howmany(tdpc, maxb) - 1;
4503 tlbpc = tdpc % maxb;
4504 tlbpc = tlbpc ? tlbpc - 1 : maxb - 1;
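		/*
		 * Illustrative numbers: with mps 1024, maxb 3 and a 3000
		 * byte frame, tdpc = 3 packets, tbc = 0 (a single burst)
		 * and tlbpc = 2 (three packets in the last burst).
		 */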
4505
4506 KASSERTMSG(len <= 0x10000, "len %d", len);
4507 parameter = DMAADDR(dma, offs);
4508 status = XHCI_TRB_2_IRQ_SET(0) |
4509 XHCI_TRB_2_TDSZ_SET(0) |
4510 XHCI_TRB_2_BYTES_SET(len);
4511 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
4512 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4513 XHCI_TRB_3_TBC_SET(tbc) |
4514 XHCI_TRB_3_TLBPC_SET(tlbpc) |
4515 XHCI_TRB_3_IOC_BIT;
4516 if (XHCI_HCC_CFC(sc->sc_hcc)) {
4517 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4518 #if 0
4519 } else if (xpipe->xp_isoc_next == -1) {
4520 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4521 #endif
4522 } else {
4523 control |= XHCI_TRB_3_ISO_SIA_BIT;
4524 }
4525 #if 0
4526 if (i != xfer->ux_nframes - 1)
4527 control |= XHCI_TRB_3_BEI_BIT;
4528 #endif
4529 xhci_xfer_put_trb(xx, i, parameter, status, control);
4530
4531 xpipe->xp_isoc_next += ival;
4532 offs += len;
4533 }
4534
4535 xx->xx_isoc_done = 0;
4536
4537 if (!polling)
4538 mutex_enter(&tr->xr_lock);
4539 xhci_ring_put_xfer(sc, tr, xx, i);
4540 if (!polling)
4541 mutex_exit(&tr->xr_lock);
4542
4543 if (!polling)
4544 mutex_enter(&sc->sc_lock);
4545 xfer->ux_status = USBD_IN_PROGRESS;
4546 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4547 usbd_xfer_schedule_timeout(xfer);
4548 if (!polling)
4549 mutex_exit(&sc->sc_lock);
4550
4551 return USBD_IN_PROGRESS;
4552 }
4553
4554 static void
4555 xhci_device_isoc_abort(struct usbd_xfer *xfer)
4556 {
4557 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4558
4559 usbd_xfer_abort(xfer);
4560 }
4561
4562 static void
4563 xhci_device_isoc_close(struct usbd_pipe *pipe)
4564 {
4565 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4566
4567 xhci_close_pipe(pipe);
4568 }
4569
4570 static void
4571 xhci_device_isoc_done(struct usbd_xfer *xfer)
4572 {
4573 #ifdef USB_DEBUG
4574 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4575 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4576 #endif
4577 const bool isread = usbd_xfer_isread(xfer);
4578
4579 XHCIHIST_FUNC();
4580 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4581 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4582
4583 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4584 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4585 }
4586
4587 /* ----------- */
4588 /* device bulk */
4589
4590 static usbd_status
4591 xhci_device_bulk_transfer(struct usbd_xfer *xfer)
4592 {
4593 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4594 usbd_status err;
4595
4596 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4597
4598 /* Insert last in queue. */
4599 mutex_enter(&sc->sc_lock);
4600 err = usb_insert_transfer(xfer);
4601 mutex_exit(&sc->sc_lock);
4602 if (err)
4603 return err;
4604
4605 /*
4606 * Pipe isn't running (otherwise err would be USBD_INPROG),
4607 * so start it first.
4608 */
4609 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4610 }
4611
4612 static usbd_status
4613 xhci_device_bulk_start(struct usbd_xfer *xfer)
4614 {
4615 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4616 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4617 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4618 struct xhci_ring * const tr = xs->xs_xr[dci];
4619 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4620 const uint32_t len = xfer->ux_length;
4621 usb_dma_t * const dma = &xfer->ux_dmabuf;
4622 uint64_t parameter;
4623 uint32_t status;
4624 uint32_t control;
4625 u_int i = 0;
4626 const bool polling = xhci_polling_p(sc);
4627
4628 XHCIHIST_FUNC();
4629 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4630 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4631
4632 if (sc->sc_dying)
4633 return USBD_IOERROR;
4634
4635 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4636
4637 parameter = DMAADDR(dma, 0);
4638 const bool isread = usbd_xfer_isread(xfer);
4639 if (len)
4640 usb_syncmem(dma, 0, len,
4641 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4642
4643 	/*
4644 	 * XXX: (dsl) The physical buffer must not cross a 64k boundary.
4645 	 * If the user-supplied buffer crosses such a boundary then two
4646 	 * (or more) TRBs should be used.
4647 	 * If multiple TRBs are used, the td_size field must be set correctly.
4648 	 * For v1.0 devices (like Ivy Bridge) this is the number of USB data
4649 	 * blocks needed to complete the transfer.
4650 	 * Setting it to 1 in the last TRB causes an extra zero-length
4651 	 * data block to be sent.
4652 	 * The earlier documentation differs; I don't know how it behaves.
4653 	 */
4654 KASSERTMSG(len <= 0x10000, "len %d", len);
4655 status = XHCI_TRB_2_IRQ_SET(0) |
4656 XHCI_TRB_2_TDSZ_SET(0) |
4657 XHCI_TRB_2_BYTES_SET(len);
4658 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4659 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4660 XHCI_TRB_3_IOC_BIT;
4661 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4662
4663 if (!polling)
4664 mutex_enter(&tr->xr_lock);
4665 xhci_ring_put_xfer(sc, tr, xx, i);
4666 if (!polling)
4667 mutex_exit(&tr->xr_lock);
4668
4669 if (!polling)
4670 mutex_enter(&sc->sc_lock);
4671 xfer->ux_status = USBD_IN_PROGRESS;
4672 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4673 usbd_xfer_schedule_timeout(xfer);
4674 if (!polling)
4675 mutex_exit(&sc->sc_lock);
4676
4677 return USBD_IN_PROGRESS;
4678 }
4679
4680 static void
4681 xhci_device_bulk_done(struct usbd_xfer *xfer)
4682 {
4683 #ifdef USB_DEBUG
4684 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4685 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4686 #endif
4687 const bool isread = usbd_xfer_isread(xfer);
4688
4689 XHCIHIST_FUNC();
4690 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4691 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4692
4693 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4694 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4695 }
4696
4697 static void
4698 xhci_device_bulk_abort(struct usbd_xfer *xfer)
4699 {
4700 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4701
4702 usbd_xfer_abort(xfer);
4703 }
4704
4705 static void
4706 xhci_device_bulk_close(struct usbd_pipe *pipe)
4707 {
4708 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4709
4710 xhci_close_pipe(pipe);
4711 }
4712
4713 /* ---------------- */
4714 /* device interrupt */
4715
4716 static usbd_status
4717 xhci_device_intr_transfer(struct usbd_xfer *xfer)
4718 {
4719 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4720 usbd_status err;
4721
4722 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4723
4724 /* Insert last in queue. */
4725 mutex_enter(&sc->sc_lock);
4726 err = usb_insert_transfer(xfer);
4727 mutex_exit(&sc->sc_lock);
4728 if (err)
4729 return err;
4730
4731 /*
4732 * Pipe isn't running (otherwise err would be USBD_INPROG),
4733 * so start it first.
4734 */
4735 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4736 }
4737
4738 static usbd_status
4739 xhci_device_intr_start(struct usbd_xfer *xfer)
4740 {
4741 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4742 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4743 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4744 struct xhci_ring * const tr = xs->xs_xr[dci];
4745 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4746 const uint32_t len = xfer->ux_length;
4747 const bool polling = xhci_polling_p(sc);
4748 usb_dma_t * const dma = &xfer->ux_dmabuf;
4749 uint64_t parameter;
4750 uint32_t status;
4751 uint32_t control;
4752 u_int i = 0;
4753
4754 XHCIHIST_FUNC();
4755 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4756 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4757
4758 if (sc->sc_dying)
4759 return USBD_IOERROR;
4760
4761 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4762
4763 const bool isread = usbd_xfer_isread(xfer);
4764 if (len)
4765 usb_syncmem(dma, 0, len,
4766 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4767
4768 parameter = DMAADDR(dma, 0);
4769 KASSERTMSG(len <= 0x10000, "len %d", len);
4770 status = XHCI_TRB_2_IRQ_SET(0) |
4771 XHCI_TRB_2_TDSZ_SET(0) |
4772 XHCI_TRB_2_BYTES_SET(len);
4773 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4774 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
4775 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4776
4777 if (!polling)
4778 mutex_enter(&tr->xr_lock);
4779 xhci_ring_put_xfer(sc, tr, xx, i);
4780 if (!polling)
4781 mutex_exit(&tr->xr_lock);
4782
4783 if (!polling)
4784 mutex_enter(&sc->sc_lock);
4785 xfer->ux_status = USBD_IN_PROGRESS;
4786 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4787 usbd_xfer_schedule_timeout(xfer);
4788 if (!polling)
4789 mutex_exit(&sc->sc_lock);
4790
4791 return USBD_IN_PROGRESS;
4792 }
4793
4794 static void
4795 xhci_device_intr_done(struct usbd_xfer *xfer)
4796 {
4797 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4798 #ifdef USB_DEBUG
4799 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4800 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4801 #endif
4802 const bool isread = usbd_xfer_isread(xfer);
4803
4804 XHCIHIST_FUNC();
4805 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4806 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4807
4808 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4809
4810 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4811 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4812 }
4813
4814 static void
4815 xhci_device_intr_abort(struct usbd_xfer *xfer)
4816 {
4817 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4818
4819 XHCIHIST_FUNC();
4820 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);
4821
4822 KASSERT(mutex_owned(&sc->sc_lock));
4823 usbd_xfer_abort(xfer);
4824 }
4825
4826 static void
4827 xhci_device_intr_close(struct usbd_pipe *pipe)
4828 {
4829 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
4830
4831 XHCIHIST_FUNC();
4832 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);
4833
4834 xhci_close_pipe(pipe);
4835 }
4836