1 /*	$NetBSD: xhci.c,v 1.154 2022/01/25 11:17:39 msaitoh Exp $	*/
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * USB rev 2.0 and rev 3.1 specification
31 * http://www.usb.org/developers/docs/
32 * xHCI rev 1.1 specification
33 * http://www.intel.com/technology/usb/spec.htm
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.154 2022/01/25 11:17:39 msaitoh Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_usb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/kmem.h>
47 #include <sys/device.h>
48 #include <sys/select.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/sysctl.h>
56
57 #include <machine/endian.h>
58
59 #include <dev/usb/usb.h>
60 #include <dev/usb/usbdi.h>
61 #include <dev/usb/usbdivar.h>
62 #include <dev/usb/usbdi_util.h>
63 #include <dev/usb/usbhist.h>
64 #include <dev/usb/usb_mem.h>
65 #include <dev/usb/usb_quirks.h>
66
67 #include <dev/usb/xhcireg.h>
68 #include <dev/usb/xhcivar.h>
69 #include <dev/usb/usbroothub.h>
70
71
72 #ifdef USB_DEBUG
73 #ifndef XHCI_DEBUG
74 #define xhcidebug 0
75 #else /* !XHCI_DEBUG */
76 #define HEXDUMP(a, b, c) \
77 do { \
78 if (xhcidebug > 0) \
79 hexdump(printf, a, b, c); \
80 } while (/*CONSTCOND*/0)
81 static int xhcidebug = 0;
82
83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
84 {
85 int err;
86 const struct sysctlnode *rnode;
87 const struct sysctlnode *cnode;
88
89 err = sysctl_createv(clog, 0, NULL, &rnode,
90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
91 SYSCTL_DESCR("xhci global controls"),
92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
93
94 if (err)
95 goto fail;
96
97 /* control debugging printfs */
98 err = sysctl_createv(clog, 0, &rnode, &cnode,
99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
100 "debug", SYSCTL_DESCR("Enable debugging output"),
101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
102 if (err)
103 goto fail;
104
105 return;
106 fail:
107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
108 }
109
110 #endif /* !XHCI_DEBUG */
111 #endif /* USB_DEBUG */
112
113 #ifndef HEXDUMP
114 #define HEXDUMP(a, b, c)
115 #endif
116
117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
119 #define XHCIHIST_FUNC() USBHIST_FUNC()
120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug)
121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)
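/*
 * The history log macros take a format string plus exactly four
 * argument slots; unused slots are passed as 0, e.g. (as used later
 * in this file):
 *
 *	DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
 */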
123
124 #define XHCI_DCI_SLOT 0
125 #define XHCI_DCI_EP_CONTROL 1
126
127 #define XHCI_ICI_INPUT_CONTROL 0
128
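/*
 * Per-pipe private state; the usbdi layer allocates this as the pipe
 * structure itself (ub_pipesize is set to sizeof(struct xhci_pipe) in
 * xhci_init()).
 */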
129 struct xhci_pipe {
130 struct usbd_pipe xp_pipe;
131 struct usb_task xp_async_task;
132 int16_t xp_isoc_next; /* next frame */
133 uint8_t xp_maxb; /* max burst */
134 uint8_t xp_mult;
135 };
136
137 #define XHCI_COMMAND_RING_TRBS 256
138 #define XHCI_EVENT_RING_TRBS 256
139 #define XHCI_EVENT_RING_SEGMENTS 1
140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT
141
142 static usbd_status xhci_open(struct usbd_pipe *);
143 static void xhci_close_pipe(struct usbd_pipe *);
144 static int xhci_intr1(struct xhci_softc * const);
145 static void xhci_softintr(void *);
146 static void xhci_poll(struct usbd_bus *);
147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
149 static void xhci_abortx(struct usbd_xfer *);
150 static bool xhci_dying(struct usbd_bus *);
151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
153 struct usbd_port *);
154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
155 void *, int);
156
157 static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
158 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
159 static usbd_status xhci_reset_endpoint(struct usbd_pipe *);
160 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *,
161 struct xhci_slot *, u_int, uint32_t);
162 static usbd_status xhci_stop_endpoint(struct usbd_pipe *);
163
164 static void xhci_host_dequeue(struct xhci_ring * const);
165 static usbd_status xhci_set_dequeue(struct usbd_pipe *);
166
167 static usbd_status xhci_do_command(struct xhci_softc * const,
168 struct xhci_soft_trb * const, int);
169 static usbd_status xhci_do_command_locked(struct xhci_softc * const,
170 struct xhci_soft_trb * const, int);
171 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
172 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *);
173 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
174 static usbd_status xhci_enable_slot(struct xhci_softc * const,
175 uint8_t * const);
176 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
177 static usbd_status xhci_address_device(struct xhci_softc * const,
178 uint64_t, uint8_t, bool);
179 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
180 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
181 struct xhci_slot * const, u_int);
182 static usbd_status xhci_ring_init(struct xhci_softc * const,
183 struct xhci_ring **, size_t, size_t);
184 static void xhci_ring_free(struct xhci_softc * const,
185 struct xhci_ring ** const);
186
187 static void xhci_setup_ctx(struct usbd_pipe *);
188 static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
189 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
190 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
191 static uint32_t xhci_bival2ival(uint32_t, uint32_t);
192
193 static void xhci_noop(struct usbd_pipe *);
194
195 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
196 static usbd_status xhci_root_intr_start(struct usbd_xfer *);
197 static void xhci_root_intr_abort(struct usbd_xfer *);
198 static void xhci_root_intr_close(struct usbd_pipe *);
199 static void xhci_root_intr_done(struct usbd_xfer *);
200
201 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
202 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
203 static void xhci_device_ctrl_abort(struct usbd_xfer *);
204 static void xhci_device_ctrl_close(struct usbd_pipe *);
205 static void xhci_device_ctrl_done(struct usbd_xfer *);
206
207 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
208 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *);
209 static void xhci_device_isoc_abort(struct usbd_xfer *);
210 static void xhci_device_isoc_close(struct usbd_pipe *);
211 static void xhci_device_isoc_done(struct usbd_xfer *);
212
213 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
214 static usbd_status xhci_device_intr_start(struct usbd_xfer *);
215 static void xhci_device_intr_abort(struct usbd_xfer *);
216 static void xhci_device_intr_close(struct usbd_pipe *);
217 static void xhci_device_intr_done(struct usbd_xfer *);
218
219 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
220 static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
221 static void xhci_device_bulk_abort(struct usbd_xfer *);
222 static void xhci_device_bulk_close(struct usbd_pipe *);
223 static void xhci_device_bulk_done(struct usbd_xfer *);
224
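/*
 * Method tables hooked into the usbdi layer.  The per-pipe method
 * tables below are selected when a pipe is opened, according to the
 * endpoint transfer type.
 */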
225 static const struct usbd_bus_methods xhci_bus_methods = {
226 .ubm_open = xhci_open,
227 .ubm_softint = xhci_softintr,
228 .ubm_dopoll = xhci_poll,
229 .ubm_allocx = xhci_allocx,
230 .ubm_freex = xhci_freex,
231 .ubm_abortx = xhci_abortx,
232 .ubm_dying = xhci_dying,
233 .ubm_getlock = xhci_get_lock,
234 .ubm_newdev = xhci_new_device,
235 .ubm_rhctrl = xhci_roothub_ctrl,
236 };
237
238 static const struct usbd_pipe_methods xhci_root_intr_methods = {
239 .upm_transfer = xhci_root_intr_transfer,
240 .upm_start = xhci_root_intr_start,
241 .upm_abort = xhci_root_intr_abort,
242 .upm_close = xhci_root_intr_close,
243 .upm_cleartoggle = xhci_noop,
244 .upm_done = xhci_root_intr_done,
245 };
246
247
248 static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
249 .upm_transfer = xhci_device_ctrl_transfer,
250 .upm_start = xhci_device_ctrl_start,
251 .upm_abort = xhci_device_ctrl_abort,
252 .upm_close = xhci_device_ctrl_close,
253 .upm_cleartoggle = xhci_noop,
254 .upm_done = xhci_device_ctrl_done,
255 };
256
257 static const struct usbd_pipe_methods xhci_device_isoc_methods = {
258 .upm_transfer = xhci_device_isoc_transfer,
259 .upm_abort = xhci_device_isoc_abort,
260 .upm_close = xhci_device_isoc_close,
261 .upm_cleartoggle = xhci_noop,
262 .upm_done = xhci_device_isoc_done,
263 };
264
265 static const struct usbd_pipe_methods xhci_device_bulk_methods = {
266 .upm_transfer = xhci_device_bulk_transfer,
267 .upm_start = xhci_device_bulk_start,
268 .upm_abort = xhci_device_bulk_abort,
269 .upm_close = xhci_device_bulk_close,
270 .upm_cleartoggle = xhci_noop,
271 .upm_done = xhci_device_bulk_done,
272 };
273
274 static const struct usbd_pipe_methods xhci_device_intr_methods = {
275 .upm_transfer = xhci_device_intr_transfer,
276 .upm_start = xhci_device_intr_start,
277 .upm_abort = xhci_device_intr_abort,
278 .upm_close = xhci_device_intr_close,
279 .upm_cleartoggle = xhci_noop,
280 .upm_done = xhci_device_intr_done,
281 };
282
283 static inline uint32_t
284 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
285 {
286 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
287 }
288
289 static inline uint32_t
290 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset)
291 {
292 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
293 }
294
295 static inline uint32_t
296 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
297 {
298 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
299 }
300
301 static inline void
302 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
303 uint32_t value)
304 {
305 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
306 }
307
308 #if 0 /* unused */
309 static inline void
310 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
311 uint32_t value)
312 {
313 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
314 }
315 #endif /* unused */
316
317 static inline void
318 xhci_barrier(const struct xhci_softc * const sc, int flags)
319 {
320 bus_space_barrier(sc->sc_iot, sc->sc_ioh, 0, sc->sc_ios, flags);
321 }
322
323 static inline uint32_t
324 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
325 {
326 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
327 }
328
329 static inline uint32_t
330 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
331 {
332 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
333 }
334
335 static inline void
336 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
337 uint32_t value)
338 {
339 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
340 }
341
342 static inline uint64_t
343 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
344 {
345 uint64_t value;
346
347 #ifdef XHCI_USE_BUS_SPACE_8
348 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
349 #else
350 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
351 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
352 offset + 4) << 32;
353 #endif
354
355 return value;
356 }
357
358 static inline void
359 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
360 uint64_t value)
361 {
362 #ifdef XHCI_USE_BUS_SPACE_8
363 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
364 #else
365 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
366 (value >> 0) & 0xffffffff);
367 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
368 (value >> 32) & 0xffffffff);
369 #endif
370 }
371
372 static inline uint32_t
373 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
374 {
375 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
376 }
377
378 static inline void
379 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
380 uint32_t value)
381 {
382 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
383 }
384
385 static inline uint64_t
386 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
387 {
388 uint64_t value;
389
390 #ifdef XHCI_USE_BUS_SPACE_8
391 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
392 #else
393 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
394 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
395 offset + 4) << 32;
396 #endif
397
398 return value;
399 }
400
401 static inline void
402 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
403 uint64_t value)
404 {
405 #ifdef XHCI_USE_BUS_SPACE_8
406 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
407 #else
408 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
409 (value >> 0) & 0xffffffff);
410 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
411 (value >> 32) & 0xffffffff);
412 #endif
413 }
414
415 #if 0 /* unused */
416 static inline uint32_t
417 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
418 {
419 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
420 }
421 #endif /* unused */
422
423 static inline void
424 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
425 uint32_t value)
426 {
427 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
428 }
429
430 /* --- */
431
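/*
 * Map an endpoint descriptor to the xHCI Endpoint Type field of the
 * endpoint context (xHCI 1.1 section 6.2.3): 1-3 are Isoch/Bulk/Intr
 * OUT, 4 is Control, and 5-7 are Isoch/Bulk/Intr IN.  For example, a
 * bulk IN endpoint yields 0x2 | 0x4 = 6 (Bulk IN).
 */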
432 static inline uint8_t
433 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
434 {
435 u_int eptype = 0;
436
437 switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
438 case UE_CONTROL:
439 eptype = 0x0;
440 break;
441 case UE_ISOCHRONOUS:
442 eptype = 0x1;
443 break;
444 case UE_BULK:
445 eptype = 0x2;
446 break;
447 case UE_INTERRUPT:
448 eptype = 0x3;
449 break;
450 }
451
452 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
453 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
454 return eptype | 0x4;
455 else
456 return eptype;
457 }
458
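/*
 * Compute the Device Context Index (DCI) for an endpoint: DCI 1 is the
 * default control endpoint, OUT endpoint n maps to DCI 2n, and IN
 * endpoint n maps to DCI 2n+1.  E.g. endpoint address 0x81 (IN,
 * endpoint 1) maps to DCI 3.
 */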
459 static u_int
460 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
461 {
462 /* xHCI 1.0 section 4.5.1 */
463 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
464 u_int in = 0;
465
466 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
467 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
468 in = 1;
469
470 return epaddr * 2 + in;
471 }
472
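/*
 * Convert a Device Context Index to an Input Context Index: index 0 of
 * the input context holds the Input Control Context (xHCI 6.2.5), so
 * every device context entry is shifted up by one.
 */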
473 static inline u_int
474 xhci_dci_to_ici(const u_int i)
475 {
476 return i + 1;
477 }
478
479 static inline void *
480 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
481 const u_int dci)
482 {
483 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
484 }
485
486 #if 0 /* unused */
487 static inline bus_addr_t
488 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
489 const u_int dci)
490 {
491 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
492 }
493 #endif /* unused */
494
495 static inline void *
496 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
497 const u_int ici)
498 {
499 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
500 }
501
502 static inline bus_addr_t
503 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
504 const u_int ici)
505 {
506 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
507 }
508
509 static inline struct xhci_trb *
510 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
511 {
512 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
513 }
514
515 static inline bus_addr_t
516 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
517 {
518 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
519 }
520
521 static inline void
522 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx,
523 uint64_t parameter, uint32_t status, uint32_t control)
524 {
525 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb);
526 xx->xx_trb[idx].trb_0 = parameter;
527 xx->xx_trb[idx].trb_2 = status;
528 xx->xx_trb[idx].trb_3 = control;
529 }
530
531 static inline void
532 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
533 uint32_t control)
534 {
535 trb->trb_0 = htole64(parameter);
536 trb->trb_2 = htole32(status);
537 trb->trb_3 = htole32(control);
538 }
539
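/*
 * Translate the TRB pointer reported in an event TRB (trb_0) back into
 * an index into the given ring.  Returns nonzero if the pointer does
 * not refer to a TRB within the ring.
 */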
540 static int
541 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
542 {
543 /* base address of TRBs */
544 bus_addr_t trbp = xhci_ring_trbp(xr, 0);
545
546 /* trb_0 range sanity check */
547 if (trb_0 == 0 || trb_0 < trbp ||
548 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
549 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
550 return 1;
551 }
552 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
553 return 0;
554 }
555
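/*
 * Read the current Endpoint State field (xHCI 6.2.3) for the given DCI
 * from the slot's device context in system memory.
 */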
556 static unsigned int
557 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
558 u_int dci)
559 {
560 uint32_t *cp;
561
562 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
563 cp = xhci_slot_get_dcv(sc, xs, dci);
564 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
565 }
566
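/*
 * Return the bus index (0 for the USB 3.x root hub, 1 for the USB 2.0
 * root hub) that a controller port was assigned to by
 * xhci_id_protocols().  Controller ports are numbered 1..n.
 */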
567 static inline unsigned int
568 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
569 {
570 const unsigned int port = ctlrport - 1;
571 const uint8_t bit = __BIT(port % NBBY);
572
573 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
574 }
575
576 /*
577 * Return the roothub port for a controller port. Both are 1..n.
578 */
579 static inline unsigned int
580 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
581 {
582
583 return sc->sc_ctlrportmap[ctrlport - 1];
584 }
585
586 /*
587 * Return the controller port for a bus roothub port. Both are 1..n.
588 */
589 static inline unsigned int
590 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
591 unsigned int rhport)
592 {
593
594 return sc->sc_rhportmap[bn][rhport - 1];
595 }
596
597 /* --- */
598
599 void
600 xhci_childdet(device_t self, device_t child)
601 {
602 struct xhci_softc * const sc = device_private(self);
603
604 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
605 if (child == sc->sc_child2)
606 sc->sc_child2 = NULL;
607 else if (child == sc->sc_child)
608 sc->sc_child = NULL;
609 }
610
611 int
612 xhci_detach(struct xhci_softc *sc, int flags)
613 {
614 int rv = 0;
615
616 if (sc->sc_child2 != NULL) {
617 rv = config_detach(sc->sc_child2, flags);
618 if (rv != 0)
619 return rv;
620 KASSERT(sc->sc_child2 == NULL);
621 }
622
623 if (sc->sc_child != NULL) {
624 rv = config_detach(sc->sc_child, flags);
625 if (rv != 0)
626 return rv;
627 KASSERT(sc->sc_child == NULL);
628 }
629
630 /* XXX unconfigure/free slots */
631
632 /* verify: */
633 xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
634 xhci_op_write_4(sc, XHCI_USBCMD, 0);
635 /* do we need to wait for stop? */
636
637 xhci_op_write_8(sc, XHCI_CRCR, 0);
638 xhci_ring_free(sc, &sc->sc_cr);
639 cv_destroy(&sc->sc_command_cv);
640 cv_destroy(&sc->sc_cmdbusy_cv);
641
642 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
643 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
644 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY);
645 xhci_ring_free(sc, &sc->sc_er);
646
647 usb_freemem(&sc->sc_eventst_dma);
648
649 xhci_op_write_8(sc, XHCI_DCBAAP, 0);
650 usb_freemem(&sc->sc_dcbaa_dma);
651
652 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);
653
654 kmem_free(sc->sc_ctlrportbus,
655 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
656 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));
657
658 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
659 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
660 }
661
662 mutex_destroy(&sc->sc_lock);
663 mutex_destroy(&sc->sc_intr_lock);
664
665 pool_cache_destroy(sc->sc_xferpool);
666
667 return rv;
668 }
669
670 int
671 xhci_activate(device_t self, enum devact act)
672 {
673 struct xhci_softc * const sc = device_private(self);
674
675 switch (act) {
676 case DVACT_DEACTIVATE:
677 sc->sc_dying = true;
678 return 0;
679 default:
680 return EOPNOTSUPP;
681 }
682 }
683
684 bool
685 xhci_suspend(device_t self, const pmf_qual_t *qual)
686 {
687 struct xhci_softc * const sc = device_private(self);
688 size_t i, j, bn, dci;
689 int port;
690 uint32_t v;
691 usbd_status err;
692 bool ok = false;
693
694 XHCIHIST_FUNC(); XHCIHIST_CALLED();
695
696 mutex_enter(&sc->sc_lock);
697
698 /*
699 * Block issuance of new commands, and wait for all pending
700 * commands to complete.
701 */
702 KASSERT(sc->sc_suspender == NULL);
703 sc->sc_suspender = curlwp;
704 while (sc->sc_command_addr != 0)
705 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
706
707 /*
708 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
709 * xHCI Power Management, p. 342
710 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342
711 */
712
713 /*
714 * `1. Stop all USB activity by issuing Stop Endpoint Commands
715 * for Busy endpoints in the Running state. If the Force
716 * Save Context Capability (FSC = ``0'') is not supported,
717 * then Stop Endpoint Commands shall be issued for all idle
718 * endpoints in the Running state as well. The Stop
719 * Endpoint Command causes the xHC to update the respective
720 * Endpoint or Stream Contexts in system memory, e.g. the
721 * TR Dequeue Pointer, DCS, etc. fields. Refer to
722 * Implementation Note "0".'
723 */
724 for (i = 0; i < sc->sc_maxslots; i++) {
725 struct xhci_slot *xs = &sc->sc_slots[i];
726
727 /* Skip if the slot is not in use. */
728 if (xs->xs_idx == 0)
729 continue;
730
731 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
732 /* Skip if the endpoint is not Running. */
733 /* XXX What about Busy? */
734 if (xhci_get_epstate(sc, xs, dci) !=
735 XHCI_EPSTATE_RUNNING)
736 continue;
737
738 /* Stop endpoint. */
739 err = xhci_stop_endpoint_cmd(sc, xs, dci,
740 XHCI_TRB_3_SUSP_EP_BIT);
741 if (err) {
742 device_printf(self, "failed to stop endpoint"
743 " slot %zu dci %zu err %d\n",
744 i, dci, err);
745 goto out;
746 }
747 }
748 }
749
750 /*
751 * Next, suspend all the ports:
752 *
753 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15:
754 * Suspend-Resume, pp. 276-283
755 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276
756 */
757 for (bn = 0; bn < 2; bn++) {
758 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
759 /* 4.15.1: Port Suspend. */
760 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
761
762 /*
763 * `System software places individual ports
764 * into suspend mode by writing a ``3'' into
765 * the appropriate PORTSC register Port Link
766 * State (PLS) field (refer to Section 5.4.8).
767 * Software should only set the PLS field to
768 * ``3'' when the port is in the Enabled
769 * state.'
770 *
771 * `Software should not attempt to suspend a
772 * port unless the port reports that it is in
773 * the enabled (PED = ``1''; PLS < ``3'')
774 * state (refer to Section 5.4.8 for more
775 * information about PED and PLS).'
776 */
777 v = xhci_op_read_4(sc, port);
778 if (((v & XHCI_PS_PED) == 0) ||
779 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3)
780 continue;
781 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
782 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3);
783 xhci_op_write_4(sc, port, v);
784
785 /*
786 * `When the PLS field is written with U3
787 * (``3''), the status of the PLS bit will not
788 * change to the target U state U3 until the
789 * suspend signaling has completed to the
790 * attached device (which may be as long as
791 * 10ms.).'
792 *
793 * `Software is required to wait for U3
794 * transitions to complete before it puts the
795 * xHC into a low power state, and before
796 * resuming the port.'
797 *
798 * XXX Take advantage of the technique to
799 * reduce polling on host controllers that
800 * support the U3C capability.
801 */
802 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) {
803 v = xhci_op_read_4(sc, port);
804 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3)
805 break;
806 usb_delay_ms(&sc->sc_bus, 1);
807 }
808 if (j == XHCI_WAIT_PLS_U3) {
809 device_printf(self,
810 "suspend timeout on bus %zu port %zu\n",
811 bn, i);
812 goto out;
813 }
814 }
815 }
816
817 /*
818 * `2. Ensure that the Command Ring is in the Stopped state
819 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is
820 * empty), and all Command Completion Events associated
821 * with them have been received.'
822 *
823 * XXX
824 */
825
826 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */
827 xhci_op_write_4(sc, XHCI_USBCMD,
828 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS);
829
830 /*
831 * `4. Read the Operational Runtime, and VTIO registers in the
832 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ,
833 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their
834 * state.'
835 *
836 * (We don't use VTIO here (XXX for now?).)
837 */
838 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
839 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL);
840 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP);
841 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG);
842 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0));
843 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0));
844 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0));
845 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0));
846 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0));
847
848 /*
849 * `5. Set the Controller Save State (CSS) flag in the USBCMD
850 * register (5.4.1)...'
851 */
852 xhci_op_write_4(sc, XHCI_USBCMD,
853 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS);
854
855 /*
856 * `...and wait for the Save State Status (SSS) flag in the
857 * USBSTS register (5.4.2) to transition to ``0''.'
858 */
859 for (i = 0; i < XHCI_WAIT_SSS; i++) {
860 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0)
861 break;
862 usb_delay_ms(&sc->sc_bus, 1);
863 }
864 if (i >= XHCI_WAIT_SSS) {
865 device_printf(self, "suspend timeout, USBSTS.SSS\n");
866 /*
867 * Just optimistically go on and check SRE anyway --
868 * what's the worst that could happen?
869 */
870 }
871
872 /*
873 * `Note: After a Save or Restore operation completes, the
874 * Save/Restore Error (SRE) flag in the USBSTS register should
875 * be checked to ensure that the operation completed
876 * successfully.'
877 */
878 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
879 device_printf(self, "suspend error, USBSTS.SRE\n");
880 goto out;
881 }
882
883 /* Success! */
884 ok = true;
885
886 out: mutex_exit(&sc->sc_lock);
887 return ok;
888 }
889
890 bool
891 xhci_resume(device_t self, const pmf_qual_t *qual)
892 {
893 struct xhci_softc * const sc = device_private(self);
894 size_t i, j, bn, dci;
895 int port;
896 uint32_t v;
897 bool ok = false;
898
899 XHCIHIST_FUNC(); XHCIHIST_CALLED();
900
901 mutex_enter(&sc->sc_lock);
902 KASSERT(sc->sc_suspender);
903
904 /*
905 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
906 * xHCI Power Management, p. 343
907 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343
908 */
909
910 /*
911 * `4. Restore the Operational Runtime, and VTIO registers with
912 * their previously saved state in the following order:
913 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN,
914 * IMOD, and VTIO.'
915 *
916 * (We don't use VTIO here (for now?).)
917 */
918 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd);
919 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl);
920 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap);
921 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config);
922 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0);
923 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0);
924 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0);
925 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0);
926 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0);
927
928 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */
929
930 /*
931 * `5. Set the Controller Restore State (CRS) flag in the
932 * USBCMD register (5.4.1) to ``1''...'
933 */
934 xhci_op_write_4(sc, XHCI_USBCMD,
935 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS);
936
937 /*
938 * `...and wait for the Restore State Status (RSS) in the
939 * USBSTS register (5.4.2) to transition to ``0''.'
940 */
941 for (i = 0; i < XHCI_WAIT_RSS; i++) {
942 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0)
943 break;
944 usb_delay_ms(&sc->sc_bus, 1);
945 }
946 if (i >= XHCI_WAIT_RSS) {
947 device_printf(self, "resume timeout, USBSTS.RSS\n");
948 goto out;
949 }
950
951 /*
952 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits
953 * are consistent with the RCS values to be written to the
954 * CRCR.'
955 *
956 * XXX Hope just zeroing it is good enough!
957 */
958 xhci_host_dequeue(sc->sc_cr);
959
960 /*
961 * `7. Write the CRCR with the address and RCS value of the
962 * reinitialized Command Ring. Note that this write will
963 * cause the Command Ring to restart at the address
964 * specified by the CRCR.'
965 */
966 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
967 sc->sc_cr->xr_cs);
968
969 /*
970 * `8. Enable the controller by setting Run/Stop (R/S) =
971 * ``1''.'
972 */
973 xhci_op_write_4(sc, XHCI_USBCMD,
974 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS);
975
976 /*
977 * `9. Software shall walk the USB topology and initialize each
978 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and
979 * external hub ports attached to USB devices.'
980 *
981 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2
982 * `Port Resume', 4.15.2.2 `Host Initiated'.
983 *
984 * XXX We should maybe batch up initiating the state
985 * transitions, and then wait for them to complete all at once.
986 */
987 for (bn = 0; bn < 2; bn++) {
988 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
989 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
990
991 /* `When a port is in the U3 state: ...' */
992 v = xhci_op_read_4(sc, port);
993 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3)
994 continue;
995
996 /*
997 * `For a USB2 protocol port, software shall
998 * write a ``15'' (Resume) to the PLS field to
999 * initiate resume signaling. The port shall
1000 * transition to the Resume substate and the
1001 * xHC shall transmit the resume signaling
1002 * within 1ms (T_URSM). Software shall ensure
1003 * that resume is signaled for at least 20ms
1004 * (T_DRSMDN). Software shall start timing
1005 * T_DRSMDN from the write of ``15'' (Resume)
1006 * to PLS.'
1007 */
1008 if (bn == 1) {
1009 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0);
1010 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1011 v |= XHCI_PS_LWS;
1012 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME);
1013 xhci_op_write_4(sc, port, v);
1014 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
1015 } else {
1016 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0);
1017 }
1018
1019 /*
1020 * `For a USB3 protocol port [and a USB2
1021 * protocol port after transitioning to
1022 * Resume], software shall write a ``0'' (U0)
1023 * to the PLS field...'
1024 */
1025 v = xhci_op_read_4(sc, port);
1026 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1027 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0);
1028 xhci_op_write_4(sc, port, v);
1029
1030 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) {
1031 v = xhci_op_read_4(sc, port);
1032 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0)
1033 break;
1034 usb_delay_ms(&sc->sc_bus, 1);
1035 }
1036 if (j == XHCI_WAIT_PLS_U0) {
1037 device_printf(self,
1038 "resume timeout on bus %zu port %zu\n",
1039 bn, i);
1040 goto out;
1041 }
1042 }
1043 }
1044
1045 /*
1046 * `10. Restart each of the previously Running endpoints by
1047 * ringing their doorbells.'
1048 */
1049 for (i = 0; i < sc->sc_maxslots; i++) {
1050 struct xhci_slot *xs = &sc->sc_slots[i];
1051
1052 /* Skip if the slot is not in use. */
1053 if (xs->xs_idx == 0)
1054 continue;
1055
1056 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
1057 /* Skip if the endpoint is not Running. */
1058 if (xhci_get_epstate(sc, xs, dci) !=
1059 XHCI_EPSTATE_RUNNING)
1060 continue;
1061
1062 /* Ring the doorbell. */
1063 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
1064 }
1065 }
1066
1067 /*
1068 * `Note: After a Save or Restore operation completes, the
1069 * Save/Restore Error (SRE) flag in the USBSTS register should
1070 * be checked to ensure that the operation completed
1071 * successfully.'
1072 */
1073 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
1074 device_printf(self, "resume error, USBSTS.SRE\n");
1075 goto out;
1076 }
1077
1078 /* Resume command issuance. */
1079 sc->sc_suspender = NULL;
1080 cv_broadcast(&sc->sc_cmdbusy_cv);
1081
1082 /* Success! */
1083 ok = true;
1084
1085 out: mutex_exit(&sc->sc_lock);
1086 return ok;
1087 }
1088
1089 bool
1090 xhci_shutdown(device_t self, int flags)
1091 {
1092 return false;
1093 }
1094
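/*
 * Reset the host controller: wait until the controller is ready (CNR
 * clear), halt it, issue HCRST, then wait for the reset and the
 * subsequent CNR to clear (xHCI 4.2).
 */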
1095 static int
1096 xhci_hc_reset(struct xhci_softc * const sc)
1097 {
1098 uint32_t usbcmd, usbsts;
1099 int i;
1100
1101 /* Check controller not ready */
1102 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1103 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1104 if ((usbsts & XHCI_STS_CNR) == 0)
1105 break;
1106 usb_delay_ms(&sc->sc_bus, 1);
1107 }
1108 if (i >= XHCI_WAIT_CNR) {
1109 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
1110 return EIO;
1111 }
1112
1113 /* Halt controller */
1114 usbcmd = 0;
1115 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1116 usb_delay_ms(&sc->sc_bus, 1);
1117
1118 /* Reset controller */
1119 usbcmd = XHCI_CMD_HCRST;
1120 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1121 for (i = 0; i < XHCI_WAIT_HCRST; i++) {
1122 /*
1123 		 * Wait 1ms first.  Existing Intel xHCI controllers require a
1124 		 * 1ms delay to prevent a system hang (errata).
1125 */
1126 usb_delay_ms(&sc->sc_bus, 1);
1127 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
1128 if ((usbcmd & XHCI_CMD_HCRST) == 0)
1129 break;
1130 }
1131 if (i >= XHCI_WAIT_HCRST) {
1132 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
1133 return EIO;
1134 }
1135
1136 /* Check controller not ready */
1137 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1138 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1139 if ((usbsts & XHCI_STS_CNR) == 0)
1140 break;
1141 usb_delay_ms(&sc->sc_bus, 1);
1142 }
1143 if (i >= XHCI_WAIT_CNR) {
1144 aprint_error_dev(sc->sc_dev,
1145 "controller not ready timeout after reset\n");
1146 return EIO;
1147 }
1148
1149 return 0;
1150 }
1151
1152 /* 7.2 xHCI Support Protocol Capability */
1153 static void
1154 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
1155 {
1156 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1157
1158 /* XXX Cache this lot */
1159
1160 const uint32_t w0 = xhci_read_4(sc, ecp);
1161 const uint32_t w4 = xhci_read_4(sc, ecp + 4);
1162 const uint32_t w8 = xhci_read_4(sc, ecp + 8);
1163 const uint32_t wc = xhci_read_4(sc, ecp + 0xc);
1164
1165 aprint_debug_dev(sc->sc_dev,
1166 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc);
1167
1168 if (w4 != XHCI_XECP_USBID)
1169 return;
1170
1171 const int major = XHCI_XECP_SP_W0_MAJOR(w0);
1172 const int minor = XHCI_XECP_SP_W0_MINOR(w0);
1173 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
1174 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);
1175
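	/* Major/minor revision in BCD from w0 bits 31:16, e.g. 0x0310 = USB 3.1 */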
1176 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
1177 switch (mm) {
1178 case 0x0200:
1179 case 0x0300:
1180 case 0x0301:
1181 case 0x0310:
1182 case 0x0320:
1183 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
1184 major == 3 ? "ss" : "hs", cpo, cpo + cpc -1);
1185 break;
1186 default:
1187 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
1188 major, minor);
1189 return;
1190 }
1191
1192 const size_t bus = (major == 3) ? 0 : 1;
1193
1194 /* Index arrays with 0..n-1 where ports are numbered 1..n */
1195 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
1196 if (sc->sc_ctlrportmap[cp] != 0) {
1197 aprint_error_dev(sc->sc_dev, "controller port %zu "
1198 "already assigned", cp);
1199 continue;
1200 }
1201
1202 sc->sc_ctlrportbus[cp / NBBY] |=
1203 bus == 0 ? 0 : __BIT(cp % NBBY);
1204
1205 const size_t rhp = sc->sc_rhportcount[bus]++;
1206
1207 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
1208 "bus %zu rhp %zu is %d", bus, rhp,
1209 sc->sc_rhportmap[bus][rhp]);
1210
1211 sc->sc_rhportmap[bus][rhp] = cp + 1;
1212 sc->sc_ctlrportmap[cp] = rhp + 1;
1213 }
1214 }
1215
1216 /* Process extended capabilities */
1217 static void
1218 xhci_ecp(struct xhci_softc *sc)
1219 {
1220 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1221
1222 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4;
1223 while (ecp != 0) {
1224 uint32_t ecr = xhci_read_4(sc, ecp);
1225 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
1226 switch (XHCI_XECP_ID(ecr)) {
1227 case XHCI_ID_PROTOCOLS: {
1228 xhci_id_protocols(sc, ecp);
1229 break;
1230 }
1231 case XHCI_ID_USB_LEGACY: {
1232 uint8_t bios_sem;
1233
1234 /* Take host controller ownership from BIOS */
1235 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
1236 if (bios_sem) {
1237 /* sets xHCI to be owned by OS */
1238 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
1239 aprint_debug_dev(sc->sc_dev,
1240 "waiting for BIOS to give up control\n");
1241 for (int i = 0; i < 5000; i++) {
1242 bios_sem = xhci_read_1(sc, ecp +
1243 XHCI_XECP_BIOS_SEM);
1244 if (bios_sem == 0)
1245 break;
1246 DELAY(1000);
1247 }
1248 if (bios_sem) {
1249 aprint_error_dev(sc->sc_dev,
1250 "timed out waiting for BIOS\n");
1251 }
1252 }
1253 break;
1254 }
1255 default:
1256 break;
1257 }
1258 ecr = xhci_read_4(sc, ecp);
1259 if (XHCI_XECP_NEXT(ecr) == 0) {
1260 ecp = 0;
1261 } else {
1262 ecp += XHCI_XECP_NEXT(ecr) * 4;
1263 }
1264 }
1265 }
1266
1267 #define XHCI_HCCPREV1_BITS \
1268 "\177\020" /* New bitmask */ \
1269 "f\020\020XECP\0" \
1270 "f\014\4MAXPSA\0" \
1271 "b\013CFC\0" \
1272 "b\012SEC\0" \
1273 "b\011SBD\0" \
1274 "b\010FSE\0" \
1275 "b\7NSS\0" \
1276 "b\6LTC\0" \
1277 "b\5LHRC\0" \
1278 "b\4PIND\0" \
1279 "b\3PPC\0" \
1280 "b\2CZC\0" \
1281 "b\1BNC\0" \
1282 "b\0AC64\0" \
1283 "\0"
1284 #define XHCI_HCCV1_x_BITS \
1285 "\177\020" /* New bitmask */ \
1286 "f\020\020XECP\0" \
1287 "f\014\4MAXPSA\0" \
1288 "b\013CFC\0" \
1289 "b\012SEC\0" \
1290 "b\011SPC\0" \
1291 "b\010PAE\0" \
1292 "b\7NSS\0" \
1293 "b\6LTC\0" \
1294 "b\5LHRC\0" \
1295 "b\4PIND\0" \
1296 "b\3PPC\0" \
1297 "b\2CSZ\0" \
1298 "b\1BNC\0" \
1299 "b\0AC64\0" \
1300 "\0"
1301
1302 #define XHCI_HCC2_BITS \
1303 "\177\020" /* New bitmask */ \
1304 "b\7ETC_TSC\0" \
1305 "b\6ETC\0" \
1306 "b\5CIC\0" \
1307 "b\4LEC\0" \
1308 "b\3CTC\0" \
1309 "b\2FSC\0" \
1310 "b\1CMC\0" \
1311 "b\0U3C\0" \
1312 "\0"
1313
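/*
 * Enable event interrupts and set Run/Stop to start the controller.
 * Called from xhci_init() unless the XHCI_DEFERRED_START quirk is set,
 * in which case it is left to the attachment code to call it later.
 */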
1314 void
1315 xhci_start(struct xhci_softc *sc)
1316 {
1317 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
1318 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
1319 /* Intel xhci needs interrupt rate moderated. */
1320 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
1321 else
1322 xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
1323 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
1324 xhci_rt_read_4(sc, XHCI_IMOD(0)));
1325
1326 /* Go! */
1327 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
1328 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n",
1329 xhci_op_read_4(sc, XHCI_USBCMD));
1330 }
1331
1332 int
1333 xhci_init(struct xhci_softc *sc)
1334 {
1335 bus_size_t bsz;
1336 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff;
1337 uint32_t pagesize, config;
1338 int i = 0;
1339 uint16_t hciversion;
1340 uint8_t caplength;
1341
1342 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1343
1344 /* Set up the bus struct for the usb 3 and usb 2 buses */
1345 sc->sc_bus.ub_methods = &xhci_bus_methods;
1346 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
1347 sc->sc_bus.ub_usedma = true;
1348 sc->sc_bus.ub_hcpriv = sc;
1349
1350 sc->sc_bus2.ub_methods = &xhci_bus_methods;
1351 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
1352 sc->sc_bus2.ub_revision = USBREV_2_0;
1353 sc->sc_bus2.ub_usedma = true;
1354 sc->sc_bus2.ub_hcpriv = sc;
1355 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;
1356
1357 caplength = xhci_read_1(sc, XHCI_CAPLENGTH);
1358 hciversion = xhci_read_2(sc, XHCI_HCIVERSION);
1359
1360 if (hciversion < XHCI_HCIVERSION_0_96 ||
1361 hciversion >= 0x0200) {
1362 aprint_normal_dev(sc->sc_dev,
1363 "xHCI version %x.%x not known to be supported\n",
1364 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1365 } else {
1366 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
1367 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1368 }
1369
1370 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
1371 &sc->sc_cbh) != 0) {
1372 aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
1373 return ENOMEM;
1374 }
1375
1376 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
1377 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
1378 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
1379 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
1380 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
1381 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
1382 aprint_debug_dev(sc->sc_dev,
1383 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);
1384
1385 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
1386 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32;
1387
1388 char sbuf[128];
1389 if (hciversion < XHCI_HCIVERSION_1_0)
1390 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc);
1391 else
1392 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc);
1393 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
1394 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n",
1395 XHCI_HCC_XECP(sc->sc_hcc) * 4);
1396 if (hciversion >= XHCI_HCIVERSION_1_1) {
1397 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
1398 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2);
1399 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
1400 }
1401
1402 /* default all ports to bus 0, i.e. usb 3 */
1403 sc->sc_ctlrportbus = kmem_zalloc(
1404 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
1405 sc->sc_ctlrportmap = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1406
1407 /* controller port to bus roothub port map */
1408 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
1409 sc->sc_rhportmap[j] = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1410 }
1411
1412 /*
1413 * Process all Extended Capabilities
1414 */
1415 xhci_ecp(sc);
1416
1417 bsz = XHCI_PORTSC(sc->sc_maxports);
1418 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
1419 &sc->sc_obh) != 0) {
1420 aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
1421 return ENOMEM;
1422 }
1423
1424 dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
1425 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
1426 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
1427 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
1428 return ENOMEM;
1429 }
1430
1431 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
1432 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
1433 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
1434 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
1435 return ENOMEM;
1436 }
1437
1438 int rv;
1439 rv = xhci_hc_reset(sc);
1440 if (rv != 0) {
1441 return rv;
1442 }
1443
1444 if (sc->sc_vendor_init)
1445 sc->sc_vendor_init(sc);
1446
1447 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
1448 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
1449 pagesize = ffs(pagesize);
1450 if (pagesize == 0) {
1451 aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
1452 return EIO;
1453 }
1454 sc->sc_pgsz = 1 << (12 + (pagesize - 1));
1455 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
1456 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
1457 (uint32_t)sc->sc_maxslots);
1458 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);
1459
1460 int err;
1461 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
1462 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
1463 if (sc->sc_maxspbuf != 0) {
1464 err = usb_allocmem(sc->sc_bus.ub_dmatag,
1465 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
1466 USBMALLOC_COHERENT | USBMALLOC_ZERO,
1467 &sc->sc_spbufarray_dma);
1468 if (err) {
1469 aprint_error_dev(sc->sc_dev,
1470 "spbufarray init fail, err %d\n", err);
1471 return ENOMEM;
1472 }
1473
1474 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
1475 sc->sc_maxspbuf, KM_SLEEP);
1476 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
1477 for (i = 0; i < sc->sc_maxspbuf; i++) {
1478 usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
1479 /* allocate contexts */
1480 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz,
1481 sc->sc_pgsz, USBMALLOC_COHERENT | USBMALLOC_ZERO,
1482 dma);
1483 if (err) {
1484 aprint_error_dev(sc->sc_dev,
1485 "spbufarray_dma init fail, err %d\n", err);
1486 rv = ENOMEM;
1487 goto bad1;
1488 }
1489 spbufarray[i] = htole64(DMAADDR(dma, 0));
1490 usb_syncmem(dma, 0, sc->sc_pgsz,
1491 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1492 }
1493
1494 usb_syncmem(&sc->sc_spbufarray_dma, 0,
1495 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
1496 }
1497
1498 config = xhci_op_read_4(sc, XHCI_CONFIG);
1499 config &= ~0xFF;
1500 config |= sc->sc_maxslots & 0xFF;
1501 xhci_op_write_4(sc, XHCI_CONFIG, config);
1502
1503 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
1504 XHCI_COMMAND_RING_SEGMENTS_ALIGN);
1505 if (err) {
1506 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n",
1507 err);
1508 rv = ENOMEM;
1509 goto bad1;
1510 }
1511
1512 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
1513 XHCI_EVENT_RING_SEGMENTS_ALIGN);
1514 if (err) {
1515 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
1516 err);
1517 rv = ENOMEM;
1518 goto bad2;
1519 }
1520
1521 usb_dma_t *dma;
1522 size_t size;
1523 size_t align;
1524
1525 dma = &sc->sc_eventst_dma;
1526 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
1527 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
1528 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
1529 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
1530 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1531 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1532 if (err) {
1533 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
1534 err);
1535 rv = ENOMEM;
1536 goto bad3;
1537 }
1538
1539 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
1540 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
1541 KERNADDR(&sc->sc_eventst_dma, 0),
1542 sc->sc_eventst_dma.udma_block->size);
1543
1544 dma = &sc->sc_dcbaa_dma;
1545 size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
1546 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
1547 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
1548 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1549 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1550 if (err) {
1551 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
1552 rv = ENOMEM;
1553 goto bad4;
1554 }
1555 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
1556 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
1557 KERNADDR(&sc->sc_dcbaa_dma, 0),
1558 sc->sc_dcbaa_dma.udma_block->size);
1559
1560 if (sc->sc_maxspbuf != 0) {
1561 /*
1562 		 * DCBAA entry 0 holds the scratchpad buffer array pointer.
1563 */
1564 *(uint64_t *)KERNADDR(dma, 0) =
1565 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
1566 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1567 }
1568
1569 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
1570 KM_SLEEP);
1571 if (sc->sc_slots == NULL) {
1572 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
1573 rv = ENOMEM;
1574 goto bad;
1575 }
1576
1577 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
1578 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
1579 if (sc->sc_xferpool == NULL) {
1580 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
1581 err);
1582 rv = ENOMEM;
1583 goto bad;
1584 }
1585
1586 cv_init(&sc->sc_command_cv, "xhcicmd");
1587 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
1588 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
1589 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
1590
1591 struct xhci_erste *erst;
1592 erst = KERNADDR(&sc->sc_eventst_dma, 0);
1593 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0));
1594 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb);
1595 erst[0].erste_3 = htole32(0);
1596 usb_syncmem(&sc->sc_eventst_dma, 0,
1597 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);
1598
1599 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
1600 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
1601 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) |
1602 XHCI_ERDP_BUSY);
1603
1604 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
1605 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1606 sc->sc_cr->xr_cs);
1607
1608 xhci_barrier(sc, BUS_SPACE_BARRIER_WRITE);
1609
1610 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
1611 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);
1612
1613 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
1614 xhci_start(sc);
1615
1616 return 0;
1617
1618 bad:
1619 if (sc->sc_xferpool) {
1620 pool_cache_destroy(sc->sc_xferpool);
1621 sc->sc_xferpool = NULL;
1622 }
1623
1624 if (sc->sc_slots) {
1625 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
1626 sc->sc_maxslots);
1627 sc->sc_slots = NULL;
1628 }
1629
1630 usb_freemem(&sc->sc_dcbaa_dma);
1631 bad4:
1632 usb_freemem(&sc->sc_eventst_dma);
1633 bad3:
1634 xhci_ring_free(sc, &sc->sc_er);
1635 bad2:
1636 xhci_ring_free(sc, &sc->sc_cr);
1637 i = sc->sc_maxspbuf;
1638 bad1:
1639 for (int j = 0; j < i; j++)
1640 usb_freemem(&sc->sc_spbuf_dma[j]);
1641 usb_freemem(&sc->sc_spbufarray_dma);
1642
1643 return rv;
1644 }
1645
1646 static inline bool
1647 xhci_polling_p(struct xhci_softc * const sc)
1648 {
1649 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
1650 }
1651
1652 int
1653 xhci_intr(void *v)
1654 {
1655 struct xhci_softc * const sc = v;
1656 int ret = 0;
1657
1658 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1659
1660 if (sc == NULL)
1661 return 0;
1662
1663 mutex_spin_enter(&sc->sc_intr_lock);
1664
1665 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1666 goto done;
1667
1668 /* If we get an interrupt while polling, then just ignore it. */
1669 if (xhci_polling_p(sc)) {
1670 #ifdef DIAGNOSTIC
1671 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1672 #endif
1673 goto done;
1674 }
1675
1676 ret = xhci_intr1(sc);
1677 if (ret) {
1678 KASSERT(sc->sc_child || sc->sc_child2);
1679
1680 /*
1681 		 * One of the child busses could already be detached.  It doesn't
1682 * matter on which of the two the softintr is scheduled.
1683 */
1684 if (sc->sc_child)
1685 usb_schedsoftintr(&sc->sc_bus);
1686 else
1687 usb_schedsoftintr(&sc->sc_bus2);
1688 }
1689 done:
1690 mutex_spin_exit(&sc->sc_intr_lock);
1691 return ret;
1692 }
1693
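/*
 * Acknowledge an interrupt from the controller: clear the transient
 * USBSTS flags and the interrupter 0 IMAN.IP bit, and return nonzero
 * if the event ring needs to be serviced.
 */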
1694 int
1695 xhci_intr1(struct xhci_softc * const sc)
1696 {
1697 uint32_t usbsts;
1698 uint32_t iman;
1699
1700 XHCIHIST_FUNC();
1701
1702 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1703 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
1704 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
1705 XHCI_STS_HCE)) == 0) {
1706 DPRINTFN(16, "ignored intr not for %jd",
1707 device_unit(sc->sc_dev), 0, 0, 0);
1708 return 0;
1709 }
1710
1711 /*
1712 	 * Clear EINT and other transient flags, so as not to misinterpret
1713 	 * the next shared interrupt.  Also, to avoid a race, EINT must be
1714 	 * cleared before XHCI_IMAN_INTR_PEND is cleared.
1715 */
1716 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & XHCI_STS_RSVDP0);
1717
1718 #ifdef XHCI_DEBUG
1719 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1720 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1721 #endif
1722
1723 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1724 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1725 iman |= XHCI_IMAN_INTR_PEND;
1726 xhci_rt_write_4(sc, XHCI_IMAN(0), iman);
1727
1728 #ifdef XHCI_DEBUG
1729 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1730 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1731 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1732 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1733 #endif
1734
1735 return 1;
1736 }
1737
1738 /*
1739 * 3 port speed types used in USB stack
1740 *
1741 * usbdi speed
1742 * definition: USB_SPEED_* in usb.h
1743 * They are used in struct usbd_device in USB stack.
1744 * ioctl interface uses these values too.
1745 * port_status speed
1746 * definition: UPS_*_SPEED in usb.h
1747 * They are used in usb_port_status_t and valid only for USB 2.0.
1748 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus
1749 * of usb_port_status_ext_t indicates port speed.
1750 * Note that some 3.0 values overlap with 2.0 values.
1751  *      (e.g. 0x200 means UPS_PORT_POWER_SS in SS and
1752 * means UPS_LOW_SPEED in HS.)
1753 * port status returned from hub also uses these values.
1754 * On NetBSD UPS_OTHER_SPEED indicates port speed is super speed
1755 * or more.
1756 * xspeed:
1757 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
1758 * They are used in only slot context and PORTSC reg of xhci.
1759 * The difference between usbdi speed and xspeed is
1760 * that FS and LS values are swapped.
1761 */
1762
1763 /* convert usbdi speed to xspeed */
1764 static int
1765 xhci_speed2xspeed(int speed)
1766 {
1767 switch (speed) {
1768 case USB_SPEED_LOW: return 2;
1769 case USB_SPEED_FULL: return 1;
1770 default: return speed;
1771 }
1772 }
1773
1774 #if 0
1775 /* convert xspeed to usbdi speed */
1776 static int
1777 xhci_xspeed2speed(int xspeed)
1778 {
1779 switch (xspeed) {
1780 case 1: return USB_SPEED_FULL;
1781 case 2: return USB_SPEED_LOW;
1782 default: return xspeed;
1783 }
1784 }
1785 #endif
1786
1787 /* convert xspeed to port status speed */
1788 static int
1789 xhci_xspeed2psspeed(int xspeed)
1790 {
1791 switch (xspeed) {
1792 case 0: return 0;
1793 case 1: return UPS_FULL_SPEED;
1794 case 2: return UPS_LOW_SPEED;
1795 case 3: return UPS_HIGH_SPEED;
1796 default: return UPS_OTHER_SPEED;
1797 }
1798 }
1799
1800 /*
1801 * Construct input contexts and issue TRB to open pipe.
1802 */
1803 static usbd_status
1804 xhci_configure_endpoint(struct usbd_pipe *pipe)
1805 {
1806 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1807 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1808 #ifdef USB_DEBUG
1809 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1810 #endif
1811 struct xhci_soft_trb trb;
1812 usbd_status err;
1813
1814 XHCIHIST_FUNC();
1815 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
1816 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
1817 pipe->up_endpoint->ue_edesc->bmAttributes);
1818
1819 /* XXX ensure input context is available? */
1820
1821 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz);
1822
1823 /* set up context */
1824 xhci_setup_ctx(pipe);
1825
1826 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0),
1827 sc->sc_ctxsz * 1);
1828 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs,
1829 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1);
1830
1831 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1832 trb.trb_2 = 0;
1833 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1834 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1835
1836 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
1837
1838 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1839 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci),
1840 sc->sc_ctxsz * 1);
1841
1842 return err;
1843 }
1844
1845 #if 0
1846 static usbd_status
1847 xhci_unconfigure_endpoint(struct usbd_pipe *pipe)
1848 {
1849 #ifdef USB_DEBUG
1850 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1851 #endif
1852
1853 XHCIHIST_FUNC();
1854 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
1855
1856 return USBD_NORMAL_COMPLETION;
1857 }
1858 #endif
1859
1860 /* 4.6.8, 6.4.3.7 */
1861 static usbd_status
1862 xhci_reset_endpoint_locked(struct usbd_pipe *pipe)
1863 {
1864 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1865 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1866 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1867 struct xhci_soft_trb trb;
1868 usbd_status err;
1869
1870 XHCIHIST_FUNC();
1871 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1872
1873 KASSERT(mutex_owned(&sc->sc_lock));
1874
1875 trb.trb_0 = 0;
1876 trb.trb_2 = 0;
1877 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1878 XHCI_TRB_3_EP_SET(dci) |
1879 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP);
1880
1881 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1882
1883 return err;
1884 }
1885
1886 static usbd_status
1887 xhci_reset_endpoint(struct usbd_pipe *pipe)
1888 {
1889 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1890
1891 mutex_enter(&sc->sc_lock);
1892 usbd_status ret = xhci_reset_endpoint_locked(pipe);
1893 mutex_exit(&sc->sc_lock);
1894
1895 return ret;
1896 }
1897
1898 /*
1899 * 4.6.9, 6.4.3.8
1900 * Stop execution of TDs on xfer ring.
1901 * Should be called with sc_lock held.
1902 */
1903 static usbd_status
1904 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci,
1905 uint32_t trb3flags)
1906 {
1907 struct xhci_soft_trb trb;
1908 usbd_status err;
1909
1910 XHCIHIST_FUNC();
1911 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1912
1913 KASSERT(mutex_owned(&sc->sc_lock));
1914
1915 trb.trb_0 = 0;
1916 trb.trb_2 = 0;
1917 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1918 XHCI_TRB_3_EP_SET(dci) |
1919 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) |
1920 trb3flags;
1921
1922 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1923
1924 return err;
1925 }
1926
1927 static usbd_status
1928 xhci_stop_endpoint(struct usbd_pipe *pipe)
1929 {
1930 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1931 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1932 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1933
1934 XHCIHIST_FUNC();
1935 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1936
1937 KASSERT(mutex_owned(&sc->sc_lock));
1938
1939 return xhci_stop_endpoint_cmd(sc, xs, dci, 0);
1940 }
1941
1942 /*
1943 * Set TR Dequeue Pointer.
1944 * xHCI 1.1 4.6.10 6.4.3.9
1945 * Purge all of the TRBs on ring and reinitialize ring.
1946 * Set TR dequeue Pointer to 0 and Cycle State to 1.
1947 * EPSTATE of endpoint must be ERROR or STOPPED, otherwise CONTEXT_STATE
1948 * error will be generated.
1949 */
1950 static usbd_status
1951 xhci_set_dequeue_locked(struct usbd_pipe *pipe)
1952 {
1953 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1954 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1955 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1956 struct xhci_ring * const xr = xs->xs_xr[dci];
1957 struct xhci_soft_trb trb;
1958 usbd_status err;
1959
1960 XHCIHIST_FUNC();
1961 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1962
1963 KASSERT(mutex_owned(&sc->sc_lock));
1964 KASSERT(xr != NULL);
1965
1966 xhci_host_dequeue(xr);
1967
1968 /* set DCS */
1969 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
1970 trb.trb_2 = 0;
1971 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1972 XHCI_TRB_3_EP_SET(dci) |
1973 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
1974
1975 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1976
1977 return err;
1978 }
1979
1980 static usbd_status
1981 xhci_set_dequeue(struct usbd_pipe *pipe)
1982 {
1983 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1984
1985 mutex_enter(&sc->sc_lock);
1986 usbd_status ret = xhci_set_dequeue_locked(pipe);
1987 mutex_exit(&sc->sc_lock);
1988
1989 return ret;
1990 }
1991
1992 /*
1993 * Open new pipe: called from usbd_setup_pipe_flags.
1994 * Fills methods of pipe.
1995 * If pipe is not for ep0, calls configure_endpoint.
1996 */
1997 static usbd_status
1998 xhci_open(struct usbd_pipe *pipe)
1999 {
2000 struct usbd_device * const dev = pipe->up_dev;
2001 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
2002 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
2003 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2004 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2005 const u_int dci = xhci_ep_get_dci(ed);
2006 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
2007 usbd_status err;
2008
2009 XHCIHIST_FUNC();
2010 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr,
2011 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed);
2012 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx",
2013 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress,
2014 ed->bmAttributes);
2015 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize),
2016 ed->bInterval, 0, 0);
2017
2018 if (sc->sc_dying)
2019 return USBD_IOERROR;
2020
2021 /* Root Hub */
2022 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) {
2023 switch (ed->bEndpointAddress) {
2024 case USB_CONTROL_ENDPOINT:
2025 pipe->up_methods = &roothub_ctrl_methods;
2026 break;
2027 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
2028 pipe->up_methods = &xhci_root_intr_methods;
2029 break;
2030 default:
2031 pipe->up_methods = NULL;
2032 DPRINTFN(0, "bad bEndpointAddress 0x%02jx",
2033 ed->bEndpointAddress, 0, 0, 0);
2034 return USBD_INVAL;
2035 }
2036 return USBD_NORMAL_COMPLETION;
2037 }
2038
2039 switch (xfertype) {
2040 case UE_CONTROL:
2041 pipe->up_methods = &xhci_device_ctrl_methods;
2042 break;
2043 case UE_ISOCHRONOUS:
2044 pipe->up_methods = &xhci_device_isoc_methods;
2045 pipe->up_serialise = false;
2046 xpipe->xp_isoc_next = -1;
2047 break;
2048 case UE_BULK:
2049 pipe->up_methods = &xhci_device_bulk_methods;
2050 break;
2051 case UE_INTERRUPT:
2052 pipe->up_methods = &xhci_device_intr_methods;
2053 break;
2054 default:
2055 return USBD_IOERROR;
2056 break;
2057 }
2058
2059 KASSERT(xs != NULL);
2060 KASSERT(xs->xs_xr[dci] == NULL);
2061
2062 /* allocate transfer ring */
2063 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS,
2064 XHCI_TRB_ALIGN);
2065 if (err) {
2066 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0);
2067 return err;
2068 }
2069
2070 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT)
2071 return xhci_configure_endpoint(pipe);
2072
2073 return USBD_NORMAL_COMPLETION;
2074 }
2075
2076 /*
2077 * Closes pipe, called from usbd_kill_pipe via close methods.
2078 * If the endpoint to be closed is ep0, disable_slot.
2079 * Should be called with sc_lock held.
2080 */
2081 static void
2082 xhci_close_pipe(struct usbd_pipe *pipe)
2083 {
2084 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2085 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2086 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2087 const u_int dci = xhci_ep_get_dci(ed);
2088 struct xhci_soft_trb trb;
2089 uint32_t *cp;
2090
2091 XHCIHIST_FUNC();
2092
2093 if (sc->sc_dying)
2094 return;
2095
2096 /* xs is uninitialized before xhci_init_slot */
2097 if (xs == NULL || xs->xs_idx == 0)
2098 return;
2099
2100 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2101 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2102
2103 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
2104 KASSERT(mutex_owned(&sc->sc_lock));
2105
2106 if (pipe->up_dev->ud_depth == 0)
2107 return;
2108
2109 if (dci == XHCI_DCI_EP_CONTROL) {
2110 DPRINTFN(4, "closing ep0", 0, 0, 0, 0);
2111 /* This frees all rings */
2112 xhci_disable_slot(sc, xs->xs_idx);
2113 return;
2114 }
2115
2116 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED)
2117 (void)xhci_stop_endpoint(pipe);
2118
2119 /*
2120 * set appropriate bit to be dropped.
2121 * don't set DC bit to 1, otherwise all endpoints
2122 * would be deconfigured.
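	 * For reference, word 0 of the input control context carries the
	 * drop flags and word 1 the add flags (cf. xhci_update_ep0_mps and
	 * xhci_setup_ctx, which set XHCI_INCTX_1_ADD_MASK in cp[1]).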
2123 */
2124 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
2125 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));
2126 cp[1] = htole32(0);
2127
2128 /* XXX should be most significant one, not dci? */
2129 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
2130 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci));
2131
2132 /* configure ep context performs an implicit dequeue */
2133 xhci_host_dequeue(xs->xs_xr[dci]);
2134
2135 /* sync input contexts before they are read from memory */
2136 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
2137
2138 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
2139 trb.trb_2 = 0;
2140 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2141 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
2142
2143 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
2144 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2145
2146 xhci_ring_free(sc, &xs->xs_xr[dci]);
2147 xs->xs_xr[dci] = NULL;
2148 }
2149
2150 /*
2151 * Abort transfer.
2152 * Should be called with sc_lock held.
2153 */
2154 static void
2155 xhci_abortx(struct usbd_xfer *xfer)
2156 {
2157 XHCIHIST_FUNC();
2158 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2159 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
2160 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
2161
2162 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
2163 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
2164
2165 KASSERT(mutex_owned(&sc->sc_lock));
2166 ASSERT_SLEEPABLE();
2167
2168 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2169 xfer->ux_status == USBD_TIMEOUT),
2170 "bad abort status: %d", xfer->ux_status);
2171
2172 /*
2173 * If we're dying, skip the hardware action and just notify the
2174 * software that we're done.
2175 */
2176 if (sc->sc_dying) {
2177 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer,
2178 xfer->ux_status, 0, 0);
2179 goto dying;
2180 }
2181
2182 /*
2183 * HC Step 1: Stop execution of TD on the ring.
2184 */
2185 switch (xhci_get_epstate(sc, xs, dci)) {
2186 case XHCI_EPSTATE_HALTED:
2187 (void)xhci_reset_endpoint_locked(xfer->ux_pipe);
2188 break;
2189 case XHCI_EPSTATE_STOPPED:
2190 break;
2191 default:
2192 (void)xhci_stop_endpoint(xfer->ux_pipe);
2193 break;
2194 }
2195 #ifdef DIAGNOSTIC
2196 uint32_t epst = xhci_get_epstate(sc, xs, dci);
2197 if (epst != XHCI_EPSTATE_STOPPED)
2198 DPRINTFN(4, "dci %ju not stopped %ju", dci, epst, 0, 0);
2199 #endif
2200
2201 /*
2202 * HC Step 2: Remove any vestiges of the xfer from the ring.
2203 */
2204 xhci_set_dequeue_locked(xfer->ux_pipe);
2205
2206 /*
2207 * Final Step: Notify completion to waiting xfers.
2208 */
2209 dying:
2210 usb_transfer_complete(xfer);
2211 DPRINTFN(14, "end", 0, 0, 0, 0);
2212
2213 KASSERT(mutex_owned(&sc->sc_lock));
2214 }
2215
2216 static void
2217 xhci_host_dequeue(struct xhci_ring * const xr)
2218 {
2219 /* When dequeueing the controller, update our struct copy too */
2220 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE);
2221 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE,
2222 BUS_DMASYNC_PREWRITE);
2223 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies));
2224
2225 xr->xr_ep = 0;
2226 xr->xr_cs = 1;
2227 }
2228
2229 /*
2230 * Recover STALLed endpoint.
2231 * xHCI 1.1 sect 4.10.2.1
2232 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove
2233 * all transfers on transfer ring.
2234 * These are done in thread context asynchronously.
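 * Concretely, xhci_clear_endpoint_stall_async() schedules
 * xhci_clear_endpoint_stall_async_task() on the USB task queue, which
 * calls xhci_reset_endpoint() and xhci_set_dequeue() and then completes
 * the xfer.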
2235 */
2236 static void
2237 xhci_clear_endpoint_stall_async_task(void *cookie)
2238 {
2239 struct usbd_xfer * const xfer = cookie;
2240 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2241 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
2242 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
2243 struct xhci_ring * const tr = xs->xs_xr[dci];
2244
2245 XHCIHIST_FUNC();
2246 XHCIHIST_CALLARGS("xfer %#jx slot %ju dci %ju", (uintptr_t)xfer, xs->xs_idx,
2247 dci, 0);
2248
2249 /*
2250 * XXXMRG: Stall task can run after slot is disabled when yanked.
2251 * This hack notices that the xs has been memset() in
2252 * xhci_disable_slot() and returns. Both xhci_reset_endpoint()
2253 * and xhci_set_dequeue() rely upon a valid ring setup for correct
2254 * operation, and the latter will fault, as would
2255 * usb_transfer_complete() if it got that far.
2256 */
2257 if (xs->xs_idx == 0) {
2258 DPRINTFN(4, "ends xs_idx is 0", 0, 0, 0, 0);
2259 return;
2260 }
2261
2262 KASSERT(tr != NULL);
2263
2264 xhci_reset_endpoint(xfer->ux_pipe);
2265 xhci_set_dequeue(xfer->ux_pipe);
2266
2267 mutex_enter(&sc->sc_lock);
2268 tr->is_halted = false;
2269 usb_transfer_complete(xfer);
2270 mutex_exit(&sc->sc_lock);
2271 DPRINTFN(4, "ends", 0, 0, 0, 0);
2272 }
2273
2274 static usbd_status
2275 xhci_clear_endpoint_stall_async(struct usbd_xfer *xfer)
2276 {
2277 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2278 struct xhci_pipe * const xp = (struct xhci_pipe *)xfer->ux_pipe;
2279
2280 XHCIHIST_FUNC();
2281 XHCIHIST_CALLARGS("xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2282
2283 if (sc->sc_dying) {
2284 return USBD_IOERROR;
2285 }
2286
2287 usb_init_task(&xp->xp_async_task,
2288 xhci_clear_endpoint_stall_async_task, xfer, USB_TASKQ_MPSAFE);
2289 usb_add_task(xfer->ux_pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC);
2290 DPRINTFN(4, "ends", 0, 0, 0, 0);
2291
2292 return USBD_NORMAL_COMPLETION;
2293 }
2294
2295 /* Process roothub port status/change events and notify uhub_intr. */
2296 static void
2297 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport)
2298 {
2299 XHCIHIST_FUNC();
2300 XHCIHIST_CALLARGS("xhci%jd: port %ju status change",
2301 device_unit(sc->sc_dev), ctlrport, 0, 0);
2302
2303 if (ctlrport > sc->sc_maxports)
2304 return;
2305
2306 const size_t bn = xhci_ctlrport2bus(sc, ctlrport);
2307 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport);
2308 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn];
2309
2310 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change",
2311 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer);
2312
2313 if (xfer == NULL)
2314 return;
2315 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
2316
2317 uint8_t *p = xfer->ux_buf;
2318 memset(p, 0, xfer->ux_length);
2319 p[rhp / NBBY] |= 1 << (rhp % NBBY);
2320 xfer->ux_actlen = xfer->ux_length;
2321 xfer->ux_status = USBD_NORMAL_COMPLETION;
2322 usb_transfer_complete(xfer);
2323 }
2324
2325 /* Process Transfer Events */
2326 static void
2327 xhci_event_transfer(struct xhci_softc * const sc,
2328 const struct xhci_trb * const trb)
2329 {
2330 uint64_t trb_0;
2331 uint32_t trb_2, trb_3;
2332 uint8_t trbcode;
2333 u_int slot, dci;
2334 struct xhci_slot *xs;
2335 struct xhci_ring *xr;
2336 struct xhci_xfer *xx;
2337 struct usbd_xfer *xfer;
2338 usbd_status err;
2339
2340 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2341
2342 trb_0 = le64toh(trb->trb_0);
2343 trb_2 = le32toh(trb->trb_2);
2344 trb_3 = le32toh(trb->trb_3);
2345 trbcode = XHCI_TRB_2_ERROR_GET(trb_2);
2346 slot = XHCI_TRB_3_SLOT_GET(trb_3);
2347 dci = XHCI_TRB_3_EP_GET(trb_3);
2348 xs = &sc->sc_slots[slot];
2349 xr = xs->xs_xr[dci];
2350
2351 /* sanity check */
2352 KASSERT(xr != NULL);
2353 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots,
2354 "invalid xs_idx %u slot %u", xs->xs_idx, slot);
2355
2356 int idx = 0;
2357 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2358 if (xhci_trb_get_idx(xr, trb_0, &idx)) {
2359 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0);
2360 return;
2361 }
2362 xx = xr->xr_cookies[idx];
2363
2364 /* clear cookie of consumed TRB */
2365 xr->xr_cookies[idx] = NULL;
2366
2367 /*
2368 * xx is NULL if the pipe is open but the xfer has not been started.
2369 * This happens when stopping an idle pipe.
2370 */
2371 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) {
2372 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju",
2373 idx, (uintptr_t)xx, trbcode, dci);
2374 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0,
2375 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)),
2376 0, 0);
2377 return;
2378 }
2379 } else {
2380 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. */
2381 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
2382 }
2383 /* XXX this may not happen */
2384 if (xx == NULL) {
2385 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
2386 return;
2387 }
2388 xfer = &xx->xx_xfer;
2389 /* XXX this may happen when detaching */
2390 if (xfer == NULL) {
2391 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2392 (uintptr_t)xx, trb_0, 0, 0);
2393 return;
2394 }
2395 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2396 /* XXX I dunno why this happens */
2397 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2398
2399 if (!xfer->ux_pipe->up_repeat &&
2400 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2401 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2402 0, 0, 0);
2403 return;
2404 }
2405
2406 const uint8_t xfertype =
2407 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2408
2409 /* 4.11.5.2 Event Data TRB */
2410 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2411 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2412 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2413 if ((trb_0 & 0x3) == 0x3) {
2414 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2415 }
2416 }
2417
2418 switch (trbcode) {
2419 case XHCI_TRB_ERROR_SHORT_PKT:
2420 case XHCI_TRB_ERROR_SUCCESS:
2421 /*
2422 * A ctrl transfer can generate two events if it has a Data
2423 * stage. A short data stage can be OK and should not
2424 * complete the transfer as the status stage needs to be
2425 * performed.
2426 *
2427 * Note: Data and Status stage events point at same xfer.
2428 * ux_actlen and ux_dmabuf will be passed to
2429 * usb_transfer_complete after the Status stage event.
2430 *
2431 * Which stage generated the event can be distinguished:
2432 * + by checking the least significant 3 bits of trb_0 if ED==1
2433 * (see xhci_device_ctrl_start).
2434 * + by checking the type of the original TRB if ED==0.
2435 *
2436 * In addition, intr, bulk, and isoc transfers currently
2437 * consist of a single TD, so the "skip" is not needed.
2438 * ctrl xfers use EVENT_DATA, and others do not.
2439 * Thus the driver can switch the flow by checking the ED bit.
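 *
 * Informational summary of the handling below: isoc transfers
 * accumulate per-frame lengths and complete only after the event for
 * the last frame; for ED==0 the TRB remainder yields ux_actlen and a
 * Data Stage TRB does not complete the xfer; for ED==1 a low-bit tag
 * of 0x3 marks Data stage event data and does not complete the xfer
 * either.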
2440 */
2441 if (xfertype == UE_ISOCHRONOUS) {
2442 xfer->ux_frlengths[xx->xx_isoc_done] -=
2443 XHCI_TRB_2_REM_GET(trb_2);
2444 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2445 if (++xx->xx_isoc_done < xfer->ux_nframes)
2446 return;
2447 } else
2448 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2449 if (xfer->ux_actlen == 0)
2450 xfer->ux_actlen = xfer->ux_length -
2451 XHCI_TRB_2_REM_GET(trb_2);
2452 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2453 == XHCI_TRB_TYPE_DATA_STAGE) {
2454 return;
2455 }
2456 } else if ((trb_0 & 0x3) == 0x3) {
2457 return;
2458 }
2459 err = USBD_NORMAL_COMPLETION;
2460 break;
2461 case XHCI_TRB_ERROR_STOPPED:
2462 case XHCI_TRB_ERROR_LENGTH:
2463 case XHCI_TRB_ERROR_STOPPED_SHORT:
2464 err = USBD_IOERROR;
2465 break;
2466 case XHCI_TRB_ERROR_STALL:
2467 case XHCI_TRB_ERROR_BABBLE:
2468 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2469 xr->is_halted = true;
2470 /*
2471 * Try to claim this xfer for completion. If it has already
2472 * completed or aborted, drop it on the floor.
2473 */
2474 if (!usbd_xfer_trycomplete(xfer))
2475 return;
2476
2477 /*
2478 * Stalled endpoints can be recovered by issuing
2479 * a TYPE_RESET_EP command TRB to the xHCI instead of
2480 * issuing a CLEAR_FEATURE UF_ENDPOINT_HALT request
2481 * to the endpoint. However, this function may be
2482 * called from softint context (e.g. from umass), in
2483 * which case the driver would hit a KASSERT in
2484 * cv_timedwait in xhci_do_command.
2485 * To avoid this, reset_endpoint and
2486 * usb_transfer_complete are run asynchronously in the
2487 * usb task thread (and then umass issues clear
2488 * UF_ENDPOINT_HALT).
2489 */
2490
2491 /* Override the status. */
2492 xfer->ux_status = USBD_STALLED;
2493
2494 xhci_clear_endpoint_stall_async(xfer);
2495 return;
2496 default:
2497 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2498 err = USBD_IOERROR;
2499 break;
2500 }
2501
2502 /*
2503 * Try to claim this xfer for completion. If it has already
2504 * completed or aborted, drop it on the floor.
2505 */
2506 if (!usbd_xfer_trycomplete(xfer))
2507 return;
2508
2509 /* Set the status. */
2510 xfer->ux_status = err;
2511
2512 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
2513 (trb_0 & 0x3) == 0x0) {
2514 usb_transfer_complete(xfer);
2515 }
2516 }
2517
2518 /* Process Command complete events */
2519 static void
2520 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb)
2521 {
2522 uint64_t trb_0;
2523 uint32_t trb_2, trb_3;
2524
2525 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2526
2527 KASSERT(mutex_owned(&sc->sc_lock));
2528
2529 trb_0 = le64toh(trb->trb_0);
2530 trb_2 = le32toh(trb->trb_2);
2531 trb_3 = le32toh(trb->trb_3);
2532
2533 if (trb_0 == sc->sc_command_addr) {
2534 sc->sc_resultpending = false;
2535
2536 sc->sc_result_trb.trb_0 = trb_0;
2537 sc->sc_result_trb.trb_2 = trb_2;
2538 sc->sc_result_trb.trb_3 = trb_3;
2539 if (XHCI_TRB_2_ERROR_GET(trb_2) !=
2540 XHCI_TRB_ERROR_SUCCESS) {
2541 DPRINTFN(1, "command completion "
2542 "failure: 0x%016jx 0x%08jx 0x%08jx",
2543 trb_0, trb_2, trb_3, 0);
2544 }
2545 cv_signal(&sc->sc_command_cv);
2546 } else {
2547 DPRINTFN(1, "spurious event: %#jx 0x%016jx "
2548 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3);
2549 }
2550 }
2551
2552 /*
2553 * Process events.
2554 * called from xhci_softintr
2555 */
2556 static void
2557 xhci_handle_event(struct xhci_softc * const sc,
2558 const struct xhci_trb * const trb)
2559 {
2560 uint64_t trb_0;
2561 uint32_t trb_2, trb_3;
2562
2563 XHCIHIST_FUNC();
2564
2565 trb_0 = le64toh(trb->trb_0);
2566 trb_2 = le32toh(trb->trb_2);
2567 trb_3 = le32toh(trb->trb_3);
2568
2569 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx",
2570 (uintptr_t)trb, trb_0, trb_2, trb_3);
2571
2572 /*
2573 * 4.11.3.1, 6.4.2.1
2574 * TRB Pointer is invalid for these completion codes.
2575 */
2576 switch (XHCI_TRB_2_ERROR_GET(trb_2)) {
2577 case XHCI_TRB_ERROR_RING_UNDERRUN:
2578 case XHCI_TRB_ERROR_RING_OVERRUN:
2579 case XHCI_TRB_ERROR_VF_RING_FULL:
2580 return;
2581 default:
2582 if (trb_0 == 0) {
2583 return;
2584 }
2585 break;
2586 }
2587
2588 switch (XHCI_TRB_3_TYPE_GET(trb_3)) {
2589 case XHCI_TRB_EVENT_TRANSFER:
2590 xhci_event_transfer(sc, trb);
2591 break;
2592 case XHCI_TRB_EVENT_CMD_COMPLETE:
2593 xhci_event_cmd(sc, trb);
2594 break;
2595 case XHCI_TRB_EVENT_PORT_STS_CHANGE:
2596 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff));
2597 break;
2598 default:
2599 break;
2600 }
2601 }
2602
2603 static void
2604 xhci_softintr(void *v)
2605 {
2606 struct usbd_bus * const bus = v;
2607 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2608 struct xhci_ring * const er = sc->sc_er;
2609 struct xhci_trb *trb;
2610 int i, j, k;
2611
2612 XHCIHIST_FUNC();
2613
2614 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2615
2616 i = er->xr_ep;
2617 j = er->xr_cs;
2618
2619 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0);
2620
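	/*
	 * Consume event TRBs for as long as their cycle bit matches our
	 * consumer cycle state, toggling the state on each wrap of the
	 * ring, then hand the new dequeue pointer back via ERDP.
	 */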
2621 while (1) {
2622 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE,
2623 BUS_DMASYNC_POSTREAD);
2624 trb = &er->xr_trb[i];
2625 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0;
2626
2627 if (j != k)
2628 break;
2629
2630 xhci_handle_event(sc, trb);
2631
2632 i++;
2633 if (i == er->xr_ntrb) {
2634 i = 0;
2635 j ^= 1;
2636 }
2637 }
2638
2639 er->xr_ep = i;
2640 er->xr_cs = j;
2641
2642 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) |
2643 XHCI_ERDP_BUSY);
2644
2645 DPRINTFN(16, "ends", 0, 0, 0, 0);
2646
2647 return;
2648 }
2649
2650 static void
2651 xhci_poll(struct usbd_bus *bus)
2652 {
2653 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2654
2655 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2656
2657 mutex_enter(&sc->sc_intr_lock);
2658 int ret = xhci_intr1(sc);
2659 if (ret) {
2660 xhci_softintr(bus);
2661 }
2662 mutex_exit(&sc->sc_intr_lock);
2663
2664 return;
2665 }
2666
2667 static struct usbd_xfer *
2668 xhci_allocx(struct usbd_bus *bus, unsigned int nframes)
2669 {
2670 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2671 struct xhci_xfer *xx;
2672 u_int ntrbs;
2673
2674 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2675
2676 ntrbs = uimax(3, nframes);
2677 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs;
2678
2679 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
2680 if (xx != NULL) {
2681 memset(xx, 0, sizeof(*xx));
2682 if (ntrbs > 0) {
2683 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP);
2684 xx->xx_ntrb = ntrbs;
2685 }
2686 #ifdef DIAGNOSTIC
2687 xx->xx_xfer.ux_state = XFER_BUSY;
2688 #endif
2689 }
2690
2691 return &xx->xx_xfer;
2692 }
2693
2694 static void
2695 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
2696 {
2697 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2698 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
2699
2700 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2701
2702 #ifdef DIAGNOSTIC
2703 if (xfer->ux_state != XFER_BUSY &&
2704 xfer->ux_status != USBD_NOT_STARTED) {
2705 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx",
2706 (uintptr_t)xfer, xfer->ux_state, 0, 0);
2707 }
2708 xfer->ux_state = XFER_FREE;
2709 #endif
2710 if (xx->xx_ntrb > 0) {
2711 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb));
2712 xx->xx_trb = NULL;
2713 xx->xx_ntrb = 0;
2714 }
2715 pool_cache_put(sc->sc_xferpool, xx);
2716 }
2717
2718 static bool
2719 xhci_dying(struct usbd_bus *bus)
2720 {
2721 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2722
2723 return sc->sc_dying;
2724 }
2725
2726 static void
2727 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2728 {
2729 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2730
2731 *lock = &sc->sc_lock;
2732 }
2733
2734 extern uint32_t usb_cookie_no;
2735
2736 /*
2737 * xHCI 4.3
2738 * Called when uhub_explore finds a new device (via usbd_new_device).
2739 * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2740 * This function does:
2741 * Allocate and construct dev structure of default endpoint (ep0).
2742 * Allocate and open pipe of ep0.
2743 * Enable slot and initialize slot context.
2744 * Set Address.
2745 * Read initial device descriptor.
2746 * Determine initial MaxPacketSize (mps) by speed.
2747 * Read full device descriptor.
2748 * Register this device.
2749 * Finally the device state transitions to ADDRESSED.
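 * For the root hub (depth == 0, port == 0) only the default pipe setup
 * and descriptor reads apply; slot allocation and Set Address are
 * skipped.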
2750 */
2751 static usbd_status
2752 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2753 int speed, int port, struct usbd_port *up)
2754 {
2755 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2756 struct usbd_device *dev;
2757 usbd_status err;
2758 usb_device_descriptor_t *dd;
2759 struct xhci_slot *xs;
2760 uint32_t *cp;
2761
2762 XHCIHIST_FUNC();
2763 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2764 port, depth, speed, (uintptr_t)up);
2765
2766 KASSERT(KERNEL_LOCKED_P());
2767
2768 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2769 dev->ud_bus = bus;
2770 dev->ud_quirks = &usbd_no_quirk;
2771 dev->ud_addr = 0;
2772 dev->ud_ddesc.bMaxPacketSize = 0;
2773 dev->ud_depth = depth;
2774 dev->ud_powersrc = up;
2775 dev->ud_myhub = up->up_parent;
2776 dev->ud_speed = speed;
2777 dev->ud_langid = USBD_NOLANG;
2778 dev->ud_cookie.cookie = ++usb_cookie_no;
2779
2780 /* Set up default endpoint handle. */
2781 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2782 /* doesn't matter, just don't leave it uninitialized */
2783 dev->ud_ep0.ue_toggle = 0;
2784
2785 /* Set up default endpoint descriptor. */
2786 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2787 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2788 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2789 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2790 dev->ud_ep0desc.bInterval = 0;
2791
2792 /* 4.3, 4.8.2.1 */
2793 switch (speed) {
2794 case USB_SPEED_SUPER:
2795 case USB_SPEED_SUPER_PLUS:
2796 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2797 break;
2798 case USB_SPEED_FULL:
2799 /* XXX using 64 as initial mps of ep0 in FS */
2800 case USB_SPEED_HIGH:
2801 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2802 break;
2803 case USB_SPEED_LOW:
2804 default:
2805 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2806 break;
2807 }
2808
2809 up->up_dev = dev;
2810
2811 dd = &dev->ud_ddesc;
2812
2813 if (depth == 0 && port == 0) {
2814 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2815 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2816
2817 /* Establish the default pipe. */
2818 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2819 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2820 if (err) {
2821 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0);
2822 goto bad;
2823 }
2824 err = usbd_get_initial_ddesc(dev, dd);
2825 if (err) {
2826 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2827 goto bad;
2828 }
2829 } else {
2830 uint8_t slot = 0;
2831
2832 /* 4.3.2 */
2833 err = xhci_enable_slot(sc, &slot);
2834 if (err) {
2835 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0);
2836 goto bad;
2837 }
2838
2839 xs = &sc->sc_slots[slot];
2840 dev->ud_hcpriv = xs;
2841
2842 /* 4.3.3 initialize slot structure */
2843 err = xhci_init_slot(dev, slot);
2844 if (err) {
2845 DPRINTFN(1, "init slot %ju", err, 0, 0, 0);
2846 dev->ud_hcpriv = NULL;
2847 /*
2848 * We have to disable_slot here because
2849 * xs->xs_idx == 0 when xhci_init_slot fails,
2850 * in that case usbd_remove_dev won't work.
2851 */
2852 mutex_enter(&sc->sc_lock);
2853 xhci_disable_slot(sc, slot);
2854 mutex_exit(&sc->sc_lock);
2855 goto bad;
2856 }
2857
2858 /*
2859 * We have to establish the default pipe _after_ slot
2860 * structure has been prepared.
2861 */
2862 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2863 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2864 if (err) {
2865 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0,
2866 0);
2867 goto bad;
2868 }
2869
2870 /* 4.3.4 Address Assignment */
2871 err = xhci_set_address(dev, slot, false);
2872 if (err) {
2873 DPRINTFN(1, "failed! to set address: %ju", err, 0, 0, 0);
2874 goto bad;
2875 }
2876
2877 /* Allow device time to set new address */
2878 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE);
2879
2880 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2881 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT);
2882 HEXDUMP("slot context", cp, sc->sc_ctxsz);
2883 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3]));
2884 DPRINTFN(4, "device address %ju", addr, 0, 0, 0);
2885 /*
2886 * XXX ensure we know when the hardware does something
2887 * we can't yet cope with
2888 */
2889 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr);
2890 dev->ud_addr = addr;
2891
2892 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL,
2893 "addr %d already allocated", dev->ud_addr);
2894 /*
2895 * The root hub is given its own slot
2896 */
2897 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev;
2898
2899 err = usbd_get_initial_ddesc(dev, dd);
2900 if (err) {
2901 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2902 goto bad;
2903 }
2904
2905 /* 4.8.2.1 */
2906 if (USB_IS_SS(speed)) {
2907 if (dd->bMaxPacketSize != 9) {
2908 printf("%s: invalid mps 2^%u for SS ep0,"
2909 " using 512\n",
2910 device_xname(sc->sc_dev),
2911 dd->bMaxPacketSize);
2912 dd->bMaxPacketSize = 9;
2913 }
2914 USETW(dev->ud_ep0desc.wMaxPacketSize,
2915 (1 << dd->bMaxPacketSize));
2916 } else
2917 USETW(dev->ud_ep0desc.wMaxPacketSize,
2918 dd->bMaxPacketSize);
2919 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0);
2920 err = xhci_update_ep0_mps(sc, xs,
2921 UGETW(dev->ud_ep0desc.wMaxPacketSize));
2922 if (err) {
2923 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0);
2924 goto bad;
2925 }
2926 }
2927
2928 err = usbd_reload_device_desc(dev);
2929 if (err) {
2930 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
2931 goto bad;
2932 }
2933
2934 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,",
2935 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0);
2936 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,",
2937 dd->bDeviceClass, dd->bDeviceSubClass,
2938 dd->bDeviceProtocol, 0);
2939 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd",
2940 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations,
2941 dev->ud_speed);
2942
2943 usbd_get_device_strings(dev);
2944
2945 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev);
2946
2947 if (depth == 0 && port == 0) {
2948 usbd_attach_roothub(parent, dev);
2949 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0);
2950 return USBD_NORMAL_COMPLETION;
2951 }
2952
2953 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr);
2954 bad:
2955 if (err != USBD_NORMAL_COMPLETION) {
2956 if (depth == 0 && port == 0 && dev->ud_pipe0)
2957 usbd_kill_pipe(dev->ud_pipe0);
2958 usbd_remove_device(dev, up);
2959 }
2960
2961 return err;
2962 }
2963
2964 static usbd_status
2965 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp,
2966 size_t ntrb, size_t align)
2967 {
2968 size_t size = ntrb * XHCI_TRB_SIZE;
2969 struct xhci_ring *xr;
2970
2971 XHCIHIST_FUNC();
2972 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx",
2973 (uintptr_t)*xrp, ntrb, align, 0);
2974
2975 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP);
2976 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0);
2977
2978 int err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
2979 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xr->xr_dma);
2980 if (err) {
2981 kmem_free(xr, sizeof(struct xhci_ring));
2982 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0);
2983 return err;
2984 }
2985 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
2986 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP);
2987 xr->xr_trb = xhci_ring_trbv(xr, 0);
2988 xr->xr_ntrb = ntrb;
2989 xr->is_halted = false;
2990 xhci_host_dequeue(xr);
2991 *xrp = xr;
2992
2993 return USBD_NORMAL_COMPLETION;
2994 }
2995
2996 static void
2997 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr)
2998 {
2999 if (*xr == NULL)
3000 return;
3001
3002 usb_freemem(&(*xr)->xr_dma);
3003 mutex_destroy(&(*xr)->xr_lock);
3004 kmem_free((*xr)->xr_cookies,
3005 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb);
3006 kmem_free(*xr, sizeof(struct xhci_ring));
3007 *xr = NULL;
3008 }
3009
3010 static void
3011 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr,
3012 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs)
3013 {
3014 size_t i;
3015 u_int ri;
3016 u_int cs;
3017 uint64_t parameter;
3018 uint32_t status;
3019 uint32_t control;
3020
3021 XHCIHIST_FUNC();
3022 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju",
3023 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0);
3024
3025 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u",
3026 ntrbs, xr->xr_ntrb);
3027 for (i = 0; i < ntrbs; i++) {
3028 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr,
3029 (uintptr_t)trbs, i, 0);
3030 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx",
3031 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0);
3032 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) !=
3033 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3);
3034 }
3035
3036 ri = xr->xr_ep;
3037 cs = xr->xr_cs;
3038
3039 /*
3040 * Although the xhci hardware can do scatter/gather dma from
3041 * arbitrary sized buffers, there is a non-obvious restriction
3042 * that a LINK trb is only allowed at the end of a burst of
3043 * transfers - which might be 16kB.
3044 * Arbitrarily aligned LINK TRBs definitely fail on Ivy Bridge.
3045 * The simple solution is not to allow a LINK trb in the middle
3046 * of anything - as here.
3047 * XXX: (dsl) There are xhci controllers out there (eg some made by
3048 * ASMedia) that seem to lock up if they process a LINK trb but
3049 * cannot process the linked-to trb yet.
3050 * The code should write the 'cycle' bit on the link trb AFTER
3051 * adding the other trb.
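 * The loop below therefore writes the first TRB with its cycle bit
 * inverted (and the rest with the correct cycle), syncs everything out,
 * and only then flips the first TRB's cycle bit so the whole chain is
 * handed to the xHC at once.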
3052 */
3053 u_int firstep = xr->xr_ep;
3054 u_int firstcs = xr->xr_cs;
3055
3056 for (i = 0; i < ntrbs; ) {
3057 u_int oldri = ri;
3058 u_int oldcs = cs;
3059
3060 if (ri >= (xr->xr_ntrb - 1)) {
3061 /* Put Link TD at the end of ring */
3062 parameter = xhci_ring_trbp(xr, 0);
3063 status = 0;
3064 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) |
3065 XHCI_TRB_3_TC_BIT;
3066 xr->xr_cookies[ri] = NULL;
3067 xr->xr_ep = 0;
3068 xr->xr_cs ^= 1;
3069 ri = xr->xr_ep;
3070 cs = xr->xr_cs;
3071 } else {
3072 parameter = trbs[i].trb_0;
3073 status = trbs[i].trb_2;
3074 control = trbs[i].trb_3;
3075
3076 xr->xr_cookies[ri] = cookie;
3077 ri++;
3078 i++;
3079 }
3080 /*
3081 * If this is the first TRB, mark it invalid to prevent
3082 * the xHC from running it immediately.
3083 */
3084 if (oldri == firstep) {
3085 if (oldcs) {
3086 control &= ~XHCI_TRB_3_CYCLE_BIT;
3087 } else {
3088 control |= XHCI_TRB_3_CYCLE_BIT;
3089 }
3090 } else {
3091 if (oldcs) {
3092 control |= XHCI_TRB_3_CYCLE_BIT;
3093 } else {
3094 control &= ~XHCI_TRB_3_CYCLE_BIT;
3095 }
3096 }
3097 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control);
3098 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri,
3099 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3100 }
3101
3102 /* Now invert cycle bit of first TRB */
3103 if (firstcs) {
3104 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT);
3105 } else {
3106 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT);
3107 }
3108 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep,
3109 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3110
3111 xr->xr_ep = ri;
3112 xr->xr_cs = cs;
3113
3114 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep,
3115 xr->xr_cs, 0);
3116 }
3117
3118 static inline void
3119 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr,
3120 struct xhci_xfer *xx, u_int ntrb)
3121 {
3122 KASSERT(ntrb <= xx->xx_ntrb);
3123 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb);
3124 }
3125
3126 /*
3127 * Stop execution commands, purge all commands on command ring, and
3128 * rewind dequeue pointer.
3129 */
3130 static void
3131 xhci_abort_command(struct xhci_softc *sc)
3132 {
3133 struct xhci_ring * const cr = sc->sc_cr;
3134 uint64_t crcr;
3135 int i;
3136
3137 XHCIHIST_FUNC();
3138 XHCIHIST_CALLARGS("command %#jx timeout, aborting",
3139 sc->sc_command_addr, 0, 0, 0);
3140
3141 mutex_enter(&cr->xr_lock);
3142
3143 /* 4.6.1.2 Aborting a Command */
3144 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3145 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA);
3146
3147 for (i = 0; i < 500; i++) {
3148 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3149 if ((crcr & XHCI_CRCR_LO_CRR) == 0)
3150 break;
3151 usb_delay_ms(&sc->sc_bus, 1);
3152 }
3153 if ((crcr & XHCI_CRCR_LO_CRR) != 0) {
3154 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0);
3155 /* reset HC here? */
3156 }
3157
3158 /* reset command ring dequeue pointer */
3159 cr->xr_ep = 0;
3160 cr->xr_cs = 1;
3161 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs);
3162
3163 mutex_exit(&cr->xr_lock);
3164 }
3165
3166 /*
3167 * Put a command on the command ring, ring the doorbell, and cv_timedwait
3168 * with a timeout. Command completion is notified by cv_signal from
3169 * xhci_event_cmd() (called from xhci_softintr), or the wait times out.
3170 * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(),
3171 * then do_command examines it.
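 *
 * A typical caller (e.g. xhci_enable_slot below) fills a
 * struct xhci_soft_trb, calls xhci_do_command(sc, &trb,
 * USBD_DEFAULT_TIMEOUT), and may examine the completion TRB that is
 * copied back into *trb (e.g. XHCI_TRB_3_SLOT_GET(trb.trb_3)).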
3172 */
3173 static usbd_status
3174 xhci_do_command_locked(struct xhci_softc * const sc,
3175 struct xhci_soft_trb * const trb, int timeout)
3176 {
3177 struct xhci_ring * const cr = sc->sc_cr;
3178 usbd_status err;
3179
3180 XHCIHIST_FUNC();
3181 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx",
3182 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3183
3184 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
3185 KASSERT(mutex_owned(&sc->sc_lock));
3186
3187 while (sc->sc_command_addr != 0 ||
3188 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp))
3189 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
3190
3191 /*
3192 * If the enqueue pointer points at the last entry of the ring, that
3193 * entry is the Link TRB, so the command TRB will be stored in the 0th TRB.
3194 */
3195 if (cr->xr_ep == cr->xr_ntrb - 1)
3196 sc->sc_command_addr = xhci_ring_trbp(cr, 0);
3197 else
3198 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep);
3199
3200 sc->sc_resultpending = true;
3201
3202 mutex_enter(&cr->xr_lock);
3203 xhci_ring_put(sc, cr, NULL, trb, 1);
3204 mutex_exit(&cr->xr_lock);
3205
3206 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0);
3207
3208 while (sc->sc_resultpending) {
3209 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock,
3210 MAX(1, mstohz(timeout))) == EWOULDBLOCK) {
3211 xhci_abort_command(sc);
3212 err = USBD_TIMEOUT;
3213 goto timedout;
3214 }
3215 }
3216
3217 trb->trb_0 = sc->sc_result_trb.trb_0;
3218 trb->trb_2 = sc->sc_result_trb.trb_2;
3219 trb->trb_3 = sc->sc_result_trb.trb_3;
3220
3221 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx",
3222 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3223
3224 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) {
3225 case XHCI_TRB_ERROR_SUCCESS:
3226 err = USBD_NORMAL_COMPLETION;
3227 break;
3228 default:
3229 case 192 ... 223:
3230 DPRINTFN(5, "error %#jx",
3231 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0);
3232 err = USBD_IOERROR;
3233 break;
3234 case 224 ... 255:
3235 err = USBD_NORMAL_COMPLETION;
3236 break;
3237 }
3238
3239 timedout:
3240 sc->sc_resultpending = false;
3241 sc->sc_command_addr = 0;
3242 cv_broadcast(&sc->sc_cmdbusy_cv);
3243
3244 return err;
3245 }
3246
3247 static usbd_status
3248 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb,
3249 int timeout)
3250 {
3251
3252 mutex_enter(&sc->sc_lock);
3253 usbd_status ret = xhci_do_command_locked(sc, trb, timeout);
3254 mutex_exit(&sc->sc_lock);
3255
3256 return ret;
3257 }
3258
3259 static usbd_status
3260 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp)
3261 {
3262 struct xhci_soft_trb trb;
3263 usbd_status err;
3264
3265 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3266
3267 trb.trb_0 = 0;
3268 trb.trb_2 = 0;
3269 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
3270
3271 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3272 if (err != USBD_NORMAL_COMPLETION) {
3273 return err;
3274 }
3275
3276 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3);
3277
3278 return err;
3279 }
3280
3281 /*
3282 * xHCI 4.6.4
3283 * Deallocate ring and device/input context DMA buffers, and disable_slot.
3284 * All endpoints in the slot should be stopped.
3285 * Should be called with sc_lock held.
3286 */
3287 static usbd_status
3288 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot)
3289 {
3290 struct xhci_soft_trb trb;
3291 struct xhci_slot *xs;
3292 usbd_status err;
3293
3294 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3295
3296 if (sc->sc_dying)
3297 return USBD_IOERROR;
3298
3299 trb.trb_0 = 0;
3300 trb.trb_2 = 0;
3301 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) |
3302 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT);
3303
3304 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
3305
3306 if (!err) {
3307 xs = &sc->sc_slots[slot];
3308 if (xs->xs_idx != 0) {
3309 xhci_free_slot(sc, xs);
3310 xhci_set_dcba(sc, 0, slot);
3311 memset(xs, 0, sizeof(*xs));
3312 }
3313 }
3314
3315 return err;
3316 }
3317
3318 /*
3319 * Set address of device and transition slot state from ENABLED to ADDRESSED
3320 * if Block Setaddress Request (BSR) is false.
3321 * If BSR==true, transition slot state from ENABLED to DEFAULT.
3322 * see xHCI 1.1 4.5.3, 3.3.4
3323 * Should be called without sc_lock held.
3324 */
3325 static usbd_status
3326 xhci_address_device(struct xhci_softc * const sc,
3327 uint64_t icp, uint8_t slot_id, bool bsr)
3328 {
3329 struct xhci_soft_trb trb;
3330 usbd_status err;
3331
3332 XHCIHIST_FUNC();
3333 if (bsr) {
3334 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr",
3335 icp, slot_id, 0, 0);
3336 } else {
3337 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr",
3338 icp, slot_id, 0, 0);
3339 }
3340
3341 trb.trb_0 = icp;
3342 trb.trb_2 = 0;
3343 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) |
3344 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) |
3345 (bsr ? XHCI_TRB_3_BSR_BIT : 0);
3346
3347 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3348
3349 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS)
3350 err = USBD_NO_ADDR;
3351
3352 return err;
3353 }
3354
3355 static usbd_status
3356 xhci_update_ep0_mps(struct xhci_softc * const sc,
3357 struct xhci_slot * const xs, u_int mps)
3358 {
3359 struct xhci_soft_trb trb;
3360 usbd_status err;
3361 uint32_t * cp;
3362
3363 XHCIHIST_FUNC();
3364 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0);
3365
3366 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3367 cp[0] = htole32(0);
3368 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL));
3369
3370 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL));
3371 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps));
3372
3373 /* sync input contexts before they are read from memory */
3374 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3375 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3376 sc->sc_ctxsz * 4);
3377
3378 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
3379 trb.trb_2 = 0;
3380 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
3381 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX);
3382
3383 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3384 return err;
3385 }
3386
3387 static void
3388 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si)
3389 {
3390 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0);
3391
3392 XHCIHIST_FUNC();
3393 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd",
3394 (uintptr_t)&dcbaa[si], dcba, si, 0);
3395
3396 dcbaa[si] = htole64(dcba);
3397 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t),
3398 BUS_DMASYNC_PREWRITE);
3399 }
3400
3401 /*
3402 * Allocate device and input context DMA buffer, and
3403 * TRB DMA buffer for each endpoint.
3404 */
3405 static usbd_status
3406 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3407 {
3408 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3409 struct xhci_slot *xs;
3410
3411 XHCIHIST_FUNC();
3412 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3413
3414 xs = &sc->sc_slots[slot];
3415
3416 /* allocate contexts */
3417 int err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3418 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_dc_dma);
3419 if (err) {
3420 DPRINTFN(1, "failed to allocmem output device context %jd",
3421 err, 0, 0, 0);
3422 return USBD_NOMEM;
3423 }
3424
3425 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3426 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_ic_dma);
3427 if (err) {
3428 DPRINTFN(1, "failed to allocmem input device context %jd",
3429 err, 0, 0, 0);
3430 goto bad1;
3431 }
3432
3433 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr));
3434 xs->xs_idx = slot;
3435
3436 return USBD_NORMAL_COMPLETION;
3437
3438 bad1:
3439 usb_freemem(&xs->xs_dc_dma);
3440 xs->xs_idx = 0;
3441 return USBD_NOMEM;
3442 }
3443
3444 static void
3445 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs)
3446 {
3447 u_int dci;
3448
3449 XHCIHIST_FUNC();
3450 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
3451
3452 /* deallocate all allocated rings in the slot */
3453 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
3454 if (xs->xs_xr[dci] != NULL)
3455 xhci_ring_free(sc, &xs->xs_xr[dci]);
3456 }
3457 usb_freemem(&xs->xs_ic_dma);
3458 usb_freemem(&xs->xs_dc_dma);
3459 xs->xs_idx = 0;
3460 }
3461
3462 /*
3463 * Setup slot context, set Device Context Base Address, and issue
3464 * Set Address Device command.
3465 */
3466 static usbd_status
3467 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3468 {
3469 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3470 struct xhci_slot *xs;
3471 usbd_status err;
3472
3473 XHCIHIST_FUNC();
3474 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3475
3476 xs = &sc->sc_slots[slot];
3477
3478 xhci_setup_ctx(dev->ud_pipe0);
3479
3480 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3481 sc->sc_ctxsz * 3);
3482
3483 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3484
3485 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3486
3487 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3488 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3489 sc->sc_ctxsz * 2);
3490
3491 return err;
3492 }
3493
3494 /*
3495 * 4.8.2, 6.2.3.2
3496 * construct slot/endpoint context parameters and do syncmem
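 * The function fills, in order: the input control context add flags,
 * the input slot context (route string, speed, TT and hub fields), and
 * the input endpoint context (type, CErr, max packet/burst, interval
 * and the TR dequeue pointer), then syncs the input context DMA memory.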
3497 */
3498 static void
3499 xhci_setup_ctx(struct usbd_pipe *pipe)
3500 {
3501 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3502 struct usbd_device *dev = pipe->up_dev;
3503 struct xhci_slot * const xs = dev->ud_hcpriv;
3504 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3505 const u_int dci = xhci_ep_get_dci(ed);
3506 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3507 uint32_t *cp;
3508 uint16_t mps = UGETW(ed->wMaxPacketSize);
3509 uint8_t speed = dev->ud_speed;
3510 uint8_t ival = ed->bInterval;
3511
3512 XHCIHIST_FUNC();
3513 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3514 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3515
3516 /* set up initial input control context */
3517 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3518 cp[0] = htole32(0);
3519 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3520 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT));
3521 cp[7] = htole32(0);
3522
3523 /* set up input slot context */
3524 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
3525 cp[0] =
3526 XHCI_SCTX_0_CTX_NUM_SET(dci) |
3527 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed));
3528 cp[1] = 0;
3529 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0);
3530 cp[3] = 0;
3531 xhci_setup_route(pipe, cp);
3532 xhci_setup_tthub(pipe, cp);
3533
3534 cp[0] = htole32(cp[0]);
3535 cp[1] = htole32(cp[1]);
3536 cp[2] = htole32(cp[2]);
3537 cp[3] = htole32(cp[3]);
3538
3539 /* set up input endpoint context */
3540 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci));
3541 cp[0] =
3542 XHCI_EPCTX_0_EPSTATE_SET(0) |
3543 XHCI_EPCTX_0_MULT_SET(0) |
3544 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) |
3545 XHCI_EPCTX_0_LSA_SET(0) |
3546 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0);
3547 cp[1] =
3548 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) |
3549 XHCI_EPCTX_1_HID_SET(0) |
3550 XHCI_EPCTX_1_MAXB_SET(0);
3551
3552 if (xfertype != UE_ISOCHRONOUS)
3553 cp[1] |= XHCI_EPCTX_1_CERR_SET(3);
3554
3555 if (xfertype == UE_CONTROL)
3556 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); /* 6.2.3 */
3557 else if (USB_IS_SS(speed))
3558 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(mps);
3559 else
3560 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(UE_GET_SIZE(mps));
3561
3562 xhci_setup_maxburst(pipe, cp);
3563
3564 switch (xfertype) {
3565 case UE_CONTROL:
3566 break;
3567 case UE_BULK:
3568 /* XXX Set MaxPStreams, HID, and LSA if streams enabled */
3569 break;
3570 case UE_INTERRUPT:
3571 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3572 ival = pipe->up_interval;
3573
3574 ival = xhci_bival2ival(ival, speed);
3575 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3576 break;
3577 case UE_ISOCHRONOUS:
3578 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3579 ival = pipe->up_interval;
3580
3581 /* xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 */
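		/*
		 * Informational: an isoc bInterval of N means a period of
		 * 2^(N-1) frames at full speed and 2^(N-1) microframes at
		 * high speed and above, while the endpoint context interval
		 * is an exponent of 125us units; hence the extra += 3
		 * (1 frame == 8 microframes) for full speed before the
		 * common decrement.
		 */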
3582 if (speed == USB_SPEED_FULL)
3583 ival += 3; /* 1ms -> 125us */
3584 ival--;
3585 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3586 break;
3587 default:
3588 break;
3589 }
3590 DPRINTFN(4, "setting ival %ju MaxBurst %#jx",
3591 XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_1_MAXB_GET(cp[1]), 0, 0);
3592
3593 /* rewind TR dequeue pointer in xHC */
3594 /* can't use xhci_ep_get_dci() yet? */
3595 *(uint64_t *)(&cp[2]) = htole64(
3596 xhci_ring_trbp(xs->xs_xr[dci], 0) |
3597 XHCI_EPCTX_2_DCS_SET(1));
3598
3599 cp[0] = htole32(cp[0]);
3600 cp[1] = htole32(cp[1]);
3601 cp[4] = htole32(cp[4]);
3602
3603 /* rewind TR dequeue pointer in driver */
3604 struct xhci_ring *xr = xs->xs_xr[dci];
3605 mutex_enter(&xr->xr_lock);
3606 xhci_host_dequeue(xr);
3607 mutex_exit(&xr->xr_lock);
3608
3609 /* sync input contexts before they are read from memory */
3610 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3611 }
3612
3613 /*
3614 * Setup route string and roothub port of given device for slot context
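 *
 * Informational example: for a device on port 2 of an external hub
 * that is itself attached to root hub port 4, the loop below first
 * accumulates route = 0x24, then shifts off the root hub nibble,
 * leaving route = 0x2 with rhport = 4.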
3615 */
3616 static void
3617 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3618 {
3619 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3620 struct usbd_device *dev = pipe->up_dev;
3621 struct usbd_port *up = dev->ud_powersrc;
3622 struct usbd_device *hub;
3623 struct usbd_device *adev;
3624 uint8_t rhport = 0;
3625 uint32_t route = 0;
3626
3627 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3628
3629 /* Locate root hub port and determine route string */
3630 /* 4.3.3 route string does not include roothub port */
3631 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3632 uint32_t dep;
3633
3634 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd",
3635 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3636 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3637 -1);
3638
3639 if (hub->ud_powersrc == NULL)
3640 break;
3641 dep = hub->ud_depth;
3642 if (dep == 0)
3643 break;
3644 rhport = hub->ud_powersrc->up_portno;
3645 if (dep > USB_HUB_MAX_DEPTH)
3646 continue;
3647
3648 route |=
3649 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3650 << ((dep - 1) * 4);
3651 }
3652 route = route >> 4;
3653 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
3654
3655 /* Locate port on upstream high speed hub */
3656 for (adev = dev, hub = up->up_parent;
3657 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3658 adev = hub, hub = hub->ud_myhub)
3659 ;
3660 if (hub) {
3661 int p;
3662 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3663 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) {
3664 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1];
3665 goto found;
3666 }
3667 }
3668 panic("%s: cannot find HS port", __func__);
3669 found:
3670 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3671 } else {
3672 dev->ud_myhsport = NULL;
3673 }
3674
3675 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3676
3677 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3678 ctlrport, route, (uintptr_t)hub);
3679
3680 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3681 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3682 }
3683
3684 /*
3685 * Set up whether the device is a hub, whether the device uses MTT,
3686 * and the TT information if it uses MTT.
3687 */
3688 static void
3689 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3690 {
3691 struct usbd_device *dev = pipe->up_dev;
3692 struct usbd_port *myhsport = dev->ud_myhsport;
3693 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3694 uint32_t speed = dev->ud_speed;
3695 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3696 uint8_t tthubslot, ttportnum;
3697 bool ishub;
3698 bool usemtt;
3699
3700 XHCIHIST_FUNC();
3701
3702 /*
3703 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3704 * tthubslot:
3705 * This is the slot ID of parent HS hub
3706 * if LS/FS device is connected && connected through HS hub.
3707 * This is 0 if device is not LS/FS device ||
3708 * parent hub is not HS hub ||
3709 * attached to root hub.
3710 * ttportnum:
3711 * This is the downstream facing port of parent HS hub
3712 * if LS/FS device is connected.
3713 * This is 0 if device is not LS/FS device ||
3714 * parent hub is not HS hub ||
3715 * attached to root hub.
3716 */
3717 if (myhsport &&
3718 myhsport->up_parent->ud_addr != rhaddr &&
3719 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3720 ttportnum = myhsport->up_portno;
3721 tthubslot = myhsport->up_parent->ud_addr;
3722 } else {
3723 ttportnum = 0;
3724 tthubslot = 0;
3725 }
3726 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd",
3727 (uintptr_t)myhsport, ttportnum, tthubslot, 0);
3728
3729 /* ishub is valid after reading UDESC_DEVICE */
3730 ishub = (dd->bDeviceClass == UDCLASS_HUB);
3731
3732 /* dev->ud_hub is valid after reading UDESC_HUB */
3733 if (ishub && dev->ud_hub) {
3734 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc;
3735 uint8_t ttt =
3736 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK);
3737
3738 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts);
3739 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt);
3740 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0);
3741 }
3742
3743 #define IS_MTTHUB(dd) \
3744 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT)
3745
3746 /*
3747 * MTT flag is set if
3748 * 1. this is HS hub && MTTs are supported and enabled; or
3749 * 2. this is LS or FS device && there is a parent HS hub where MTTs
3750 * are supported and enabled.
3751 *
3752 * XXX enabled is not tested yet
3753 */
3754 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd))
3755 usemtt = true;
3756 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) &&
3757 myhsport &&
3758 myhsport->up_parent->ud_addr != rhaddr &&
3759 IS_MTTHUB(&myhsport->up_parent->ud_ddesc))
3760 usemtt = true;
3761 else
3762 usemtt = false;
3763 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd",
3764 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt);
3765
3766 #undef IS_MTTHUB
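/*
 * e.g. a high-speed hub reporting bDeviceProtocol UDPROTO_HSHUBMTT sets
 * MTT for itself (case 1), and an LS/FS device hanging off such a hub
 * sets MTT as well (case 2); a single-TT hub (UDPROTO_HSHUBSTT) sets
 * neither.
 */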
3767
3768 cp[0] |=
3769 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) |
3770 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0);
3771 cp[2] |=
3772 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) |
3773 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum);
3774 }
3775
3776 /* set up params for periodic endpoint */
3777 static void
3778 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp)
3779 {
3780 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
3781 struct usbd_device *dev = pipe->up_dev;
3782 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3783 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3784 usbd_desc_iter_t iter;
3785 const usb_cdc_descriptor_t *cdcd;
3786 uint32_t maxb = 0;
3787 uint16_t mps = UGETW(ed->wMaxPacketSize);
3788 uint8_t speed = dev->ud_speed;
3789 uint8_t mult = 0;
3790 uint8_t ep;
3791
3792 /* config desc is NULL when opening ep0 */
3793 if (dev == NULL || dev->ud_cdesc == NULL)
3794 goto no_cdcd;
3795 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev,
3796 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY);
3797 if (cdcd == NULL)
3798 goto no_cdcd;
3799 usb_desc_iter_init(dev, &iter);
3800 iter.cur = (const void *)cdcd;
3801
3802 /* find the endpoint_ss_comp descriptor for this pipe's endpoint */
3803 for (ep = 0;;) {
3804 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter);
3805 if (cdcd == NULL)
3806 break;
3807 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) {
3808 ep = ((const usb_endpoint_descriptor_t *)cdcd)->
3809 bEndpointAddress;
3810 if (UE_GET_ADDR(ep) ==
3811 UE_GET_ADDR(ed->bEndpointAddress)) {
3812 cdcd = (const usb_cdc_descriptor_t *)
3813 usb_desc_iter_next(&iter);
3814 break;
3815 }
3816 ep = 0;
3817 }
3818 }
3819 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
3820 const usb_endpoint_ss_comp_descriptor_t * esscd =
3821 (const usb_endpoint_ss_comp_descriptor_t *)cdcd;
3822 maxb = esscd->bMaxBurst;
3823 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes);
3824 }
3825
3826 no_cdcd:
3827 /* 6.2.3.4, 4.8.2.4 */
3828 if (USB_IS_SS(speed)) {
3829 /* USB 3.1 9.6.6 */
3830 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps);
3831 /* USB 3.1 9.6.7 */
3832 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3833 #ifdef notyet
3834 if (xfertype == UE_ISOCHRONOUS) {
3835 }
3836 if (XHCI_HCC2_LEC(sc->sc_hcc2) != 0) {
3837 /* use ESIT */
3838 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x);
3839 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(x);
3840
3841 /* XXX if LEC = 1, set ESIT instead */
3842 cp[0] |= XHCI_EPCTX_0_MULT_SET(0);
3843 } else {
3844 /* use ival */
3845 }
3846 #endif
3847 } else {
3848 /* USB 2.0 9.6.6 */
3849 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(UE_GET_SIZE(mps));
3850
3851 /* 6.2.3.4 */
3852 if (speed == USB_SPEED_HIGH &&
3853 (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)) {
3854 maxb = UE_GET_TRANS(mps);
3855 } else {
3856 /* LS/FS or HS CTRL or HS BULK */
3857 maxb = 0;
3858 }
3859 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3860 }
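/*
 * Worked example (values for illustration only): a HS isochronous
 * endpoint with wMaxPacketSize 0x1400 encodes 1024-byte packets with
 * 2 additional transaction opportunities per microframe, so maxb = 2
 * and xp_maxb below becomes 3.  A SS endpoint instead takes maxb from
 * bMaxBurst in its endpoint companion descriptor above.
 */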
3861 xpipe->xp_maxb = maxb + 1;
3862 xpipe->xp_mult = mult + 1;
3863 }
3864
3865 /*
3866 * Convert endpoint bInterval value to endpoint context interval value
3867 * for Interrupt pipe.
3868 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
3869 */
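/*
 * For illustration: a FS interrupt endpoint with bInterval 10 (ms)
 * gives 10 * 8 = 80 microframes; the largest power of two not
 * exceeding 80 is 2^6, so the returned Interval is 6 (8 ms).  A HS or
 * SS endpoint with bInterval 4 simply returns 4 - 1 = 3
 * (2^3 * 125 us = 1 ms).
 */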
3870 static uint32_t
3871 xhci_bival2ival(uint32_t ival, uint32_t speed)
3872 {
3873 if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
3874 int i;
3875
3876 /*
3877 * Round bInterval * 8 (in 125 us units) down to the nearest
3878 * power of two and use its exponent as the Interval.
3879 * bInterval is at most 255 as its type is uByte.
3880 * 255 ms = 2040 x 125 us < 2^11, so start with 10.
3881 */
3882 for (i = 10; i > 0; i--) {
3883 if ((ival * 8) >= (1 << i))
3884 break;
3885 }
3886 ival = i;
3887 } else {
3888 /* Interval = bInterval-1 for SS/HS */
3889 ival--;
3890 }
3891
3892 return ival;
3893 }
3894
3895 /* ----- */
3896
3897 static void
3898 xhci_noop(struct usbd_pipe *pipe)
3899 {
3900 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3901 }
3902
3903 /*
3904 * Process root hub request.
3905 */
3906 static int
3907 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
3908 void *buf, int buflen)
3909 {
3910 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
3911 usb_port_status_t ps;
3912 int l, totlen = 0;
3913 uint16_t len, value, index;
3914 int port, i;
3915 uint32_t v;
3916
3917 XHCIHIST_FUNC();
3918
3919 if (sc->sc_dying)
3920 return -1;
3921
3922 size_t bn = bus == &sc->sc_bus ? 0 : 1;
3923
3924 len = UGETW(req->wLength);
3925 value = UGETW(req->wValue);
3926 index = UGETW(req->wIndex);
3927
3928 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
3929 req->bmRequestType | (req->bRequest << 8), value, index, len);
3930
3931 #define C(x,y) ((x) | ((y) << 8))
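/*
 * e.g. a standard GET_DESCRIPTOR (bRequest UR_GET_DESCRIPTOR 0x06,
 * bmRequestType UT_READ_DEVICE 0x80) matches
 * C(UR_GET_DESCRIPTOR, UT_READ_DEVICE) == 0x8006.
 */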
3932 switch (C(req->bRequest, req->bmRequestType)) {
3933 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3934 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
3935 if (len == 0)
3936 break;
3937 switch (value) {
3938 #define sd ((usb_string_descriptor_t *)buf)
3939 case C(2, UDESC_STRING):
3940 /* Product */
3941 totlen = usb_makestrdesc(sd, len, "xHCI root hub");
3942 break;
3943 #undef sd
3944 default:
3945 /* default from usbroothub */
3946 return buflen;
3947 }
3948 break;
3949
3950 /* Hub requests */
3951 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
3952 break;
3953 /* Clear Port Feature request */
3954 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
3955 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
3956
3957 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
3958 index, value, bn, cp);
3959 if (index < 1 || index > sc->sc_rhportcount[bn]) {
3960 return -1;
3961 }
3962 port = XHCI_PORTSC(cp);
3963 v = xhci_op_read_4(sc, port);
3964 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
3965 v &= ~XHCI_PS_CLEAR;
3966 switch (value) {
3967 case UHF_PORT_ENABLE:
3968 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
3969 break;
3970 case UHF_PORT_SUSPEND:
3971 return -1;
3972 case UHF_PORT_POWER:
3973 break;
3974 case UHF_PORT_TEST:
3975 case UHF_PORT_INDICATOR:
3976 return -1;
3977 case UHF_C_PORT_CONNECTION:
3978 xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
3979 break;
3980 case UHF_C_PORT_ENABLE:
3981 case UHF_C_PORT_SUSPEND:
3982 case UHF_C_PORT_OVER_CURRENT:
3983 return -1;
3984 case UHF_C_BH_PORT_RESET:
3985 xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
3986 break;
3987 case UHF_C_PORT_RESET:
3988 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
3989 break;
3990 case UHF_C_PORT_LINK_STATE:
3991 xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
3992 break;
3993 case UHF_C_PORT_CONFIG_ERROR:
3994 xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
3995 break;
3996 default:
3997 return -1;
3998 }
3999 break;
4000 }
4001 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
4002 if (len == 0)
4003 break;
4004 if ((value & 0xff) != 0) {
4005 return -1;
4006 }
4007 usb_hub_descriptor_t hubd;
4008
4009 totlen = uimin(buflen, sizeof(hubd));
4010 memcpy(&hubd, buf, totlen);
4011 hubd.bNbrPorts = sc->sc_rhportcount[bn];
4012 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
4013 hubd.bPwrOn2PwrGood = 200;
4014 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
4015 /* XXX can't find out? */
4016 hubd.DeviceRemovable[i++] = 0;
4017 }
4018 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
4019 totlen = uimin(totlen, hubd.bDescLength);
4020 memcpy(buf, &hubd, totlen);
4021 break;
4022 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
4023 if (len != 4) {
4024 return -1;
4025 }
4026 memset(buf, 0, len); /* ? XXX */
4027 totlen = len;
4028 break;
4029 /* Get Port Status request */
4030 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
4031 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4032
4033 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
4034 bn, index, cp, 0);
4035 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4036 DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
4037 "portcount=%jd",
4038 index, bn, sc->sc_rhportcount[bn], 0);
4039 return -1;
4040 }
4041 if (len != 4) {
4042 DPRINTFN(5, "bad get port status: len %jd != 4",
4043 len, 0, 0, 0);
4044 return -1;
4045 }
4046 v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
4047 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
4048 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
4049 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
4050 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
4051 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
4052 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
4053 if (v & XHCI_PS_PR) i |= UPS_RESET;
4054 if (v & XHCI_PS_PP) {
4055 if (i & UPS_OTHER_SPEED)
4056 i |= UPS_PORT_POWER_SS;
4057 else
4058 i |= UPS_PORT_POWER;
4059 }
4060 if (i & UPS_OTHER_SPEED)
4061 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
4062 if (sc->sc_vendor_port_status)
4063 i = sc->sc_vendor_port_status(sc, v, i);
4064 USETW(ps.wPortStatus, i);
4065 i = 0;
4066 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
4067 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
4068 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
4069 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
4070 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
4071 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
4072 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
4073 USETW(ps.wPortChange, i);
4074 totlen = uimin(len, sizeof(ps));
4075 memcpy(buf, &ps, totlen);
4076 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
4077 " totlen %jd",
4078 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
4079 break;
4080 }
4081 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
4082 return -1;
4083 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
4084 break;
4085 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
4086 break;
4087 /* Set Port Feature request */
4088 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
4089 int optval = (index >> 8) & 0xff;
4090 index &= 0xff;
4091 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4092 return -1;
4093 }
4094
4095 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4096
4097 port = XHCI_PORTSC(cp);
4098 v = xhci_op_read_4(sc, port);
4099 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
4100 v &= ~XHCI_PS_CLEAR;
4101 switch (value) {
4102 case UHF_PORT_ENABLE:
4103 xhci_op_write_4(sc, port, v | XHCI_PS_PED);
4104 break;
4105 case UHF_PORT_SUSPEND:
4106 /* XXX suspend */
4107 break;
4108 case UHF_PORT_RESET:
4109 xhci_op_write_4(sc, port, v | XHCI_PS_PR);
4110 /* Wait for reset to complete. */
4111 for (i = 0; i < USB_PORT_ROOT_RESET_DELAY / 10; i++) {
4112 if (sc->sc_dying) {
4113 return -1;
4114 }
4115 v = xhci_op_read_4(sc, port);
4116 if ((v & XHCI_PS_PR) == 0) {
4117 break;
4118 }
4119 usb_delay_ms(&sc->sc_bus, 10);
4120 }
4121 break;
4122 case UHF_PORT_POWER:
4123 /* XXX power control */
4124 break;
4125 /* XXX more */
4126 case UHF_C_PORT_RESET:
4127 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4128 break;
4129 case UHF_PORT_U1_TIMEOUT:
4130 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4131 return -1;
4132 }
4133 port = XHCI_PORTPMSC(cp);
4134 v = xhci_op_read_4(sc, port);
4135 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4136 index, cp, v, 0);
4137 v &= ~XHCI_PM3_U1TO_SET(0xff);
4138 v |= XHCI_PM3_U1TO_SET(optval);
4139 xhci_op_write_4(sc, port, v);
4140 break;
4141 case UHF_PORT_U2_TIMEOUT:
4142 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4143 return -1;
4144 }
4145 port = XHCI_PORTPMSC(cp);
4146 v = xhci_op_read_4(sc, port);
4147 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4148 index, cp, v, 0);
4149 v &= ~XHCI_PM3_U2TO_SET(0xff);
4150 v |= XHCI_PM3_U2TO_SET(optval);
4151 xhci_op_write_4(sc, port, v);
4152 break;
4153 default:
4154 return -1;
4155 }
4156 }
4157 break;
4158 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
4159 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
4160 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
4161 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
4162 break;
4163 default:
4164 /* default from usbroothub */
4165 return buflen;
4166 }
4167
4168 return totlen;
4169 }
4170
4171 /* root hub interrupt */
4172
4173 static usbd_status
4174 xhci_root_intr_transfer(struct usbd_xfer *xfer)
4175 {
4176 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4177 usbd_status err;
4178
4179 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4180
4181 /* Insert last in queue. */
4182 mutex_enter(&sc->sc_lock);
4183 err = usb_insert_transfer(xfer);
4184 mutex_exit(&sc->sc_lock);
4185 if (err)
4186 return err;
4187
4188 /* Pipe isn't running, start first */
4189 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4190 }
4191
4192 /* Wait for roothub port status/change */
4193 static usbd_status
4194 xhci_root_intr_start(struct usbd_xfer *xfer)
4195 {
4196 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4197 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4198 const bool polling = xhci_polling_p(sc);
4199
4200 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4201
4202 if (sc->sc_dying)
4203 return USBD_IOERROR;
4204
4205 if (!polling)
4206 mutex_enter(&sc->sc_lock);
4207 KASSERT(sc->sc_intrxfer[bn] == NULL);
4208 sc->sc_intrxfer[bn] = xfer;
4209 xfer->ux_status = USBD_IN_PROGRESS;
4210 if (!polling)
4211 mutex_exit(&sc->sc_lock);
4212
4213 return USBD_IN_PROGRESS;
4214 }
4215
4216 static void
4217 xhci_root_intr_abort(struct usbd_xfer *xfer)
4218 {
4219 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4220 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4221
4222 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4223
4224 KASSERT(mutex_owned(&sc->sc_lock));
4225 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
4226
4227 /* If xfer has already completed, nothing to do here. */
4228 if (sc->sc_intrxfer[bn] == NULL)
4229 return;
4230
4231 /*
4232 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
4233 * Cancel it.
4234 */
4235 KASSERT(sc->sc_intrxfer[bn] == xfer);
4236 xfer->ux_status = USBD_CANCELLED;
4237 usb_transfer_complete(xfer);
4238 }
4239
4240 static void
4241 xhci_root_intr_close(struct usbd_pipe *pipe)
4242 {
4243 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
4244 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
4245 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4246
4247 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4248
4249 KASSERT(mutex_owned(&sc->sc_lock));
4250
4251 /*
4252 * Caller must guarantee the xfer has completed first, by
4253 * closing the pipe only after normal completion or an abort.
4254 */
4255 KASSERT(sc->sc_intrxfer[bn] == NULL);
4256 }
4257
4258 static void
4259 xhci_root_intr_done(struct usbd_xfer *xfer)
4260 {
4261 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4262 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4263
4264 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4265
4266 KASSERT(mutex_owned(&sc->sc_lock));
4267
4268 /* Claim the xfer so it doesn't get completed again. */
4269 KASSERT(sc->sc_intrxfer[bn] == xfer);
4270 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
4271 sc->sc_intrxfer[bn] = NULL;
4272 }
4273
4274 /* -------------- */
4275 /* device control */
4276
4277 static usbd_status
4278 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
4279 {
4280 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4281 usbd_status err;
4282
4283 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4284
4285 /* Insert last in queue. */
4286 mutex_enter(&sc->sc_lock);
4287 err = usb_insert_transfer(xfer);
4288 mutex_exit(&sc->sc_lock);
4289 if (err)
4290 return err;
4291
4292 /* Pipe isn't running, start first */
4293 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4294 }
4295
4296 static usbd_status
4297 xhci_device_ctrl_start(struct usbd_xfer *xfer)
4298 {
4299 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4300 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4301 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4302 struct xhci_ring * const tr = xs->xs_xr[dci];
4303 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4304 usb_device_request_t * const req = &xfer->ux_request;
4305 const bool isread = usbd_xfer_isread(xfer);
4306 const uint32_t len = UGETW(req->wLength);
4307 usb_dma_t * const dma = &xfer->ux_dmabuf;
4308 uint64_t parameter;
4309 uint32_t status;
4310 uint32_t control;
4311 u_int i;
4312 const bool polling = xhci_polling_p(sc);
4313
4314 XHCIHIST_FUNC();
4315 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
4316 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
4317 UGETW(req->wIndex), UGETW(req->wLength));
4318
4319 /* we rely on the bottom bits for extra info */
4320 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %zx",
4321 (uintptr_t) xfer);
4322
4323 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);
4324
4325 i = 0;
4326
4327 /* setup phase */
4328 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */
4329 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
4330 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
4331 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
4332 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
4333 XHCI_TRB_3_IDT_BIT;
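/*
 * The IDT bit tells the xHC to take the 8-byte setup packet from the
 * TRB parameter itself (stored above) instead of from a DMA buffer.
 */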
4334 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4335
4336 if (len != 0) {
4337 /* data phase */
4338 parameter = DMAADDR(dma, 0);
4339 KASSERTMSG(len <= 0x10000, "len %d", len);
4340 status = XHCI_TRB_2_IRQ_SET(0) |
4341 XHCI_TRB_2_TDSZ_SET(0) |
4342 XHCI_TRB_2_BYTES_SET(len);
4343 control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
4344 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
4345 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4346 XHCI_TRB_3_IOC_BIT;
4347 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4348
4349 usb_syncmem(dma, 0, len,
4350 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4351 }
4352
4353 parameter = 0;
4354 status = XHCI_TRB_2_IRQ_SET(0);
4355 /* the status stage has inverted direction */
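/*
 * e.g. an IN control read with wLength > 0 gets an OUT status stage,
 * while a request with no data stage (wLength == 0) always gets an IN
 * status stage.
 */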
4356 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
4357 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
4358 XHCI_TRB_3_IOC_BIT;
4359 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4360
4361 if (!polling)
4362 mutex_enter(&tr->xr_lock);
4363 xhci_ring_put_xfer(sc, tr, xx, i);
4364 if (!polling)
4365 mutex_exit(&tr->xr_lock);
4366
4367 if (!polling)
4368 mutex_enter(&sc->sc_lock);
4369 xfer->ux_status = USBD_IN_PROGRESS;
4370 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4371 usbd_xfer_schedule_timeout(xfer);
4372 if (!polling)
4373 mutex_exit(&sc->sc_lock);
4374
4375 return USBD_IN_PROGRESS;
4376 }
4377
4378 static void
4379 xhci_device_ctrl_done(struct usbd_xfer *xfer)
4380 {
4381 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4382 usb_device_request_t *req = &xfer->ux_request;
4383 int len = UGETW(req->wLength);
4384 int rd = req->bmRequestType & UT_READ;
4385
4386 if (len)
4387 usb_syncmem(&xfer->ux_dmabuf, 0, len,
4388 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4389 }
4390
4391 static void
4392 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
4393 {
4394 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4395
4396 usbd_xfer_abort(xfer);
4397 }
4398
4399 static void
4400 xhci_device_ctrl_close(struct usbd_pipe *pipe)
4401 {
4402 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4403
4404 xhci_close_pipe(pipe);
4405 }
4406
4407 /* ------------------ */
4408 /* device isochronous */
4409
4410 static usbd_status
4411 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
4412 {
4413 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4414 usbd_status err;
4415
4416 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4417
4418 /* Insert last in queue. */
4419 mutex_enter(&sc->sc_lock);
4420 err = usb_insert_transfer(xfer);
4421 mutex_exit(&sc->sc_lock);
4422 if (err)
4423 return err;
4424
4425 return xhci_device_isoc_enter(xfer);
4426 }
4427
4428 static usbd_status
4429 xhci_device_isoc_enter(struct usbd_xfer *xfer)
4430 {
4431 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4432 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4433 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4434 struct xhci_ring * const tr = xs->xs_xr[dci];
4435 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4436 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
4437 uint32_t len = xfer->ux_length;
4438 usb_dma_t * const dma = &xfer->ux_dmabuf;
4439 uint64_t parameter;
4440 uint32_t status;
4441 uint32_t control;
4442 uint32_t mfindex;
4443 uint32_t offs;
4444 int i, ival;
4445 const bool polling = xhci_polling_p(sc);
4446 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
4447 const uint16_t mps = UE_GET_SIZE(MPS);
4448 const uint8_t maxb = xpipe->xp_maxb;
4449 u_int tdpc, tbc, tlbpc;
4450
4451 XHCIHIST_FUNC();
4452 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4453 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4454
4455 if (sc->sc_dying)
4456 return USBD_IOERROR;
4457
4458 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
4459 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4460
4461 const bool isread = usbd_xfer_isread(xfer);
4462 if (xfer->ux_length)
4463 usb_syncmem(dma, 0, xfer->ux_length,
4464 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4465
4466 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
4467 if (ival >= 1 && ival <= 16)
4468 ival = 1 << (ival - 1);
4469 else
4470 ival = 1; /* fake something up */
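/*
 * e.g. bInterval 4 yields ival = 2^3 = 8; anything outside 1..16
 * falls back to 1 instead of failing the transfer.
 */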
4471
4472 if (xpipe->xp_isoc_next == -1) {
4473 mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
4474 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
4475 mfindex = XHCI_MFINDEX_GET(mfindex + 1);
4476 mfindex /= USB_UFRAMES_PER_FRAME;
4477 mfindex += 7; /* 7 frames is max possible IST */
4478 xpipe->xp_isoc_next = roundup2(mfindex, ival);
4479 }
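/*
 * Worked example (illustrative numbers): MFINDEX reads 0x3f0
 * microframes -> (0x3f0 + 1) / 8 = 126 frames, plus the 7-frame IST
 * margin = 133, rounded up to the next multiple of ival (136 for
 * ival 8) as the first frame to schedule.
 */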
4480
4481 offs = 0;
4482 for (i = 0; i < xfer->ux_nframes; i++) {
4483 len = xfer->ux_frlengths[i];
4484
4485 tdpc = howmany(len, mps);
4486 tbc = howmany(tdpc, maxb) - 1;
4487 tlbpc = tdpc % maxb;
4488 tlbpc = tlbpc ? tlbpc - 1 : maxb - 1;
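/*
 * e.g. (illustrative values) len 3000, mps 1024, maxb 3:
 * tdpc = 3 packets, tbc = 0 (a single burst), tlbpc = 2
 * (three packets in the last burst).
 */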
4489
4490 KASSERTMSG(len <= 0x10000, "len %d", len);
4491 parameter = DMAADDR(dma, offs);
4492 status = XHCI_TRB_2_IRQ_SET(0) |
4493 XHCI_TRB_2_TDSZ_SET(0) |
4494 XHCI_TRB_2_BYTES_SET(len);
4495 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
4496 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4497 XHCI_TRB_3_TBC_SET(tbc) |
4498 XHCI_TRB_3_TLBPC_SET(tlbpc) |
4499 XHCI_TRB_3_IOC_BIT;
4500 if (XHCI_HCC_CFC(sc->sc_hcc)) {
4501 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4502 #if 0
4503 } else if (xpipe->xp_isoc_next == -1) {
4504 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4505 #endif
4506 } else {
4507 control |= XHCI_TRB_3_ISO_SIA_BIT;
4508 }
4509 #if 0
4510 if (i != xfer->ux_nframes - 1)
4511 control |= XHCI_TRB_3_BEI_BIT;
4512 #endif
4513 xhci_xfer_put_trb(xx, i, parameter, status, control);
4514
4515 xpipe->xp_isoc_next += ival;
4516 offs += len;
4517 }
4518
4519 xx->xx_isoc_done = 0;
4520
4521 if (!polling)
4522 mutex_enter(&tr->xr_lock);
4523 xhci_ring_put_xfer(sc, tr, xx, i);
4524 if (!polling)
4525 mutex_exit(&tr->xr_lock);
4526
4527 if (!polling)
4528 mutex_enter(&sc->sc_lock);
4529 xfer->ux_status = USBD_IN_PROGRESS;
4530 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4531 usbd_xfer_schedule_timeout(xfer);
4532 if (!polling)
4533 mutex_exit(&sc->sc_lock);
4534
4535 return USBD_IN_PROGRESS;
4536 }
4537
4538 static void
4539 xhci_device_isoc_abort(struct usbd_xfer *xfer)
4540 {
4541 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4542
4543 usbd_xfer_abort(xfer);
4544 }
4545
4546 static void
4547 xhci_device_isoc_close(struct usbd_pipe *pipe)
4548 {
4549 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4550
4551 xhci_close_pipe(pipe);
4552 }
4553
4554 static void
4555 xhci_device_isoc_done(struct usbd_xfer *xfer)
4556 {
4557 #ifdef USB_DEBUG
4558 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4559 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4560 #endif
4561 const bool isread = usbd_xfer_isread(xfer);
4562
4563 XHCIHIST_FUNC();
4564 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4565 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4566
4567 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4568 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4569 }
4570
4571 /* ----------- */
4572 /* device bulk */
4573
4574 static usbd_status
4575 xhci_device_bulk_transfer(struct usbd_xfer *xfer)
4576 {
4577 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4578 usbd_status err;
4579
4580 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4581
4582 /* Insert last in queue. */
4583 mutex_enter(&sc->sc_lock);
4584 err = usb_insert_transfer(xfer);
4585 mutex_exit(&sc->sc_lock);
4586 if (err)
4587 return err;
4588
4589 /*
4590 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
4591 * so start it first.
4592 */
4593 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4594 }
4595
4596 static usbd_status
4597 xhci_device_bulk_start(struct usbd_xfer *xfer)
4598 {
4599 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4600 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4601 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4602 struct xhci_ring * const tr = xs->xs_xr[dci];
4603 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4604 const uint32_t len = xfer->ux_length;
4605 usb_dma_t * const dma = &xfer->ux_dmabuf;
4606 uint64_t parameter;
4607 uint32_t status;
4608 uint32_t control;
4609 u_int i = 0;
4610 const bool polling = xhci_polling_p(sc);
4611
4612 XHCIHIST_FUNC();
4613 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4614 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4615
4616 if (sc->sc_dying)
4617 return USBD_IOERROR;
4618
4619 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4620
4621 parameter = DMAADDR(dma, 0);
4622 const bool isread = usbd_xfer_isread(xfer);
4623 if (len)
4624 usb_syncmem(dma, 0, len,
4625 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4626
4627 /*
4628 * XXX: (dsl) The physical buffer must not cross a 64k boundary.
4629 * If the user-supplied buffer crosses such a boundary then two
4630 * (or more) TRBs should be used.
4631 * If multiple TRBs are used, the td_size field must be set correctly.
4632 * For v1.0 devices (like Ivy Bridge) this is the number of USB data
4633 * blocks needed to complete the transfer.
4634 * Setting it to 1 in the last TRB causes an extra zero-length
4635 * data block to be sent.
4636 * The earlier documentation differs, I don't know how it behaves.
4637 */
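/*
 * Illustration (hypothetical numbers): a 4 KB buffer starting at
 * physical offset 0xfc00 crosses the 64 KB boundary after 1 KB, so it
 * would need one TRB for the first 0x400 bytes and a second for the
 * remaining 0xc00, with TD Size in each TRB counting the packets still
 * left in the TD.
 */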
4638 KASSERTMSG(len <= 0x10000, "len %d", len);
4639 status = XHCI_TRB_2_IRQ_SET(0) |
4640 XHCI_TRB_2_TDSZ_SET(0) |
4641 XHCI_TRB_2_BYTES_SET(len);
4642 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4643 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4644 XHCI_TRB_3_IOC_BIT;
4645 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4646
4647 if (!polling)
4648 mutex_enter(&tr->xr_lock);
4649 xhci_ring_put_xfer(sc, tr, xx, i);
4650 if (!polling)
4651 mutex_exit(&tr->xr_lock);
4652
4653 if (!polling)
4654 mutex_enter(&sc->sc_lock);
4655 xfer->ux_status = USBD_IN_PROGRESS;
4656 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4657 usbd_xfer_schedule_timeout(xfer);
4658 if (!polling)
4659 mutex_exit(&sc->sc_lock);
4660
4661 return USBD_IN_PROGRESS;
4662 }
4663
4664 static void
4665 xhci_device_bulk_done(struct usbd_xfer *xfer)
4666 {
4667 #ifdef USB_DEBUG
4668 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4669 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4670 #endif
4671 const bool isread = usbd_xfer_isread(xfer);
4672
4673 XHCIHIST_FUNC();
4674 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4675 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4676
4677 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4678 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4679 }
4680
4681 static void
4682 xhci_device_bulk_abort(struct usbd_xfer *xfer)
4683 {
4684 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4685
4686 usbd_xfer_abort(xfer);
4687 }
4688
4689 static void
4690 xhci_device_bulk_close(struct usbd_pipe *pipe)
4691 {
4692 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4693
4694 xhci_close_pipe(pipe);
4695 }
4696
4697 /* ---------------- */
4698 /* device interrupt */
4699
4700 static usbd_status
4701 xhci_device_intr_transfer(struct usbd_xfer *xfer)
4702 {
4703 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4704 usbd_status err;
4705
4706 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4707
4708 /* Insert last in queue. */
4709 mutex_enter(&sc->sc_lock);
4710 err = usb_insert_transfer(xfer);
4711 mutex_exit(&sc->sc_lock);
4712 if (err)
4713 return err;
4714
4715 /*
4716 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
4717 * so start it first.
4718 */
4719 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4720 }
4721
4722 static usbd_status
4723 xhci_device_intr_start(struct usbd_xfer *xfer)
4724 {
4725 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4726 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4727 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4728 struct xhci_ring * const tr = xs->xs_xr[dci];
4729 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4730 const uint32_t len = xfer->ux_length;
4731 const bool polling = xhci_polling_p(sc);
4732 usb_dma_t * const dma = &xfer->ux_dmabuf;
4733 uint64_t parameter;
4734 uint32_t status;
4735 uint32_t control;
4736 u_int i = 0;
4737
4738 XHCIHIST_FUNC();
4739 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4740 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4741
4742 if (sc->sc_dying)
4743 return USBD_IOERROR;
4744
4745 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4746
4747 const bool isread = usbd_xfer_isread(xfer);
4748 if (len)
4749 usb_syncmem(dma, 0, len,
4750 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4751
4752 parameter = DMAADDR(dma, 0);
4753 KASSERTMSG(len <= 0x10000, "len %d", len);
4754 status = XHCI_TRB_2_IRQ_SET(0) |
4755 XHCI_TRB_2_TDSZ_SET(0) |
4756 XHCI_TRB_2_BYTES_SET(len);
4757 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4758 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
4759 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4760
4761 if (!polling)
4762 mutex_enter(&tr->xr_lock);
4763 xhci_ring_put_xfer(sc, tr, xx, i);
4764 if (!polling)
4765 mutex_exit(&tr->xr_lock);
4766
4767 if (!polling)
4768 mutex_enter(&sc->sc_lock);
4769 xfer->ux_status = USBD_IN_PROGRESS;
4770 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4771 usbd_xfer_schedule_timeout(xfer);
4772 if (!polling)
4773 mutex_exit(&sc->sc_lock);
4774
4775 return USBD_IN_PROGRESS;
4776 }
4777
4778 static void
4779 xhci_device_intr_done(struct usbd_xfer *xfer)
4780 {
4781 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4782 #ifdef USB_DEBUG
4783 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4784 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4785 #endif
4786 const bool isread = usbd_xfer_isread(xfer);
4787
4788 XHCIHIST_FUNC();
4789 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4790 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4791
4792 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4793
4794 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4795 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4796 }
4797
4798 static void
4799 xhci_device_intr_abort(struct usbd_xfer *xfer)
4800 {
4801 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4802
4803 XHCIHIST_FUNC();
4804 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);
4805
4806 KASSERT(mutex_owned(&sc->sc_lock));
4807 usbd_xfer_abort(xfer);
4808 }
4809
4810 static void
4811 xhci_device_intr_close(struct usbd_pipe *pipe)
4812 {
4813 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
4814
4815 XHCIHIST_FUNC();
4816 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);
4817
4818 xhci_close_pipe(pipe);
4819 }
4820