1 /* $NetBSD: xhci.c,v 1.173 2022/10/11 09:18:22 msaitoh Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * USB rev 2.0 and rev 3.1 specification
31 * http://www.usb.org/developers/docs/
32 * xHCI rev 1.1 specification
33 * http://www.intel.com/technology/usb/spec.htm
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.173 2022/10/11 09:18:22 msaitoh Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_usb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/kmem.h>
47 #include <sys/device.h>
48 #include <sys/select.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/sysctl.h>
56
57 #include <machine/endian.h>
58
59 #include <dev/usb/usb.h>
60 #include <dev/usb/usbdi.h>
61 #include <dev/usb/usbdivar.h>
62 #include <dev/usb/usbdi_util.h>
63 #include <dev/usb/usbhist.h>
64 #include <dev/usb/usb_mem.h>
65 #include <dev/usb/usb_quirks.h>
66
67 #include <dev/usb/xhcireg.h>
68 #include <dev/usb/xhcivar.h>
69 #include <dev/usb/usbroothub.h>
70
71
72 #ifdef USB_DEBUG
73 #ifndef XHCI_DEBUG
74 #define xhcidebug 0
75 #else /* !XHCI_DEBUG */
76 #define HEXDUMP(a, b, c) \
77 do { \
78 if (xhcidebug > 0) \
79 hexdump(printf, a, b, c); \
80 } while (/*CONSTCOND*/0)
81 static int xhcidebug = 0;
82
83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
84 {
85 int err;
86 const struct sysctlnode *rnode;
87 const struct sysctlnode *cnode;
88
89 err = sysctl_createv(clog, 0, NULL, &rnode,
90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
91 SYSCTL_DESCR("xhci global controls"),
92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
93
94 if (err)
95 goto fail;
96
97 /* control debugging printfs */
98 err = sysctl_createv(clog, 0, &rnode, &cnode,
99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
100 "debug", SYSCTL_DESCR("Enable debugging output"),
101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
102 if (err)
103 goto fail;
104
105 return;
106 fail:
107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
108 }
109
110 #endif /* !XHCI_DEBUG */
111 #endif /* USB_DEBUG */
112
113 #ifndef HEXDUMP
114 #define HEXDUMP(a, b, c)
115 #endif
116
117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
119 #define XHCIHIST_FUNC() USBHIST_FUNC()
120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug)
121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)
123
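/*
 * Within a device context, index 0 is the slot context and index 1 is
 * the default control endpoint (EP0); within an input context, index 0
 * is the input control context.
 */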
124 #define XHCI_DCI_SLOT 0
125 #define XHCI_DCI_EP_CONTROL 1
126
127 #define XHCI_ICI_INPUT_CONTROL 0
128
129 struct xhci_pipe {
130 struct usbd_pipe xp_pipe;
131 struct usb_task xp_async_task;
132 int16_t xp_isoc_next; /* next frame */
133 uint8_t xp_maxb; /* max burst */
134 uint8_t xp_mult;
135 };
136
137 #define XHCI_COMMAND_RING_TRBS 256
138 #define XHCI_EVENT_RING_TRBS 256
139 #define XHCI_EVENT_RING_SEGMENTS 1
140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT
141
142 static usbd_status xhci_open(struct usbd_pipe *);
143 static void xhci_close_pipe(struct usbd_pipe *);
144 static int xhci_intr1(struct xhci_softc * const);
145 static void xhci_softintr(void *);
146 static void xhci_poll(struct usbd_bus *);
147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
149 static void xhci_abortx(struct usbd_xfer *);
150 static bool xhci_dying(struct usbd_bus *);
151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
153 struct usbd_port *);
154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
155 void *, int);
156
157 static void xhci_pipe_restart(struct usbd_pipe *);
158 static void xhci_pipe_restart_async_task(void *);
159 static void xhci_pipe_restart_async(struct usbd_pipe *);
160
161 static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
162 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
163 static void xhci_reset_endpoint(struct usbd_pipe *);
164 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *,
165 struct xhci_slot *, u_int, uint32_t);
166 static usbd_status xhci_stop_endpoint(struct usbd_pipe *);
167
168 static void xhci_host_dequeue(struct xhci_ring * const);
169 static void xhci_set_dequeue(struct usbd_pipe *);
170
171 static usbd_status xhci_do_command(struct xhci_softc * const,
172 struct xhci_soft_trb * const, int);
173 static usbd_status xhci_do_command_locked(struct xhci_softc * const,
174 struct xhci_soft_trb * const, int);
175 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
176 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *);
177 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
178 static usbd_status xhci_enable_slot(struct xhci_softc * const,
179 uint8_t * const);
180 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
181 static usbd_status xhci_address_device(struct xhci_softc * const,
182 uint64_t, uint8_t, bool);
183 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
184 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
185 struct xhci_slot * const, u_int);
186 static usbd_status xhci_ring_init(struct xhci_softc * const,
187 struct xhci_ring **, size_t, size_t);
188 static void xhci_ring_free(struct xhci_softc * const,
189 struct xhci_ring ** const);
190
191 static void xhci_setup_ctx(struct usbd_pipe *);
192 static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
193 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
194 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
195 static uint32_t xhci_bival2ival(uint32_t, uint32_t);
196
197 static void xhci_noop(struct usbd_pipe *);
198
199 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
200 static usbd_status xhci_root_intr_start(struct usbd_xfer *);
201 static void xhci_root_intr_abort(struct usbd_xfer *);
202 static void xhci_root_intr_close(struct usbd_pipe *);
203 static void xhci_root_intr_done(struct usbd_xfer *);
204
205 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
206 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
207 static void xhci_device_ctrl_abort(struct usbd_xfer *);
208 static void xhci_device_ctrl_close(struct usbd_pipe *);
209 static void xhci_device_ctrl_done(struct usbd_xfer *);
210
211 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
212 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *);
213 static void xhci_device_isoc_abort(struct usbd_xfer *);
214 static void xhci_device_isoc_close(struct usbd_pipe *);
215 static void xhci_device_isoc_done(struct usbd_xfer *);
216
217 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
218 static usbd_status xhci_device_intr_start(struct usbd_xfer *);
219 static void xhci_device_intr_abort(struct usbd_xfer *);
220 static void xhci_device_intr_close(struct usbd_pipe *);
221 static void xhci_device_intr_done(struct usbd_xfer *);
222
223 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
224 static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
225 static void xhci_device_bulk_abort(struct usbd_xfer *);
226 static void xhci_device_bulk_close(struct usbd_pipe *);
227 static void xhci_device_bulk_done(struct usbd_xfer *);
228
229 static const struct usbd_bus_methods xhci_bus_methods = {
230 .ubm_open = xhci_open,
231 .ubm_softint = xhci_softintr,
232 .ubm_dopoll = xhci_poll,
233 .ubm_allocx = xhci_allocx,
234 .ubm_freex = xhci_freex,
235 .ubm_abortx = xhci_abortx,
236 .ubm_dying = xhci_dying,
237 .ubm_getlock = xhci_get_lock,
238 .ubm_newdev = xhci_new_device,
239 .ubm_rhctrl = xhci_roothub_ctrl,
240 };
241
242 static const struct usbd_pipe_methods xhci_root_intr_methods = {
243 .upm_transfer = xhci_root_intr_transfer,
244 .upm_start = xhci_root_intr_start,
245 .upm_abort = xhci_root_intr_abort,
246 .upm_close = xhci_root_intr_close,
247 .upm_cleartoggle = xhci_noop,
248 .upm_done = xhci_root_intr_done,
249 };
250
251
252 static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
253 .upm_transfer = xhci_device_ctrl_transfer,
254 .upm_start = xhci_device_ctrl_start,
255 .upm_abort = xhci_device_ctrl_abort,
256 .upm_close = xhci_device_ctrl_close,
257 .upm_cleartoggle = xhci_noop,
258 .upm_done = xhci_device_ctrl_done,
259 };
260
261 static const struct usbd_pipe_methods xhci_device_isoc_methods = {
262 .upm_transfer = xhci_device_isoc_transfer,
263 .upm_abort = xhci_device_isoc_abort,
264 .upm_close = xhci_device_isoc_close,
265 .upm_cleartoggle = xhci_noop,
266 .upm_done = xhci_device_isoc_done,
267 };
268
269 static const struct usbd_pipe_methods xhci_device_bulk_methods = {
270 .upm_transfer = xhci_device_bulk_transfer,
271 .upm_start = xhci_device_bulk_start,
272 .upm_abort = xhci_device_bulk_abort,
273 .upm_close = xhci_device_bulk_close,
274 .upm_cleartoggle = xhci_noop,
275 .upm_done = xhci_device_bulk_done,
276 };
277
278 static const struct usbd_pipe_methods xhci_device_intr_methods = {
279 .upm_transfer = xhci_device_intr_transfer,
280 .upm_start = xhci_device_intr_start,
281 .upm_abort = xhci_device_intr_abort,
282 .upm_close = xhci_device_intr_close,
283 .upm_cleartoggle = xhci_noop,
284 .upm_done = xhci_device_intr_done,
285 };
286
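/*
 * Register accessors: xhci_read_*()/xhci_write_*() operate on the full
 * mapped register space (sc_ioh), while xhci_cap_*, xhci_op_*, xhci_rt_*
 * and xhci_db_* use the capability, operational, runtime and doorbell
 * subregions respectively.
 */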
287 static inline uint32_t
288 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
289 {
290 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
291 }
292
293 static inline uint32_t
294 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset)
295 {
296 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
297 }
298
299 static inline uint32_t
300 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
301 {
302 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
303 }
304
305 static inline void
306 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
307 uint32_t value)
308 {
309 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
310 }
311
312 #if 0 /* unused */
313 static inline void
314 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
315 uint32_t value)
316 {
317 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
318 }
319 #endif /* unused */
320
321 static inline uint32_t
322 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
323 {
324 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
325 }
326
327 static inline uint32_t
328 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
329 {
330 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
331 }
332
333 static inline void
334 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
335 uint32_t value)
336 {
337 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
338 }
339
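/*
 * 64-bit operational and runtime registers are accessed as two 32-bit
 * accesses, low word first, unless XHCI_USE_BUS_SPACE_8 is defined.
 */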
340 static inline uint64_t
341 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
342 {
343 uint64_t value;
344
345 #ifdef XHCI_USE_BUS_SPACE_8
346 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
347 #else
348 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
349 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
350 offset + 4) << 32;
351 #endif
352
353 return value;
354 }
355
356 static inline void
357 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
358 uint64_t value)
359 {
360 #ifdef XHCI_USE_BUS_SPACE_8
361 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
362 #else
363 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
364 (value >> 0) & 0xffffffff);
365 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
366 (value >> 32) & 0xffffffff);
367 #endif
368 }
369
370 static inline uint32_t
371 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
372 {
373 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
374 }
375
376 static inline void
377 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
378 uint32_t value)
379 {
380 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
381 }
382
383 static inline uint64_t
384 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
385 {
386 uint64_t value;
387
388 #ifdef XHCI_USE_BUS_SPACE_8
389 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
390 #else
391 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
392 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
393 offset + 4) << 32;
394 #endif
395
396 return value;
397 }
398
399 static inline void
400 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
401 uint64_t value)
402 {
403 #ifdef XHCI_USE_BUS_SPACE_8
404 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
405 #else
406 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
407 (value >> 0) & 0xffffffff);
408 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
409 (value >> 32) & 0xffffffff);
410 #endif
411 }
412
413 #if 0 /* unused */
414 static inline uint32_t
415 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
416 {
417 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
418 }
419 #endif /* unused */
420
421 static inline void
422 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
423 uint32_t value)
424 {
425 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
426 }
427
428 /* --- */
429
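/*
 * Return the Endpoint Context EP Type encoding (xHCI 1.1 sect. 6.2.3)
 * for an endpoint descriptor: 1-3 for Isoch/Bulk/Interrupt OUT, 4 for
 * Control, 5-7 for Isoch/Bulk/Interrupt IN.
 */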
430 static inline uint8_t
431 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
432 {
433 u_int eptype = 0;
434
435 switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
436 case UE_CONTROL:
437 eptype = 0x0;
438 break;
439 case UE_ISOCHRONOUS:
440 eptype = 0x1;
441 break;
442 case UE_BULK:
443 eptype = 0x2;
444 break;
445 case UE_INTERRUPT:
446 eptype = 0x3;
447 break;
448 }
449
450 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
451 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
452 return eptype | 0x4;
453 else
454 return eptype;
455 }
456
457 static u_int
458 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
459 {
460 /* xHCI 1.0 section 4.5.1 */
461 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
462 u_int in = 0;
463
464 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
465 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
466 in = 1;
467
468 return epaddr * 2 + in;
469 }
470
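/*
 * Convert a device context index (DCI) to an input context index (ICI).
 * The input context carries the input control context at index 0, so
 * every entry is shifted up by one relative to the device context.
 */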
471 static inline u_int
472 xhci_dci_to_ici(const u_int i)
473 {
474 return i + 1;
475 }
476
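/*
 * Accessors for a slot's contexts: *_get_dcv()/_dcp() return the kernel
 * virtual and DMA addresses of the output device context at a given DCI,
 * *_get_icv()/_icp() the same for the input context at a given ICI.
 */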
477 static inline void *
478 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
479 const u_int dci)
480 {
481 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
482 }
483
484 #if 0 /* unused */
485 static inline bus_addr_t
486 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
487 const u_int dci)
488 {
489 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
490 }
491 #endif /* unused */
492
493 static inline void *
494 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
495 const u_int ici)
496 {
497 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
498 }
499
500 static inline bus_addr_t
501 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
502 const u_int ici)
503 {
504 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
505 }
506
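/* Kernel virtual and DMA addresses of the TRB at index idx on a ring. */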
507 static inline struct xhci_trb *
508 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
509 {
510 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
511 }
512
513 static inline bus_addr_t
514 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
515 {
516 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
517 }
518
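/*
 * xhci_xfer_put_trb() stages a TRB in the xfer's shadow array in host
 * byte order; xhci_trb_put() stores a TRB in the little-endian form
 * consumed by the controller.
 */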
519 static inline void
520 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx,
521 uint64_t parameter, uint32_t status, uint32_t control)
522 {
523 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb);
524 xx->xx_trb[idx].trb_0 = parameter;
525 xx->xx_trb[idx].trb_2 = status;
526 xx->xx_trb[idx].trb_3 = control;
527 }
528
529 static inline void
530 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
531 uint32_t control)
532 {
533 trb->trb_0 = htole64(parameter);
534 trb->trb_2 = htole32(status);
535 trb->trb_3 = htole32(control);
536 }
537
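/*
 * Translate a TRB bus address (e.g. the trb_0 field of an event) into an
 * index on the given ring; returns nonzero if the address does not fall
 * on a TRB of that ring.
 */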
538 static int
539 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
540 {
541 /* base address of TRBs */
542 bus_addr_t trbp = xhci_ring_trbp(xr, 0);
543
544 /* trb_0 range sanity check */
545 if (trb_0 == 0 || trb_0 < trbp ||
546 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
547 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
548 return 1;
549 }
550 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
551 return 0;
552 }
553
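/*
 * Read the current endpoint state (Disabled/Running/Halted/Stopped/Error)
 * back from the output device context for the given DCI.
 */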
554 static unsigned int
555 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
556 u_int dci)
557 {
558 uint32_t *cp;
559
560 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
561 cp = xhci_slot_get_dcv(sc, xs, dci);
562 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
563 }
564
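/*
 * Return the bus a controller port (1..n) is assigned to: 0 for the
 * USB 3 root hub, 1 for the USB 2 root hub (see sc_ctlrportbus, set up
 * in xhci_id_protocols()).
 */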
565 static inline unsigned int
566 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
567 {
568 const unsigned int port = ctlrport - 1;
569 const uint8_t bit = __BIT(port % NBBY);
570
571 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
572 }
573
574 /*
575 * Return the roothub port for a controller port. Both are 1..n.
576 */
577 static inline unsigned int
578 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
579 {
580
581 return sc->sc_ctlrportmap[ctrlport - 1];
582 }
583
584 /*
585 * Return the controller port for a bus roothub port. Both are 1..n.
586 */
587 static inline unsigned int
588 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
589 unsigned int rhport)
590 {
591
592 return sc->sc_rhportmap[bn][rhport - 1];
593 }
594
595 /* --- */
596
597 void
598 xhci_childdet(device_t self, device_t child)
599 {
600 struct xhci_softc * const sc = device_private(self);
601
602 mutex_enter(&sc->sc_intr_lock);
603 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
604 if (child == sc->sc_child2)
605 sc->sc_child2 = NULL;
606 else if (child == sc->sc_child)
607 sc->sc_child = NULL;
608 mutex_exit(&sc->sc_intr_lock);
609 }
610
611 int
612 xhci_detach(struct xhci_softc *sc, int flags)
613 {
614 int rv = 0;
615
616 if (sc->sc_child2 != NULL) {
617 rv = config_detach(sc->sc_child2, flags);
618 if (rv != 0)
619 return rv;
620 KASSERT(sc->sc_child2 == NULL);
621 }
622
623 if (sc->sc_child != NULL) {
624 rv = config_detach(sc->sc_child, flags);
625 if (rv != 0)
626 return rv;
627 KASSERT(sc->sc_child == NULL);
628 }
629
630 /* XXX unconfigure/free slots */
631
632 /* verify: */
633 xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
634 xhci_op_write_4(sc, XHCI_USBCMD, 0);
635 /* do we need to wait for stop? */
636
637 xhci_op_write_8(sc, XHCI_CRCR, 0);
638 xhci_ring_free(sc, &sc->sc_cr);
639 cv_destroy(&sc->sc_command_cv);
640 cv_destroy(&sc->sc_cmdbusy_cv);
641
642 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
643 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
644 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY);
645 xhci_ring_free(sc, &sc->sc_er);
646
647 usb_freemem(&sc->sc_eventst_dma);
648
649 xhci_op_write_8(sc, XHCI_DCBAAP, 0);
650 usb_freemem(&sc->sc_dcbaa_dma);
651
652 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);
653
654 kmem_free(sc->sc_ctlrportbus,
655 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
656 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));
657
658 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
659 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
660 }
661
662 mutex_destroy(&sc->sc_rhlock);
663 mutex_destroy(&sc->sc_lock);
664 mutex_destroy(&sc->sc_intr_lock);
665
666 pool_cache_destroy(sc->sc_xferpool);
667
668 return rv;
669 }
670
671 int
672 xhci_activate(device_t self, enum devact act)
673 {
674 struct xhci_softc * const sc = device_private(self);
675
676 switch (act) {
677 case DVACT_DEACTIVATE:
678 sc->sc_dying = true;
679 return 0;
680 default:
681 return EOPNOTSUPP;
682 }
683 }
684
685 bool
686 xhci_suspend(device_t self, const pmf_qual_t *qual)
687 {
688 struct xhci_softc * const sc = device_private(self);
689 size_t i, j, bn, dci;
690 int port;
691 uint32_t v;
692 usbd_status err;
693 bool ok = false;
694
695 XHCIHIST_FUNC(); XHCIHIST_CALLED();
696
697 /*
698 * Block issuance of new commands, and wait for all pending
699 * commands to complete.
700 */
701 mutex_enter(&sc->sc_lock);
702 KASSERT(sc->sc_suspender == NULL);
703 sc->sc_suspender = curlwp;
704 while (sc->sc_command_addr != 0)
705 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
706 mutex_exit(&sc->sc_lock);
707
708 /*
709 * Block roothub xfers which might touch portsc registers until
710 * we're done suspending.
711 */
712 mutex_enter(&sc->sc_rhlock);
713
714 /*
715 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
716 * xHCI Power Management, p. 342
717 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342
718 */
719
720 /*
721 * `1. Stop all USB activity by issuing Stop Endpoint Commands
722 * for Busy endpoints in the Running state. If the Force
723 * Save Context Capability (FSC = ``0'') is not supported,
724 * then Stop Endpoint Commands shall be issued for all idle
725 * endpoints in the Running state as well. The Stop
726 * Endpoint Command causes the xHC to update the respective
727 * Endpoint or Stream Contexts in system memory, e.g. the
728 * TR Dequeue Pointer, DCS, etc. fields. Refer to
729 * Implementation Note "0".'
730 */
731 for (i = 0; i < sc->sc_maxslots; i++) {
732 struct xhci_slot *xs = &sc->sc_slots[i];
733
734 /* Skip if the slot is not in use. */
735 if (xs->xs_idx == 0)
736 continue;
737
738 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
739 /* Skip if the endpoint is not Running. */
740 /* XXX What about Busy? */
741 if (xhci_get_epstate(sc, xs, dci) !=
742 XHCI_EPSTATE_RUNNING)
743 continue;
744
745 /* Stop endpoint. */
746 mutex_enter(&sc->sc_lock);
747 err = xhci_stop_endpoint_cmd(sc, xs, dci,
748 XHCI_TRB_3_SUSP_EP_BIT);
749 mutex_exit(&sc->sc_lock);
750 if (err) {
751 device_printf(self, "failed to stop endpoint"
752 " slot %zu dci %zu err %d\n",
753 i, dci, err);
754 goto out;
755 }
756 }
757 }
758
759 /*
760 * Next, suspend all the ports:
761 *
762 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15:
763 * Suspend-Resume, pp. 276-283
764 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276
765 */
766 for (bn = 0; bn < 2; bn++) {
767 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
768 /* 4.15.1: Port Suspend. */
769 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
770
771 /*
772 * `System software places individual ports
773 * into suspend mode by writing a ``3'' into
774 * the appropriate PORTSC register Port Link
775 * State (PLS) field (refer to Section 5.4.8).
776 * Software should only set the PLS field to
777 * ``3'' when the port is in the Enabled
778 * state.'
779 *
780 * `Software should not attempt to suspend a
781 * port unless the port reports that it is in
782 * the enabled (PED = ``1''; PLS < ``3'')
783 * state (refer to Section 5.4.8 for more
784 * information about PED and PLS).'
785 */
786 v = xhci_op_read_4(sc, port);
787 if (((v & XHCI_PS_PED) == 0) ||
788 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3)
789 continue;
790 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
791 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3);
792 xhci_op_write_4(sc, port, v);
793
794 /*
795 * `When the PLS field is written with U3
796 * (``3''), the status of the PLS bit will not
797 * change to the target U state U3 until the
798 * suspend signaling has completed to the
799 * attached device (which may be as long as
800 * 10ms.).'
801 *
802 * `Software is required to wait for U3
803 * transitions to complete before it puts the
804 * xHC into a low power state, and before
805 * resuming the port.'
806 *
807 * XXX Take advantage of the technique to
808 * reduce polling on host controllers that
809 * support the U3C capability.
810 */
811 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) {
812 v = xhci_op_read_4(sc, port);
813 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3)
814 break;
815 usb_delay_ms(&sc->sc_bus, 1);
816 }
817 if (j == XHCI_WAIT_PLS_U3) {
818 device_printf(self,
819 "suspend timeout on bus %zu port %zu\n",
820 bn, i);
821 goto out;
822 }
823 }
824 }
825
826 /*
827 * `2. Ensure that the Command Ring is in the Stopped state
828 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is
829 * empty), and all Command Completion Events associated
830 * with them have been received.'
831 *
832 * XXX
833 */
834
835 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */
836 xhci_op_write_4(sc, XHCI_USBCMD,
837 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS);
838
839 /*
840 * `4. Read the Operational Runtime, and VTIO registers in the
841 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ,
842 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their
843 * state.'
844 *
845 * (We don't use VTIO here (XXX for now?).)
846 */
847 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
848 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL);
849 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP);
850 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG);
851 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0));
852 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0));
853 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0));
854 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0));
855 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0));
856
857 /*
858 * `5. Set the Controller Save State (CSS) flag in the USBCMD
859 * register (5.4.1)...'
860 */
861 xhci_op_write_4(sc, XHCI_USBCMD,
862 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS);
863
864 /*
865 * `...and wait for the Save State Status (SSS) flag in the
866 * USBSTS register (5.4.2) to transition to ``0''.'
867 */
868 for (i = 0; i < XHCI_WAIT_SSS; i++) {
869 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0)
870 break;
871 usb_delay_ms(&sc->sc_bus, 1);
872 }
873 if (i >= XHCI_WAIT_SSS) {
874 device_printf(self, "suspend timeout, USBSTS.SSS\n");
875 /*
876 * Just optimistically go on and check SRE anyway --
877 * what's the worst that could happen?
878 */
879 }
880
881 /*
882 * `Note: After a Save or Restore operation completes, the
883 * Save/Restore Error (SRE) flag in the USBSTS register should
884 * be checked to ensure that the operation completed
885 * successfully.'
886 */
887 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
888 device_printf(self, "suspend error, USBSTS.SRE\n");
889 goto out;
890 }
891
892 /* Success! */
893 ok = true;
894
895 out: mutex_exit(&sc->sc_rhlock);
896 if (!ok) {
897 /*
898 * If suspend failed, resume command issuance.
899 */
900 mutex_enter(&sc->sc_lock);
901 KASSERT(sc->sc_suspender == curlwp);
902 sc->sc_suspender = NULL;
903 cv_broadcast(&sc->sc_cmdbusy_cv);
904 mutex_exit(&sc->sc_lock);
905 }
906 return ok;
907 }
908
909 bool
910 xhci_resume(device_t self, const pmf_qual_t *qual)
911 {
912 struct xhci_softc * const sc = device_private(self);
913 size_t i, j, bn, dci;
914 int port;
915 uint32_t v;
916 bool ok = false;
917
918 XHCIHIST_FUNC(); XHCIHIST_CALLED();
919
920 KASSERT(sc->sc_suspender);
921
922 /*
923 * Block roothub xfers which might touch portsc registers until
924 * we're done resuming.
925 */
926 mutex_enter(&sc->sc_rhlock);
927
928 /*
929 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
930 * xHCI Power Management, p. 343
931 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343
932 */
933
934 /*
935 * `4. Restore the Operational Runtime, and VTIO registers with
936 * their previously saved state in the following order:
937 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN,
938 * IMOD, and VTIO.'
939 *
940 * (We don't use VTIO here (for now?).)
941 */
942 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd);
943 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl);
944 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap);
945 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config);
946 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0);
947 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0);
948 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0);
949 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0);
950 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0);
951
952 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */
953
954 /*
955 * `5. Set the Controller Restore State (CRS) flag in the
956 * USBCMD register (5.4.1) to ``1''...'
957 */
958 xhci_op_write_4(sc, XHCI_USBCMD,
959 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS);
960
961 /*
962 * `...and wait for the Restore State Status (RSS) in the
963 * USBSTS register (5.4.2) to transition to ``0''.'
964 */
965 for (i = 0; i < XHCI_WAIT_RSS; i++) {
966 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0)
967 break;
968 usb_delay_ms(&sc->sc_bus, 1);
969 }
970 if (i >= XHCI_WAIT_RSS) {
971 device_printf(self, "resume timeout, USBSTS.RSS\n");
972 goto out;
973 }
974
975 /*
976 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits
977 * are consistent with the RCS values to be written to the
978 * CRCR.'
979 *
980 * XXX Hope just zeroing it is good enough!
981 */
982 xhci_host_dequeue(sc->sc_cr);
983
984 /*
985 * `7. Write the CRCR with the address and RCS value of the
986 * reinitialized Command Ring. Note that this write will
987 * cause the Command Ring to restart at the address
988 * specified by the CRCR.'
989 */
990 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
991 sc->sc_cr->xr_cs);
992
993 /*
994 * `8. Enable the controller by setting Run/Stop (R/S) =
995 * ``1''.'
996 */
997 xhci_op_write_4(sc, XHCI_USBCMD,
998 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS);
999
1000 /*
1001 * `9. Software shall walk the USB topology and initialize each
1002 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and
1003 * external hub ports attached to USB devices.'
1004 *
1005 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2
1006 * `Port Resume', 4.15.2.2 `Host Initiated'.
1007 *
1008 * XXX We should maybe batch up initiating the state
1009 * transitions, and then wait for them to complete all at once.
1010 */
1011 for (bn = 0; bn < 2; bn++) {
1012 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
1013 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
1014
1015 /* `When a port is in the U3 state: ...' */
1016 v = xhci_op_read_4(sc, port);
1017 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3)
1018 continue;
1019
1020 /*
1021 * `For a USB2 protocol port, software shall
1022 * write a ``15'' (Resume) to the PLS field to
1023 * initiate resume signaling. The port shall
1024 * transition to the Resume substate and the
1025 * xHC shall transmit the resume signaling
1026 * within 1ms (T_URSM). Software shall ensure
1027 * that resume is signaled for at least 20ms
1028 * (T_DRSMDN). Software shall start timing
1029 * T_DRSMDN from the write of ``15'' (Resume)
1030 * to PLS.'
1031 */
1032 if (bn == 1) {
1033 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0);
1034 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1035 v |= XHCI_PS_LWS;
1036 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME);
1037 xhci_op_write_4(sc, port, v);
1038 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
1039 } else {
1040 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0);
1041 }
1042
1043 /*
1044 * `For a USB3 protocol port [and a USB2
1045 * protocol port after transitioning to
1046 * Resume], software shall write a ``0'' (U0)
1047 * to the PLS field...'
1048 */
1049 v = xhci_op_read_4(sc, port);
1050 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1051 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0);
1052 xhci_op_write_4(sc, port, v);
1053
1054 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) {
1055 v = xhci_op_read_4(sc, port);
1056 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0)
1057 break;
1058 usb_delay_ms(&sc->sc_bus, 1);
1059 }
1060 if (j == XHCI_WAIT_PLS_U0) {
1061 device_printf(self,
1062 "resume timeout on bus %zu port %zu\n",
1063 bn, i);
1064 goto out;
1065 }
1066 }
1067 }
1068
1069 /*
1070 * `10. Restart each of the previously Running endpoints by
1071 * ringing their doorbells.'
1072 */
1073 for (i = 0; i < sc->sc_maxslots; i++) {
1074 struct xhci_slot *xs = &sc->sc_slots[i];
1075
1076 /* Skip if the slot is not in use. */
1077 if (xs->xs_idx == 0)
1078 continue;
1079
1080 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
1081 /* Skip if the endpoint is not Running. */
1082 if (xhci_get_epstate(sc, xs, dci) !=
1083 XHCI_EPSTATE_RUNNING)
1084 continue;
1085
1086 /* Ring the doorbell. */
1087 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
1088 }
1089 }
1090
1091 /*
1092 * `Note: After a Save or Restore operation completes, the
1093 * Save/Restore Error (SRE) flag in the USBSTS register should
1094 * be checked to ensure that the operation completed
1095 * successfully.'
1096 */
1097 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
1098 device_printf(self, "resume error, USBSTS.SRE\n");
1099 goto out;
1100 }
1101
1102 /* Success! */
1103 ok = true;
1104
1105 out: /*
1106 * Resume command issuance. If the hardware failed to resume,
1107 * well, tough -- deadlocking because everything is held up on
1108 * the suspension, with no opportunity to detach, isn't better
1109 * than timing out waiting for dead hardware.
1110 */
1111 mutex_enter(&sc->sc_lock);
1112 KASSERT(sc->sc_suspender);
1113 sc->sc_suspender = NULL;
1114 cv_broadcast(&sc->sc_cmdbusy_cv);
1115 mutex_exit(&sc->sc_lock);
1116
1117 mutex_exit(&sc->sc_rhlock);
1118 return ok;
1119 }
1120
1121 bool
1122 xhci_shutdown(device_t self, int flags)
1123 {
1124 return false;
1125 }
1126
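/*
 * Halt and reset the host controller: wait for Controller Not Ready (CNR)
 * to clear, stop the controller, issue HCRST, and wait for the reset and
 * CNR to clear again. Returns 0 on success or EIO on timeout.
 */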
1127 static int
1128 xhci_hc_reset(struct xhci_softc * const sc)
1129 {
1130 uint32_t usbcmd, usbsts;
1131 int i;
1132
1133 /* Check controller not ready */
1134 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1135 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1136 if ((usbsts & XHCI_STS_CNR) == 0)
1137 break;
1138 usb_delay_ms(&sc->sc_bus, 1);
1139 }
1140 if (i >= XHCI_WAIT_CNR) {
1141 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
1142 return EIO;
1143 }
1144
1145 /* Halt controller */
1146 usbcmd = 0;
1147 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1148 usb_delay_ms(&sc->sc_bus, 1);
1149
1150 /* Reset controller */
1151 usbcmd = XHCI_CMD_HCRST;
1152 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1153 for (i = 0; i < XHCI_WAIT_HCRST; i++) {
1154 /*
1155 		 * Wait 1ms first. Existing Intel xHCI controllers require a
1156 		 * 1ms delay to prevent a system hang (errata).
1157 */
1158 usb_delay_ms(&sc->sc_bus, 1);
1159 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
1160 if ((usbcmd & XHCI_CMD_HCRST) == 0)
1161 break;
1162 }
1163 if (i >= XHCI_WAIT_HCRST) {
1164 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
1165 return EIO;
1166 }
1167
1168 /* Check controller not ready */
1169 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1170 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1171 if ((usbsts & XHCI_STS_CNR) == 0)
1172 break;
1173 usb_delay_ms(&sc->sc_bus, 1);
1174 }
1175 if (i >= XHCI_WAIT_CNR) {
1176 aprint_error_dev(sc->sc_dev,
1177 "controller not ready timeout after reset\n");
1178 return EIO;
1179 }
1180
1181 return 0;
1182 }
1183
1184 /* 7.2 xHCI Support Protocol Capability */
1185 static void
1186 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
1187 {
1188 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1189
1190 /* XXX Cache this lot */
1191
1192 const uint32_t w0 = xhci_read_4(sc, ecp);
1193 const uint32_t w4 = xhci_read_4(sc, ecp + 4);
1194 const uint32_t w8 = xhci_read_4(sc, ecp + 8);
1195 const uint32_t wc = xhci_read_4(sc, ecp + 0xc);
1196
1197 aprint_debug_dev(sc->sc_dev,
1198 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc);
1199
1200 if (w4 != XHCI_XECP_USBID)
1201 return;
1202
1203 const int major = XHCI_XECP_SP_W0_MAJOR(w0);
1204 const int minor = XHCI_XECP_SP_W0_MINOR(w0);
1205 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
1206 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);
1207
1208 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
1209 switch (mm) {
1210 case 0x0200:
1211 case 0x0300:
1212 case 0x0301:
1213 case 0x0310:
1214 case 0x0320:
1215 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
1216 		    major == 3 ? "ss" : "hs", cpo, cpo + cpc - 1);
1217 		if (major == 3)
1218 			sc->sc_usb3nports = cpo + cpc - 1;
1219 		else
1220 			sc->sc_usb2nports = cpo + cpc - 1;
1221 break;
1222 default:
1223 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
1224 major, minor);
1225 return;
1226 }
1227
1228 const size_t bus = (major == 3) ? 0 : 1;
1229
1230 /* Index arrays with 0..n-1 where ports are numbered 1..n */
1231 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
1232 if (sc->sc_ctlrportmap[cp] != 0) {
1233 aprint_error_dev(sc->sc_dev, "controller port %zu "
1234 			    "already assigned\n", cp);
1235 continue;
1236 }
1237
1238 sc->sc_ctlrportbus[cp / NBBY] |=
1239 bus == 0 ? 0 : __BIT(cp % NBBY);
1240
1241 const size_t rhp = sc->sc_rhportcount[bus]++;
1242
1243 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
1244 "bus %zu rhp %zu is %d", bus, rhp,
1245 sc->sc_rhportmap[bus][rhp]);
1246
1247 sc->sc_rhportmap[bus][rhp] = cp + 1;
1248 sc->sc_ctlrportmap[cp] = rhp + 1;
1249 }
1250 }
1251
1252 /* Process extended capabilities */
1253 static void
1254 xhci_ecp(struct xhci_softc *sc)
1255 {
1256 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1257
1258 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4;
1259 while (ecp != 0) {
1260 uint32_t ecr = xhci_read_4(sc, ecp);
1261 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
1262 switch (XHCI_XECP_ID(ecr)) {
1263 case XHCI_ID_PROTOCOLS: {
1264 xhci_id_protocols(sc, ecp);
1265 break;
1266 }
1267 case XHCI_ID_USB_LEGACY: {
1268 uint8_t bios_sem;
1269
1270 /* Take host controller ownership from BIOS */
1271 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
1272 if (bios_sem) {
1273 /* sets xHCI to be owned by OS */
1274 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
1275 aprint_debug_dev(sc->sc_dev,
1276 "waiting for BIOS to give up control\n");
1277 for (int i = 0; i < 5000; i++) {
1278 bios_sem = xhci_read_1(sc, ecp +
1279 XHCI_XECP_BIOS_SEM);
1280 if (bios_sem == 0)
1281 break;
1282 DELAY(1000);
1283 }
1284 if (bios_sem) {
1285 aprint_error_dev(sc->sc_dev,
1286 "timed out waiting for BIOS\n");
1287 }
1288 }
1289 break;
1290 }
1291 default:
1292 break;
1293 }
1294 ecr = xhci_read_4(sc, ecp);
1295 if (XHCI_XECP_NEXT(ecr) == 0) {
1296 ecp = 0;
1297 } else {
1298 ecp += XHCI_XECP_NEXT(ecr) * 4;
1299 }
1300 }
1301 }
1302
1303 #define XHCI_HCCPREV1_BITS \
1304 "\177\020" /* New bitmask */ \
1305 "f\020\020XECP\0" \
1306 "f\014\4MAXPSA\0" \
1307 "b\013CFC\0" \
1308 "b\012SEC\0" \
1309 "b\011SBD\0" \
1310 "b\010FSE\0" \
1311 "b\7NSS\0" \
1312 "b\6LTC\0" \
1313 "b\5LHRC\0" \
1314 "b\4PIND\0" \
1315 "b\3PPC\0" \
1316 "b\2CZC\0" \
1317 "b\1BNC\0" \
1318 "b\0AC64\0" \
1319 "\0"
1320 #define XHCI_HCCV1_x_BITS \
1321 "\177\020" /* New bitmask */ \
1322 "f\020\020XECP\0" \
1323 "f\014\4MAXPSA\0" \
1324 "b\013CFC\0" \
1325 "b\012SEC\0" \
1326 "b\011SPC\0" \
1327 "b\010PAE\0" \
1328 "b\7NSS\0" \
1329 "b\6LTC\0" \
1330 "b\5LHRC\0" \
1331 "b\4PIND\0" \
1332 "b\3PPC\0" \
1333 "b\2CSZ\0" \
1334 "b\1BNC\0" \
1335 "b\0AC64\0" \
1336 "\0"
1337
1338 #define XHCI_HCC2_BITS \
1339 "\177\020" /* New bitmask */ \
1340 "b\7ETC_TSC\0" \
1341 "b\6ETC\0" \
1342 "b\5CIC\0" \
1343 "b\4LEC\0" \
1344 "b\3CTC\0" \
1345 "b\2FSC\0" \
1346 "b\1CMC\0" \
1347 "b\0U3C\0" \
1348 "\0"
1349
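/*
 * Enable the interrupter, program interrupt moderation (Intel controllers
 * want a non-zero IMOD), and set Run/Stop to start the controller. Called
 * from xhci_init() unless the XHCI_DEFERRED_START quirk is set.
 */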
1350 void
1351 xhci_start(struct xhci_softc *sc)
1352 {
1353 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
1354 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
1355 		/* Intel xHCI needs the interrupt rate moderated. */
1356 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
1357 else
1358 xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
1359 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
1360 xhci_rt_read_4(sc, XHCI_IMOD(0)));
1361
1362 /* Go! */
1363 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
1364 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n",
1365 xhci_op_read_4(sc, XHCI_USBCMD));
1366 }
1367
1368 int
1369 xhci_init(struct xhci_softc *sc)
1370 {
1371 bus_size_t bsz;
1372 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff;
1373 uint32_t pagesize, config;
1374 int i = 0;
1375 uint16_t hciversion;
1376 uint8_t caplength;
1377
1378 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1379
1380 /* Set up the bus struct for the usb 3 and usb 2 buses */
1381 sc->sc_bus.ub_methods = &xhci_bus_methods;
1382 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
1383 sc->sc_bus.ub_usedma = true;
1384 sc->sc_bus.ub_hcpriv = sc;
1385
1386 sc->sc_bus2.ub_methods = &xhci_bus_methods;
1387 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
1388 sc->sc_bus2.ub_revision = USBREV_2_0;
1389 sc->sc_bus2.ub_usedma = true;
1390 sc->sc_bus2.ub_hcpriv = sc;
1391 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;
1392
1393 caplength = xhci_read_1(sc, XHCI_CAPLENGTH);
1394 hciversion = xhci_read_2(sc, XHCI_HCIVERSION);
1395
1396 if (hciversion < XHCI_HCIVERSION_0_96 ||
1397 hciversion >= 0x0200) {
1398 aprint_normal_dev(sc->sc_dev,
1399 "xHCI version %x.%x not known to be supported\n",
1400 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1401 } else {
1402 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
1403 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1404 }
1405
1406 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
1407 &sc->sc_cbh) != 0) {
1408 aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
1409 return ENOMEM;
1410 }
1411
1412 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
1413 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
1414 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
1415 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
1416 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
1417 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
1418 aprint_debug_dev(sc->sc_dev,
1419 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);
1420
1421 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
1422 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32;
1423
1424 char sbuf[128];
1425 if (hciversion < XHCI_HCIVERSION_1_0)
1426 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc);
1427 else
1428 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc);
1429 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
1430 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n",
1431 XHCI_HCC_XECP(sc->sc_hcc) * 4);
1432 if (hciversion >= XHCI_HCIVERSION_1_1) {
1433 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
1434 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2);
1435 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
1436 }
1437
1438 /* default all ports to bus 0, i.e. usb 3 */
1439 sc->sc_ctlrportbus = kmem_zalloc(
1440 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
1441 sc->sc_ctlrportmap = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1442
1443 /* controller port to bus roothub port map */
1444 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
1445 sc->sc_rhportmap[j] = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1446 }
1447
1448 /*
1449 * Process all Extended Capabilities
1450 */
1451 xhci_ecp(sc);
1452
1453 bsz = XHCI_PORTSC(sc->sc_maxports);
1454 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
1455 &sc->sc_obh) != 0) {
1456 aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
1457 return ENOMEM;
1458 }
1459
1460 dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
1461 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
1462 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
1463 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
1464 return ENOMEM;
1465 }
1466
1467 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
1468 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
1469 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
1470 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
1471 return ENOMEM;
1472 }
1473
1474 int rv;
1475 rv = xhci_hc_reset(sc);
1476 if (rv != 0) {
1477 return rv;
1478 }
1479
1480 if (sc->sc_vendor_init)
1481 sc->sc_vendor_init(sc);
1482
1483 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
1484 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
1485 pagesize = ffs(pagesize);
1486 if (pagesize == 0) {
1487 aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
1488 return EIO;
1489 }
1490 sc->sc_pgsz = 1 << (12 + (pagesize - 1));
1491 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
1492 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
1493 (uint32_t)sc->sc_maxslots);
1494 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);
1495
1496 int err;
1497 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
1498 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
1499 if (sc->sc_maxspbuf != 0) {
1500 err = usb_allocmem(sc->sc_bus.ub_dmatag,
1501 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
1502 USBMALLOC_COHERENT | USBMALLOC_ZERO,
1503 &sc->sc_spbufarray_dma);
1504 if (err) {
1505 aprint_error_dev(sc->sc_dev,
1506 "spbufarray init fail, err %d\n", err);
1507 return ENOMEM;
1508 }
1509
1510 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
1511 sc->sc_maxspbuf, KM_SLEEP);
1512 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
1513 for (i = 0; i < sc->sc_maxspbuf; i++) {
1514 usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
1515 /* allocate contexts */
1516 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz,
1517 sc->sc_pgsz, USBMALLOC_COHERENT | USBMALLOC_ZERO,
1518 dma);
1519 if (err) {
1520 aprint_error_dev(sc->sc_dev,
1521 "spbufarray_dma init fail, err %d\n", err);
1522 rv = ENOMEM;
1523 goto bad1;
1524 }
1525 spbufarray[i] = htole64(DMAADDR(dma, 0));
1526 usb_syncmem(dma, 0, sc->sc_pgsz,
1527 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1528 }
1529
1530 usb_syncmem(&sc->sc_spbufarray_dma, 0,
1531 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
1532 }
1533
1534 config = xhci_op_read_4(sc, XHCI_CONFIG);
1535 config &= ~0xFF;
1536 config |= sc->sc_maxslots & 0xFF;
1537 xhci_op_write_4(sc, XHCI_CONFIG, config);
1538
1539 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
1540 XHCI_COMMAND_RING_SEGMENTS_ALIGN);
1541 if (err) {
1542 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n",
1543 err);
1544 rv = ENOMEM;
1545 goto bad1;
1546 }
1547
1548 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
1549 XHCI_EVENT_RING_SEGMENTS_ALIGN);
1550 if (err) {
1551 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
1552 err);
1553 rv = ENOMEM;
1554 goto bad2;
1555 }
1556
1557 usb_dma_t *dma;
1558 size_t size;
1559 size_t align;
1560
1561 dma = &sc->sc_eventst_dma;
1562 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
1563 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
1564 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
1565 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
1566 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1567 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1568 if (err) {
1569 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
1570 err);
1571 rv = ENOMEM;
1572 goto bad3;
1573 }
1574
1575 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
1576 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
1577 KERNADDR(&sc->sc_eventst_dma, 0),
1578 sc->sc_eventst_dma.udma_block->size);
1579
1580 dma = &sc->sc_dcbaa_dma;
1581 size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
1582 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
1583 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
1584 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1585 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1586 if (err) {
1587 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
1588 rv = ENOMEM;
1589 goto bad4;
1590 }
1591 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
1592 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
1593 KERNADDR(&sc->sc_dcbaa_dma, 0),
1594 sc->sc_dcbaa_dma.udma_block->size);
1595
1596 if (sc->sc_maxspbuf != 0) {
1597 /*
1598 		 * DCBAA entry 0 holds the scratchpad buffer array pointer.
1599 */
1600 *(uint64_t *)KERNADDR(dma, 0) =
1601 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
1602 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1603 }
1604
1605 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
1606 KM_SLEEP);
1607 if (sc->sc_slots == NULL) {
1608 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
1609 rv = ENOMEM;
1610 goto bad;
1611 }
1612
1613 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
1614 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
1615 if (sc->sc_xferpool == NULL) {
1616 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
1617 err);
1618 rv = ENOMEM;
1619 goto bad;
1620 }
1621
1622 cv_init(&sc->sc_command_cv, "xhcicmd");
1623 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
1624 mutex_init(&sc->sc_rhlock, MUTEX_DEFAULT, IPL_NONE);
1625 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
1626 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
1627
1628 struct xhci_erste *erst;
1629 erst = KERNADDR(&sc->sc_eventst_dma, 0);
1630 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0));
1631 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb);
1632 erst[0].erste_3 = htole32(0);
1633 usb_syncmem(&sc->sc_eventst_dma, 0,
1634 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);
1635
1636 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
1637 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
1638 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) |
1639 XHCI_ERDP_BUSY);
1640
1641 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
1642 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1643 sc->sc_cr->xr_cs);
1644
1645 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
1646 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);
1647
1648 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
1649 xhci_start(sc);
1650
1651 return 0;
1652
1653 bad:
1654 if (sc->sc_xferpool) {
1655 pool_cache_destroy(sc->sc_xferpool);
1656 sc->sc_xferpool = NULL;
1657 }
1658
1659 if (sc->sc_slots) {
1660 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
1661 sc->sc_maxslots);
1662 sc->sc_slots = NULL;
1663 }
1664
1665 usb_freemem(&sc->sc_dcbaa_dma);
1666 bad4:
1667 usb_freemem(&sc->sc_eventst_dma);
1668 bad3:
1669 xhci_ring_free(sc, &sc->sc_er);
1670 bad2:
1671 xhci_ring_free(sc, &sc->sc_cr);
1672 i = sc->sc_maxspbuf;
1673 bad1:
1674 for (int j = 0; j < i; j++)
1675 usb_freemem(&sc->sc_spbuf_dma[j]);
1676 usb_freemem(&sc->sc_spbufarray_dma);
1677
1678 return rv;
1679 }
1680
1681 static inline bool
1682 xhci_polling_p(struct xhci_softc * const sc)
1683 {
1684 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
1685 }
1686
1687 int
1688 xhci_intr(void *v)
1689 {
1690 struct xhci_softc * const sc = v;
1691 int ret = 0;
1692
1693 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1694
1695 if (sc == NULL)
1696 return 0;
1697
1698 mutex_spin_enter(&sc->sc_intr_lock);
1699
1700 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1701 goto done;
1702
1703 /* If we get an interrupt while polling, then just ignore it. */
1704 if (xhci_polling_p(sc)) {
1705 #ifdef DIAGNOSTIC
1706 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1707 #endif
1708 goto done;
1709 }
1710
1711 ret = xhci_intr1(sc);
1712 if (ret) {
1713 KASSERT(sc->sc_child || sc->sc_child2);
1714
1715 /*
1716 		 * One of the child busses could already be detached. It doesn't
1717 		 * matter which of the two the softintr is scheduled on.
1718 */
1719 if (sc->sc_child)
1720 usb_schedsoftintr(&sc->sc_bus);
1721 else
1722 usb_schedsoftintr(&sc->sc_bus2);
1723 }
1724 done:
1725 mutex_spin_exit(&sc->sc_intr_lock);
1726 return ret;
1727 }
1728
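/*
 * Acknowledge an interrupt: return 0 if none of the USBSTS bits of
 * interest are set; otherwise clear the transient USBSTS bits and the
 * interrupter's IMAN.IP flag and return 1 so the caller schedules the
 * softintr.
 */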
1729 int
1730 xhci_intr1(struct xhci_softc * const sc)
1731 {
1732 uint32_t usbsts;
1733 uint32_t iman;
1734
1735 XHCIHIST_FUNC();
1736
1737 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1738 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
1739 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
1740 XHCI_STS_HCE)) == 0) {
1741 DPRINTFN(16, "ignored intr not for %jd",
1742 device_unit(sc->sc_dev), 0, 0, 0);
1743 return 0;
1744 }
1745
1746 /*
1747 	 * Clear EINT and other transient flags so as not to misinterpret
1748 	 * the next shared interrupt. Also, to avoid a race, EINT must be
1749 	 * cleared before XHCI_IMAN_INTR_PEND is cleared.
1750 */
1751 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & XHCI_STS_RSVDP0);
1752
1753 #ifdef XHCI_DEBUG
1754 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1755 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1756 #endif
1757
1758 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1759 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1760 iman |= XHCI_IMAN_INTR_PEND;
1761 xhci_rt_write_4(sc, XHCI_IMAN(0), iman);
1762
1763 #ifdef XHCI_DEBUG
1764 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1765 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1766 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1767 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1768 #endif
1769
1770 return 1;
1771 }
1772
1773 /*
1774 * 3 port speed types used in USB stack
1775 *
1776 * usbdi speed
1777 * definition: USB_SPEED_* in usb.h
1778 * They are used in struct usbd_device in USB stack.
1779 * ioctl interface uses these values too.
1780 * port_status speed
1781 * definition: UPS_*_SPEED in usb.h
1782 * They are used in usb_port_status_t and valid only for USB 2.0.
1783 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus
1784 * of usb_port_status_ext_t indicates port speed.
1785 * Note that some 3.0 values overlap with 2.0 values.
1786 	 *  (e.g. 0x200 means UPS_PORT_POWER_SS in SS and
1787 * means UPS_LOW_SPEED in HS.)
1788 * port status returned from hub also uses these values.
1789 * On NetBSD UPS_OTHER_SPEED indicates port speed is super speed
1790 * or more.
1791 * xspeed:
1792 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
1793 	 *  They are used only in the slot context and PORTSC reg of xhci.
1794 * The difference between usbdi speed and xspeed is
1795 * that FS and LS values are swapped.
1796 */
1797
1798 /* convert usbdi speed to xspeed */
1799 static int
1800 xhci_speed2xspeed(int speed)
1801 {
1802 switch (speed) {
1803 case USB_SPEED_LOW: return 2;
1804 case USB_SPEED_FULL: return 1;
1805 default: return speed;
1806 }
1807 }
1808
1809 #if 0
1810 /* convert xspeed to usbdi speed */
1811 static int
1812 xhci_xspeed2speed(int xspeed)
1813 {
1814 switch (xspeed) {
1815 case 1: return USB_SPEED_FULL;
1816 case 2: return USB_SPEED_LOW;
1817 default: return xspeed;
1818 }
1819 }
1820 #endif
1821
1822 /* convert xspeed to port status speed */
1823 static int
1824 xhci_xspeed2psspeed(int xspeed)
1825 {
1826 switch (xspeed) {
1827 case 0: return 0;
1828 case 1: return UPS_FULL_SPEED;
1829 case 2: return UPS_LOW_SPEED;
1830 case 3: return UPS_HIGH_SPEED;
1831 default: return UPS_OTHER_SPEED;
1832 }
1833 }
1834
1835 /*
1836 * Construct input contexts and issue TRB to open pipe.
1837 */
1838 static usbd_status
1839 xhci_configure_endpoint(struct usbd_pipe *pipe)
1840 {
1841 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1842 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1843 #ifdef USB_DEBUG
1844 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1845 #endif
1846 struct xhci_soft_trb trb;
1847 usbd_status err;
1848
1849 XHCIHIST_FUNC();
1850 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
1851 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
1852 pipe->up_endpoint->ue_edesc->bmAttributes);
1853
1854 /* XXX ensure input context is available? */
1855
1856 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz);
1857
1858 /* set up context */
1859 xhci_setup_ctx(pipe);
1860
1861 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0),
1862 sc->sc_ctxsz * 1);
1863 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs,
1864 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1);
1865
1866 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1867 trb.trb_2 = 0;
1868 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1869 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1870
1871 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
1872
1873 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1874 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci),
1875 sc->sc_ctxsz * 1);
1876
1877 return err;
1878 }
1879
1880 #if 0
1881 static usbd_status
1882 xhci_unconfigure_endpoint(struct usbd_pipe *pipe)
1883 {
1884 #ifdef USB_DEBUG
1885 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1886 #endif
1887
1888 XHCIHIST_FUNC();
1889 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
1890
1891 return USBD_NORMAL_COMPLETION;
1892 }
1893 #endif
1894
1895 /* 4.6.8, 6.4.3.7 */
1896 static void
1897 xhci_reset_endpoint(struct usbd_pipe *pipe)
1898 {
1899 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1900 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1901 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1902 struct xhci_soft_trb trb;
1903
1904 XHCIHIST_FUNC();
1905 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1906
1907 KASSERT(mutex_owned(&sc->sc_lock));
1908
1909 trb.trb_0 = 0;
1910 trb.trb_2 = 0;
1911 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1912 XHCI_TRB_3_EP_SET(dci) |
1913 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP);
1914
1915 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
1916 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
1917 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
1918 }
1919 }
1920
1921 /*
1922 * 4.6.9, 6.4.3.8
1923 * Stop execution of TDs on xfer ring.
1924 * Should be called with sc_lock held.
1925 */
1926 static usbd_status
1927 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci,
1928 uint32_t trb3flags)
1929 {
1930 struct xhci_soft_trb trb;
1931 usbd_status err;
1932
1933 XHCIHIST_FUNC();
1934 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1935
1936 KASSERT(mutex_owned(&sc->sc_lock));
1937
1938 trb.trb_0 = 0;
1939 trb.trb_2 = 0;
1940 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1941 XHCI_TRB_3_EP_SET(dci) |
1942 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) |
1943 trb3flags;
1944
1945 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1946
1947 return err;
1948 }
1949
1950 static usbd_status
1951 xhci_stop_endpoint(struct usbd_pipe *pipe)
1952 {
1953 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1954 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1955 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1956
1957 XHCIHIST_FUNC();
1958 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1959
1960 KASSERT(mutex_owned(&sc->sc_lock));
1961
1962 return xhci_stop_endpoint_cmd(sc, xs, dci, 0);
1963 }
1964
1965 /*
1966 * Set TR Dequeue Pointer.
1967 * xHCI 1.1 4.6.10 6.4.3.9
1968  * Purge all of the TRBs on the ring and reinitialize the ring.
1969  * Set the TR Dequeue Pointer to 0 and the Cycle State to 1.
1970  * The EPSTATE of the endpoint must be ERROR or STOPPED, otherwise a
1971  * CONTEXT_STATE error will be generated.
1972 */
1973 static void
1974 xhci_set_dequeue(struct usbd_pipe *pipe)
1975 {
1976 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1977 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1978 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1979 struct xhci_ring * const xr = xs->xs_xr[dci];
1980 struct xhci_soft_trb trb;
1981
1982 XHCIHIST_FUNC();
1983 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1984
1985 KASSERT(mutex_owned(&sc->sc_lock));
1986 KASSERT(xr != NULL);
1987
1988 xhci_host_dequeue(xr);
1989
1990 /* set DCS */
1991 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
1992 trb.trb_2 = 0;
1993 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1994 XHCI_TRB_3_EP_SET(dci) |
1995 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
1996
1997 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
1998 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
1999 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
2000 }
2001 }
2002
2003 /*
2004 * Open new pipe: called from usbd_setup_pipe_flags.
2005  * Fills in the pipe methods.
2006  * If the pipe is not for ep0, calls xhci_configure_endpoint.
2007 */
2008 static usbd_status
2009 xhci_open(struct usbd_pipe *pipe)
2010 {
2011 struct usbd_device * const dev = pipe->up_dev;
2012 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
2013 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
2014 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2015 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2016 const u_int dci = xhci_ep_get_dci(ed);
2017 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
2018 usbd_status err;
2019
2020 XHCIHIST_FUNC();
2021 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr,
2022 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed);
2023 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx",
2024 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress,
2025 ed->bmAttributes);
2026 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize),
2027 ed->bInterval, 0, 0);
2028
2029 if (sc->sc_dying)
2030 return USBD_IOERROR;
2031
2032 /* Root Hub */
2033 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) {
2034 switch (ed->bEndpointAddress) {
2035 case USB_CONTROL_ENDPOINT:
2036 pipe->up_methods = &roothub_ctrl_methods;
2037 break;
2038 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
2039 pipe->up_methods = &xhci_root_intr_methods;
2040 break;
2041 default:
2042 pipe->up_methods = NULL;
2043 DPRINTFN(0, "bad bEndpointAddress 0x%02jx",
2044 ed->bEndpointAddress, 0, 0, 0);
2045 return USBD_INVAL;
2046 }
2047 return USBD_NORMAL_COMPLETION;
2048 }
2049
2050 usb_init_task(&xpipe->xp_async_task, xhci_pipe_restart_async_task,
2051 pipe, USB_TASKQ_MPSAFE);
2052
2053 switch (xfertype) {
2054 case UE_CONTROL:
2055 pipe->up_methods = &xhci_device_ctrl_methods;
2056 break;
2057 case UE_ISOCHRONOUS:
2058 pipe->up_methods = &xhci_device_isoc_methods;
2059 pipe->up_serialise = false;
2060 xpipe->xp_isoc_next = -1;
2061 break;
2062 case UE_BULK:
2063 pipe->up_methods = &xhci_device_bulk_methods;
2064 break;
2065 case UE_INTERRUPT:
2066 pipe->up_methods = &xhci_device_intr_methods;
2067 break;
2068 default:
2069 		return USBD_IOERROR;
2071 }
2072
2073 KASSERT(xs != NULL);
2074 KASSERT(xs->xs_xr[dci] == NULL);
2075
2076 /* allocate transfer ring */
2077 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS,
2078 XHCI_TRB_ALIGN);
2079 if (err) {
2080 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0);
2081 return err;
2082 }
2083
2084 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT)
2085 return xhci_configure_endpoint(pipe);
2086
2087 return USBD_NORMAL_COMPLETION;
2088 }
2089
2090 /*
2091 * Closes pipe, called from usbd_kill_pipe via close methods.
2092  * If the endpoint to be closed is ep0, the whole slot is disabled.
2093 * Should be called with sc_lock held.
2094 */
2095 static void
2096 xhci_close_pipe(struct usbd_pipe *pipe)
2097 {
2098 struct xhci_pipe * const xp =
2099 container_of(pipe, struct xhci_pipe, xp_pipe);
2100 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2101 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2102 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2103 const u_int dci = xhci_ep_get_dci(ed);
2104 struct xhci_soft_trb trb;
2105 uint32_t *cp;
2106
2107 XHCIHIST_FUNC();
2108
2109 usb_rem_task_wait(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC,
2110 &sc->sc_lock);
2111
2112 if (sc->sc_dying)
2113 return;
2114
2115 /* xs is uninitialized before xhci_init_slot */
2116 if (xs == NULL || xs->xs_idx == 0)
2117 return;
2118
2119 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2120 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2121
2122 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
2123 KASSERT(mutex_owned(&sc->sc_lock));
2124
2125 if (pipe->up_dev->ud_depth == 0)
2126 return;
2127
2128 if (dci == XHCI_DCI_EP_CONTROL) {
2129 DPRINTFN(4, "closing ep0", 0, 0, 0, 0);
2130 /* This frees all rings */
2131 xhci_disable_slot(sc, xs->xs_idx);
2132 return;
2133 }
2134
2135 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED)
2136 (void)xhci_stop_endpoint(pipe);
2137
2138 /*
2139 * set appropriate bit to be dropped.
2140 * don't set DC bit to 1, otherwise all endpoints
2141 * would be deconfigured.
2142 */
2143 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
2144 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));
2145 cp[1] = htole32(0);
2146
2147 /* XXX should be most significant one, not dci? */
2148 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
2149 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci));
2150
2151 /* configure ep context performs an implicit dequeue */
2152 xhci_host_dequeue(xs->xs_xr[dci]);
2153
2154 /* sync input contexts before they are read from memory */
2155 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
2156
2157 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
2158 trb.trb_2 = 0;
2159 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2160 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
2161
2162 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
2163 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2164
2165 xhci_ring_free(sc, &xs->xs_xr[dci]);
2166 xs->xs_xr[dci] = NULL;
2167 }
2168
2169 /*
2170 * Abort transfer. Must be called with sc_lock held. Releases and
2171 * reacquires sc_lock to sleep until hardware acknowledges abort.
2172 */
2173 static void
2174 xhci_abortx(struct usbd_xfer *xfer)
2175 {
2176 XHCIHIST_FUNC();
2177 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2178
2179 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
2180 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
2181
2182 KASSERT(mutex_owned(&sc->sc_lock));
2183 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2184 xfer->ux_status == USBD_TIMEOUT),
2185 "bad abort status: %d", xfer->ux_status);
2186
2187 xhci_pipe_restart(xfer->ux_pipe);
2188
2189 DPRINTFN(14, "end", 0, 0, 0, 0);
2190 }
2191
2192 static void
2193 xhci_host_dequeue(struct xhci_ring * const xr)
2194 {
2195 /* When dequeueing the controller, update our struct copy too */
2196 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE);
2197 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE,
2198 BUS_DMASYNC_PREWRITE);
2199 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies));
2200
2201 xr->xr_ep = 0;
2202 xr->xr_cs = 1;
2203 }
2204
2205 /*
2206 * Recover STALLed endpoint, or stop endpoint to abort a pipe.
2207 * xHCI 1.1 sect 4.10.2.1
2208 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove
2209 * all transfers on transfer ring.
2210 */
2211 static void
2212 xhci_pipe_restart(struct usbd_pipe *pipe)
2213 {
2214 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2215 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2216 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2217
2218 XHCIHIST_FUNC();
2219 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2220 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2221
2222 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2223
2224 /*
2225 * - If the endpoint is halted, indicating a stall, reset it.
2226 * - If the endpoint is stopped, we're already good.
2227 * - Otherwise, someone wanted to abort the pipe, so stop the
2228 * endpoint.
2229 *
2230 * In any case, clear the ring.
2231 */
2232 switch (xhci_get_epstate(sc, xs, dci)) {
2233 case XHCI_EPSTATE_HALTED:
2234 xhci_reset_endpoint(pipe);
2235 break;
2236 case XHCI_EPSTATE_STOPPED:
2237 break;
2238 default:
2239 xhci_stop_endpoint(pipe);
2240 break;
2241 }
2242
2243 switch (xhci_get_epstate(sc, xs, dci)) {
2244 case XHCI_EPSTATE_STOPPED:
2245 break;
2246 case XHCI_EPSTATE_ERROR:
2247 device_printf(sc->sc_dev, "endpoint 0x%x error\n",
2248 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2249 break;
2250 default:
2251 device_printf(sc->sc_dev, "endpoint 0x%x failed to stop\n",
2252 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2253 }
2254
2255 xhci_set_dequeue(pipe);
2256
2257 DPRINTFN(4, "ends", 0, 0, 0, 0);
2258 }
2259
2260 static void
2261 xhci_pipe_restart_async_task(void *cookie)
2262 {
2263 struct usbd_pipe * const pipe = cookie;
2264 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2265 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2266 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2267 struct xhci_ring * const tr = xs->xs_xr[dci];
2268 struct usbd_xfer *xfer;
2269
2270 XHCIHIST_FUNC();
2271 XHCIHIST_CALLARGS("sc=%#jx pipe=%#jx",
2272 (uintptr_t)sc, (uintptr_t)pipe, 0, 0);
2273
2274 mutex_enter(&sc->sc_lock);
2275
2276 xhci_pipe_restart(pipe);
2277
2278 /*
2279 * We halted our own queue because it stalled. Mark it no
2280 * longer halted and start issuing queued transfers again.
2281 */
2282 tr->is_halted = false;
2283 xfer = SIMPLEQ_FIRST(&pipe->up_queue);
2284 if (xfer) {
2285 /*
2286 * If the first xfer of the queue is not in progress,
2287 * though, there may be a concurrent software abort
2288 * that has already cancelled it and is now in the
2289 * middle of a concurrent xhci_pipe_restart waiting to
2290 * reacquire the pipe (bus) lock. So only restart the
2291 * xfer if it's still USBD_IN_PROGRESS.
2292 *
2293 * Either way, xfers on the queue can't be in
2294 * USBD_NOT_STARTED.
2295 */
2296 KASSERT(xfer->ux_status != USBD_NOT_STARTED);
2297 if (xfer->ux_status == USBD_IN_PROGRESS) {
2298 (*pipe->up_methods->upm_start)(xfer);
2299 } else {
2300 DPRINTF("pipe restart race xfer=%#jx status=%jd",
2301 (uintptr_t)xfer, xfer->ux_status, 0, 0);
2302 }
2303 }
2304
2305 mutex_exit(&sc->sc_lock);
2306 }
2307
2308 static void
2309 xhci_pipe_restart_async(struct usbd_pipe *pipe)
2310 {
2311 struct xhci_pipe * const xp =
2312 container_of(pipe, struct xhci_pipe, xp_pipe);
2313 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2314 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2315 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2316 struct xhci_ring * const tr = xs->xs_xr[dci];
2317
2318 XHCIHIST_FUNC();
2319 XHCIHIST_CALLARGS("pipe %#jx", (uintptr_t)pipe, 0, 0, 0);
2320
2321 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2322
2323 tr->is_halted = true;
2324 usb_add_task(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC);
2325
2326 DPRINTFN(4, "ends", 0, 0, 0, 0);
2327 }
2328
2329 /* Process root hub port status/change events and notify uhub_intr. */
2330 static void
2331 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport)
2332 {
2333 XHCIHIST_FUNC();
2334 XHCIHIST_CALLARGS("xhci%jd: port %ju status change",
2335 device_unit(sc->sc_dev), ctlrport, 0, 0);
2336
2337 if (ctlrport > sc->sc_maxports)
2338 return;
2339
2340 const size_t bn = xhci_ctlrport2bus(sc, ctlrport);
2341 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport);
2342 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn];
2343
2344 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change",
2345 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer);
2346
2347 if (xfer == NULL)
2348 return;
2349 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
2350
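	/*
	 * Report the change in the hub interrupt bitmap: bit N of the
	 * buffer corresponds to root hub port N (bit 0 is the hub itself).
	 */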
2351 uint8_t *p = xfer->ux_buf;
2352 memset(p, 0, xfer->ux_length);
2353 p[rhp / NBBY] |= 1 << (rhp % NBBY);
2354 xfer->ux_actlen = xfer->ux_length;
2355 xfer->ux_status = USBD_NORMAL_COMPLETION;
2356 usb_transfer_complete(xfer);
2357 }
2358
2359 /* Process Transfer Events */
2360 static void
2361 xhci_event_transfer(struct xhci_softc * const sc,
2362 const struct xhci_trb * const trb)
2363 {
2364 uint64_t trb_0;
2365 uint32_t trb_2, trb_3;
2366 uint8_t trbcode;
2367 u_int slot, dci;
2368 struct xhci_slot *xs;
2369 struct xhci_ring *xr;
2370 struct xhci_xfer *xx;
2371 struct usbd_xfer *xfer;
2372 usbd_status err;
2373
2374 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2375
2376 trb_0 = le64toh(trb->trb_0);
2377 trb_2 = le32toh(trb->trb_2);
2378 trb_3 = le32toh(trb->trb_3);
2379 trbcode = XHCI_TRB_2_ERROR_GET(trb_2);
2380 slot = XHCI_TRB_3_SLOT_GET(trb_3);
2381 dci = XHCI_TRB_3_EP_GET(trb_3);
2382 xs = &sc->sc_slots[slot];
2383 xr = xs->xs_xr[dci];
2384
2385 /* sanity check */
2386 KASSERT(xr != NULL);
2387 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots,
2388 "invalid xs_idx %u slot %u", xs->xs_idx, slot);
2389
2390 int idx = 0;
2391 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2392 if (xhci_trb_get_idx(xr, trb_0, &idx)) {
2393 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0);
2394 return;
2395 }
2396 xx = xr->xr_cookies[idx];
2397
2398 /* clear cookie of consumed TRB */
2399 xr->xr_cookies[idx] = NULL;
2400
2401 /*
2402 		 * xx is NULL if the pipe is open but no xfer has been started,
2403 		 * e.g. when stopping an idle pipe.
2404 */
2405 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) {
2406 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju",
2407 idx, (uintptr_t)xx, trbcode, dci);
2408 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0,
2409 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)),
2410 0, 0);
2411 return;
2412 }
2413 } else {
2414 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. */
2415 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
2416 }
2417 /* XXX this may not happen */
2418 if (xx == NULL) {
2419 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
2420 return;
2421 }
2422 xfer = &xx->xx_xfer;
2423 /* XXX this may happen when detaching */
2424 if (xfer == NULL) {
2425 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2426 (uintptr_t)xx, trb_0, 0, 0);
2427 return;
2428 }
2429 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2430 	/* XXX it is not clear why this happens */
2431 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2432
2433 if (!xfer->ux_pipe->up_repeat &&
2434 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2435 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2436 0, 0, 0);
2437 return;
2438 }
2439
2440 const uint8_t xfertype =
2441 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2442
2443 /* 4.11.5.2 Event Data TRB */
2444 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2445 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2446 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2447 if ((trb_0 & 0x3) == 0x3) {
2448 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2449 }
2450 }
2451
2452 switch (trbcode) {
2453 case XHCI_TRB_ERROR_SHORT_PKT:
2454 case XHCI_TRB_ERROR_SUCCESS:
2455 /*
2456 * A ctrl transfer can generate two events if it has a Data
2457 * stage. A short data stage can be OK and should not
2458 * complete the transfer as the status stage needs to be
2459 * performed.
2460 *
2461 * Note: Data and Status stage events point at same xfer.
2462 * ux_actlen and ux_dmabuf will be passed to
2463 * usb_transfer_complete after the Status stage event.
2464 *
2465 * It can be distinguished which stage generates the event:
2466 		 *  + by checking the low bits of trb_0 (trb_0 & 0x3) if ED==1.
2467 * (see xhci_device_ctrl_start).
2468 * + by checking the type of original TRB if ED==0.
2469 *
2470 		 * In addition, intr, bulk, and isoc transfers currently
2471 		 * consist of a single TD, so the "skip" is not needed.
2472 * ctrl xfer uses EVENT_DATA, and others do not.
2473 * Thus driver can switch the flow by checking ED bit.
2474 */
2475 if (xfertype == UE_ISOCHRONOUS) {
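			/*
			 * The event's remaining-length field is the number of
			 * bytes not transferred; subtract it from the requested
			 * frame length to get this frame's actual length.
			 */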
2476 xfer->ux_frlengths[xx->xx_isoc_done] -=
2477 XHCI_TRB_2_REM_GET(trb_2);
2478 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2479 if (++xx->xx_isoc_done < xfer->ux_nframes)
2480 return;
2481 } else
2482 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2483 if (xfer->ux_actlen == 0)
2484 xfer->ux_actlen = xfer->ux_length -
2485 XHCI_TRB_2_REM_GET(trb_2);
2486 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2487 == XHCI_TRB_TYPE_DATA_STAGE) {
2488 return;
2489 }
2490 } else if ((trb_0 & 0x3) == 0x3) {
2491 return;
2492 }
2493 err = USBD_NORMAL_COMPLETION;
2494 break;
2495 case XHCI_TRB_ERROR_STOPPED:
2496 case XHCI_TRB_ERROR_LENGTH:
2497 case XHCI_TRB_ERROR_STOPPED_SHORT:
2498 err = USBD_IOERROR;
2499 break;
2500 case XHCI_TRB_ERROR_STALL:
2501 case XHCI_TRB_ERROR_BABBLE:
2502 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2503 xhci_pipe_restart_async(xfer->ux_pipe);
2504 err = USBD_STALLED;
2505 break;
2506 default:
2507 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2508 err = USBD_IOERROR;
2509 break;
2510 }
2511
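	/*
	 * Complete the xfer on events for ordinary TRBs, or on the Event
	 * Data TRB whose low bits are 0x0 (the Status stage, per the
	 * tagging done in xhci_device_ctrl_start); Data stage Event Data
	 * (tag 0x3) was already filtered out above.
	 */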
2512 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
2513 (trb_0 & 0x3) == 0x0) {
2514 /*
2515 * Try to claim this xfer for completion. If it has
2516 * already completed or aborted, drop it on the floor.
2517 */
2518 if (!usbd_xfer_trycomplete(xfer))
2519 return;
2520
2521 /* Set the status. */
2522 xfer->ux_status = err;
2523
2524 usb_transfer_complete(xfer);
2525 }
2526 }
2527
2528 /* Process Command complete events */
2529 static void
2530 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb)
2531 {
2532 uint64_t trb_0;
2533 uint32_t trb_2, trb_3;
2534
2535 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2536
2537 KASSERT(mutex_owned(&sc->sc_lock));
2538
2539 trb_0 = le64toh(trb->trb_0);
2540 trb_2 = le32toh(trb->trb_2);
2541 trb_3 = le32toh(trb->trb_3);
2542
2543 if (trb_0 == sc->sc_command_addr) {
2544 sc->sc_resultpending = false;
2545
2546 sc->sc_result_trb.trb_0 = trb_0;
2547 sc->sc_result_trb.trb_2 = trb_2;
2548 sc->sc_result_trb.trb_3 = trb_3;
2549 if (XHCI_TRB_2_ERROR_GET(trb_2) !=
2550 XHCI_TRB_ERROR_SUCCESS) {
2551 DPRINTFN(1, "command completion "
2552 "failure: 0x%016jx 0x%08jx 0x%08jx",
2553 trb_0, trb_2, trb_3, 0);
2554 }
2555 cv_signal(&sc->sc_command_cv);
2556 } else {
2557 DPRINTFN(1, "spurious event: %#jx 0x%016jx "
2558 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3);
2559 }
2560 }
2561
2562 /*
2563  * Process one event TRB.
2564  * Called from xhci_softintr.
2565 */
2566 static void
2567 xhci_handle_event(struct xhci_softc * const sc,
2568 const struct xhci_trb * const trb)
2569 {
2570 uint64_t trb_0;
2571 uint32_t trb_2, trb_3;
2572
2573 XHCIHIST_FUNC();
2574
2575 trb_0 = le64toh(trb->trb_0);
2576 trb_2 = le32toh(trb->trb_2);
2577 trb_3 = le32toh(trb->trb_3);
2578
2579 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx",
2580 (uintptr_t)trb, trb_0, trb_2, trb_3);
2581
2582 /*
2583 * 4.11.3.1, 6.4.2.1
2584 * TRB Pointer is invalid for these completion codes.
2585 */
2586 switch (XHCI_TRB_2_ERROR_GET(trb_2)) {
2587 case XHCI_TRB_ERROR_RING_UNDERRUN:
2588 case XHCI_TRB_ERROR_RING_OVERRUN:
2589 case XHCI_TRB_ERROR_VF_RING_FULL:
2590 return;
2591 default:
2592 if (trb_0 == 0) {
2593 return;
2594 }
2595 break;
2596 }
2597
2598 switch (XHCI_TRB_3_TYPE_GET(trb_3)) {
2599 case XHCI_TRB_EVENT_TRANSFER:
2600 xhci_event_transfer(sc, trb);
2601 break;
2602 case XHCI_TRB_EVENT_CMD_COMPLETE:
2603 xhci_event_cmd(sc, trb);
2604 break;
2605 case XHCI_TRB_EVENT_PORT_STS_CHANGE:
2606 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff));
2607 break;
2608 default:
2609 break;
2610 }
2611 }
2612
2613 static void
2614 xhci_softintr(void *v)
2615 {
2616 struct usbd_bus * const bus = v;
2617 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2618 struct xhci_ring * const er = sc->sc_er;
2619 struct xhci_trb *trb;
2620 int i, j, k;
2621
2622 XHCIHIST_FUNC();
2623
2624 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2625
2626 i = er->xr_ep;
2627 j = er->xr_cs;
2628
2629 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0);
2630
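	/*
	 * Consume event TRBs while their cycle bit matches our consumer
	 * cycle state; a mismatch means the xHC has not written that slot
	 * yet.  The consumer cycle state toggles on each ring wrap.
	 */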
2631 while (1) {
2632 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE,
2633 BUS_DMASYNC_POSTREAD);
2634 trb = &er->xr_trb[i];
2635 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0;
2636
2637 if (j != k)
2638 break;
2639
2640 xhci_handle_event(sc, trb);
2641
2642 i++;
2643 if (i == er->xr_ntrb) {
2644 i = 0;
2645 j ^= 1;
2646 }
2647 }
2648
2649 er->xr_ep = i;
2650 er->xr_cs = j;
2651
2652 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) |
2653 XHCI_ERDP_BUSY);
2654
2655 DPRINTFN(16, "ends", 0, 0, 0, 0);
2656
2657 return;
2658 }
2659
2660 static void
2661 xhci_poll(struct usbd_bus *bus)
2662 {
2663 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2664
2665 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2666
2667 mutex_enter(&sc->sc_intr_lock);
2668 int ret = xhci_intr1(sc);
2669 if (ret) {
2670 xhci_softintr(bus);
2671 }
2672 mutex_exit(&sc->sc_intr_lock);
2673
2674 return;
2675 }
2676
2677 static struct usbd_xfer *
2678 xhci_allocx(struct usbd_bus *bus, unsigned int nframes)
2679 {
2680 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2681 struct xhci_xfer *xx;
2682 u_int ntrbs;
2683
2684 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2685
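	/*
	 * Reserve per-xfer soft TRB storage: one per frame for isochronous
	 * transfers, and at least 3 (presumably enough to cover a control
	 * transfer's setup, data, and status stages).
	 */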
2686 ntrbs = uimax(3, nframes);
2687 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs;
2688
2689 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
2690 if (xx != NULL) {
2691 memset(xx, 0, sizeof(*xx));
2692 if (ntrbs > 0) {
2693 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP);
2694 xx->xx_ntrb = ntrbs;
2695 }
2696 #ifdef DIAGNOSTIC
2697 xx->xx_xfer.ux_state = XFER_BUSY;
2698 #endif
2699 }
2700
2701 return &xx->xx_xfer;
2702 }
2703
2704 static void
2705 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
2706 {
2707 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2708 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
2709
2710 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2711
2712 #ifdef DIAGNOSTIC
2713 if (xfer->ux_state != XFER_BUSY &&
2714 xfer->ux_status != USBD_NOT_STARTED) {
2715 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx",
2716 (uintptr_t)xfer, xfer->ux_state, 0, 0);
2717 }
2718 xfer->ux_state = XFER_FREE;
2719 #endif
2720 if (xx->xx_ntrb > 0) {
2721 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb));
2722 xx->xx_trb = NULL;
2723 xx->xx_ntrb = 0;
2724 }
2725 pool_cache_put(sc->sc_xferpool, xx);
2726 }
2727
2728 static bool
2729 xhci_dying(struct usbd_bus *bus)
2730 {
2731 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2732
2733 return sc->sc_dying;
2734 }
2735
2736 static void
2737 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2738 {
2739 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2740
2741 *lock = &sc->sc_lock;
2742 }
2743
2744 extern uint32_t usb_cookie_no;
2745
2746 /*
2747 * xHCI 4.3
2748 * Called when uhub_explore finds a new device (via usbd_new_device).
2749 * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2750 * This function does:
2751  *  Allocate and construct the dev structure and its default endpoint (ep0).
2752 * Allocate and open pipe of ep0.
2753 * Enable slot and initialize slot context.
2754 * Set Address.
2755 * Read initial device descriptor.
2756 * Determine initial MaxPacketSize (mps) by speed.
2757 * Read full device descriptor.
2758 * Register this device.
2759  * Finally the device state transitions to ADDRESSED.
2760 */
2761 static usbd_status
2762 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2763 int speed, int port, struct usbd_port *up)
2764 {
2765 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2766 struct usbd_device *dev;
2767 usbd_status err;
2768 usb_device_descriptor_t *dd;
2769 struct xhci_slot *xs;
2770 uint32_t *cp;
2771
2772 XHCIHIST_FUNC();
2773 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2774 port, depth, speed, (uintptr_t)up);
2775
2776 KASSERT(KERNEL_LOCKED_P());
2777
2778 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2779 dev->ud_bus = bus;
2780 dev->ud_quirks = &usbd_no_quirk;
2781 dev->ud_addr = 0;
2782 dev->ud_ddesc.bMaxPacketSize = 0;
2783 dev->ud_depth = depth;
2784 dev->ud_powersrc = up;
2785 dev->ud_myhub = up->up_parent;
2786 dev->ud_speed = speed;
2787 dev->ud_langid = USBD_NOLANG;
2788 dev->ud_cookie.cookie = ++usb_cookie_no;
2789
2790 /* Set up default endpoint handle. */
2791 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2792 	/* doesn't matter, just don't leave it uninitialized */
2793 dev->ud_ep0.ue_toggle = 0;
2794
2795 /* Set up default endpoint descriptor. */
2796 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2797 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2798 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2799 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2800 dev->ud_ep0desc.bInterval = 0;
2801
2802 /* 4.3, 4.8.2.1 */
2803 switch (speed) {
2804 case USB_SPEED_SUPER:
2805 case USB_SPEED_SUPER_PLUS:
2806 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2807 break;
2808 case USB_SPEED_FULL:
2809 /* XXX using 64 as initial mps of ep0 in FS */
2810 case USB_SPEED_HIGH:
2811 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2812 break;
2813 case USB_SPEED_LOW:
2814 default:
2815 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2816 break;
2817 }
2818
2819 up->up_dev = dev;
2820
2821 dd = &dev->ud_ddesc;
2822
2823 if (depth == 0 && port == 0) {
2824 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2825 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2826
2827 /* Establish the default pipe. */
2828 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2829 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2830 if (err) {
2831 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0);
2832 goto bad;
2833 }
2834 err = usbd_get_initial_ddesc(dev, dd);
2835 if (err) {
2836 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2837 goto bad;
2838 }
2839 } else {
2840 uint8_t slot = 0;
2841
2842 /* 4.3.2 */
2843 err = xhci_enable_slot(sc, &slot);
2844 if (err) {
2845 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0);
2846 goto bad;
2847 }
2848
2849 xs = &sc->sc_slots[slot];
2850 dev->ud_hcpriv = xs;
2851
2852 /* 4.3.3 initialize slot structure */
2853 err = xhci_init_slot(dev, slot);
2854 if (err) {
2855 DPRINTFN(1, "init slot %ju", err, 0, 0, 0);
2856 dev->ud_hcpriv = NULL;
2857 /*
2858 * We have to disable_slot here because
2859 * xs->xs_idx == 0 when xhci_init_slot fails,
2860 * in that case usbd_remove_dev won't work.
2861 */
2862 mutex_enter(&sc->sc_lock);
2863 xhci_disable_slot(sc, slot);
2864 mutex_exit(&sc->sc_lock);
2865 goto bad;
2866 }
2867
2868 /*
2869 * We have to establish the default pipe _after_ slot
2870 * structure has been prepared.
2871 */
2872 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2873 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2874 if (err) {
2875 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0,
2876 0);
2877 goto bad;
2878 }
2879
2880 /* 4.3.4 Address Assignment */
2881 err = xhci_set_address(dev, slot, false);
2882 if (err) {
2883 			DPRINTFN(1, "failed to set address: %ju", err, 0, 0, 0);
2884 goto bad;
2885 }
2886
2887 /* Allow device time to set new address */
2888 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE);
2889
2890 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2891 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT);
2892 HEXDUMP("slot context", cp, sc->sc_ctxsz);
2893 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3]));
2894 DPRINTFN(4, "device address %ju", addr, 0, 0, 0);
2895 /*
2896 * XXX ensure we know when the hardware does something
2897 * we can't yet cope with
2898 */
2899 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr);
2900 dev->ud_addr = addr;
2901
2902 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL,
2903 "addr %d already allocated", dev->ud_addr);
2904 /*
2905 * The root hub is given its own slot
2906 */
2907 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev;
2908
2909 err = usbd_get_initial_ddesc(dev, dd);
2910 if (err) {
2911 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2912 goto bad;
2913 }
2914
2915 /* 4.8.2.1 */
2916 if (USB_IS_SS(speed)) {
2917 if (dd->bMaxPacketSize != 9) {
2918 printf("%s: invalid mps 2^%u for SS ep0,"
2919 " using 512\n",
2920 device_xname(sc->sc_dev),
2921 dd->bMaxPacketSize);
2922 dd->bMaxPacketSize = 9;
2923 }
2924 USETW(dev->ud_ep0desc.wMaxPacketSize,
2925 (1 << dd->bMaxPacketSize));
2926 } else
2927 USETW(dev->ud_ep0desc.wMaxPacketSize,
2928 dd->bMaxPacketSize);
2929 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0);
2930 err = xhci_update_ep0_mps(sc, xs,
2931 UGETW(dev->ud_ep0desc.wMaxPacketSize));
2932 if (err) {
2933 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0);
2934 goto bad;
2935 }
2936 }
2937
2938 err = usbd_reload_device_desc(dev);
2939 if (err) {
2940 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
2941 goto bad;
2942 }
2943
2944 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,",
2945 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0);
2946 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,",
2947 dd->bDeviceClass, dd->bDeviceSubClass,
2948 dd->bDeviceProtocol, 0);
2949 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd",
2950 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations,
2951 dev->ud_speed);
2952
2953 usbd_get_device_strings(dev);
2954
2955 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev);
2956
2957 if (depth == 0 && port == 0) {
2958 usbd_attach_roothub(parent, dev);
2959 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0);
2960 return USBD_NORMAL_COMPLETION;
2961 }
2962
2963 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr);
2964 bad:
2965 if (err != USBD_NORMAL_COMPLETION) {
2966 if (depth == 0 && port == 0 && dev->ud_pipe0)
2967 usbd_kill_pipe(dev->ud_pipe0);
2968 usbd_remove_device(dev, up);
2969 }
2970
2971 return err;
2972 }
2973
2974 static usbd_status
2975 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp,
2976 size_t ntrb, size_t align)
2977 {
2978 size_t size = ntrb * XHCI_TRB_SIZE;
2979 struct xhci_ring *xr;
2980
2981 XHCIHIST_FUNC();
2982 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx",
2983 (uintptr_t)*xrp, ntrb, align, 0);
2984
2985 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP);
2986 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0);
2987
2988 int err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
2989 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xr->xr_dma);
2990 if (err) {
2991 kmem_free(xr, sizeof(struct xhci_ring));
2992 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0);
2993 return err;
2994 }
2995 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
2996 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP);
2997 xr->xr_trb = xhci_ring_trbv(xr, 0);
2998 xr->xr_ntrb = ntrb;
2999 xr->is_halted = false;
3000 xhci_host_dequeue(xr);
3001 *xrp = xr;
3002
3003 return USBD_NORMAL_COMPLETION;
3004 }
3005
3006 static void
3007 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr)
3008 {
3009 if (*xr == NULL)
3010 return;
3011
3012 usb_freemem(&(*xr)->xr_dma);
3013 mutex_destroy(&(*xr)->xr_lock);
3014 kmem_free((*xr)->xr_cookies,
3015 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb);
3016 kmem_free(*xr, sizeof(struct xhci_ring));
3017 *xr = NULL;
3018 }
3019
3020 static void
3021 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr,
3022 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs)
3023 {
3024 size_t i;
3025 u_int ri;
3026 u_int cs;
3027 uint64_t parameter;
3028 uint32_t status;
3029 uint32_t control;
3030
3031 XHCIHIST_FUNC();
3032 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju",
3033 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0);
3034
3035 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u",
3036 ntrbs, xr->xr_ntrb);
3037 for (i = 0; i < ntrbs; i++) {
3038 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr,
3039 (uintptr_t)trbs, i, 0);
3040 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx",
3041 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0);
3042 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) !=
3043 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3);
3044 }
3045
3046 ri = xr->xr_ep;
3047 cs = xr->xr_cs;
3048
3049 /*
3050 * Although the xhci hardware can do scatter/gather dma from
3051 	 * arbitrarily sized buffers, there is a non-obvious restriction
3052 * that a LINK trb is only allowed at the end of a burst of
3053 * transfers - which might be 16kB.
3054 	 * Arbitrarily aligned LINK trbs definitely fail on Ivy Bridge.
3055 * The simple solution is not to allow a LINK trb in the middle
3056 * of anything - as here.
3057 * XXX: (dsl) There are xhci controllers out there (eg some made by
3058 * ASMedia) that seem to lock up if they process a LINK trb but
3059 * cannot process the linked-to trb yet.
3060 * The code should write the 'cycle' bit on the link trb AFTER
3061 * adding the other trb.
3062 */
3063 u_int firstep = xr->xr_ep;
3064 u_int firstcs = xr->xr_cs;
3065
3066 for (i = 0; i < ntrbs; ) {
3067 u_int oldri = ri;
3068 u_int oldcs = cs;
3069
3070 if (ri >= (xr->xr_ntrb - 1)) {
3071 /* Put Link TD at the end of ring */
3072 parameter = xhci_ring_trbp(xr, 0);
3073 status = 0;
3074 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) |
3075 XHCI_TRB_3_TC_BIT;
3076 xr->xr_cookies[ri] = NULL;
3077 xr->xr_ep = 0;
3078 xr->xr_cs ^= 1;
3079 ri = xr->xr_ep;
3080 cs = xr->xr_cs;
3081 } else {
3082 parameter = trbs[i].trb_0;
3083 status = trbs[i].trb_2;
3084 control = trbs[i].trb_3;
3085
3086 xr->xr_cookies[ri] = cookie;
3087 ri++;
3088 i++;
3089 }
3090 /*
3091 		 * If this is the first TRB, mark it invalid to prevent
3092 		 * the xHC from running it immediately.
3093 */
3094 if (oldri == firstep) {
3095 if (oldcs) {
3096 control &= ~XHCI_TRB_3_CYCLE_BIT;
3097 } else {
3098 control |= XHCI_TRB_3_CYCLE_BIT;
3099 }
3100 } else {
3101 if (oldcs) {
3102 control |= XHCI_TRB_3_CYCLE_BIT;
3103 } else {
3104 control &= ~XHCI_TRB_3_CYCLE_BIT;
3105 }
3106 }
3107 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control);
3108 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri,
3109 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3110 }
3111
3112 	/* Now invert the cycle bit of the first TRB to hand the TD to the xHC */
3113 if (firstcs) {
3114 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT);
3115 } else {
3116 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT);
3117 }
3118 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep,
3119 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3120
3121 xr->xr_ep = ri;
3122 xr->xr_cs = cs;
3123
3124 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep,
3125 xr->xr_cs, 0);
3126 }
3127
3128 static inline void
3129 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr,
3130 struct xhci_xfer *xx, u_int ntrb)
3131 {
3132 KASSERT(ntrb <= xx->xx_ntrb);
3133 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb);
3134 }
3135
3136 /*
3137  * Stop executing commands, purge all commands on the command ring, and
3138  * rewind the dequeue pointer.
3139 */
3140 static void
3141 xhci_abort_command(struct xhci_softc *sc)
3142 {
3143 struct xhci_ring * const cr = sc->sc_cr;
3144 uint64_t crcr;
3145 int i;
3146
3147 XHCIHIST_FUNC();
3148 XHCIHIST_CALLARGS("command %#jx timeout, aborting",
3149 sc->sc_command_addr, 0, 0, 0);
3150
3151 mutex_enter(&cr->xr_lock);
3152
3153 /* 4.6.1.2 Aborting a Command */
3154 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3155 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA);
3156
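	/* Wait up to 500ms for the Command Ring Running (CRR) bit to clear. */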
3157 for (i = 0; i < 500; i++) {
3158 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3159 if ((crcr & XHCI_CRCR_LO_CRR) == 0)
3160 break;
3161 usb_delay_ms(&sc->sc_bus, 1);
3162 }
3163 if ((crcr & XHCI_CRCR_LO_CRR) != 0) {
3164 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0);
3165 /* reset HC here? */
3166 }
3167
3168 /* reset command ring dequeue pointer */
3169 cr->xr_ep = 0;
3170 cr->xr_cs = 1;
3171 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs);
3172
3173 mutex_exit(&cr->xr_lock);
3174 }
3175
3176 /*
3177  * Put a command on the command ring, ring the doorbell, and cv_timedwait
3178  * for completion.  Completion is notified by cv_signal from xhci_event_cmd()
3179  * (called from xhci_softintr), or the wait times out.
3180  * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(),
3181  * then examined here.
3182 */
3183 static usbd_status
3184 xhci_do_command_locked(struct xhci_softc * const sc,
3185 struct xhci_soft_trb * const trb, int timeout)
3186 {
3187 struct xhci_ring * const cr = sc->sc_cr;
3188 usbd_status err;
3189
3190 XHCIHIST_FUNC();
3191 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx",
3192 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3193
3194 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
3195 KASSERT(mutex_owned(&sc->sc_lock));
3196
3197 while (sc->sc_command_addr != 0 ||
3198 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp))
3199 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
3200
3201 /*
3202 	 * If the enqueue pointer points at the last TRB of the ring, that is
3203 	 * the Link TRB, and the command TRB will be stored in the 0th TRB.
3204 */
3205 if (cr->xr_ep == cr->xr_ntrb - 1)
3206 sc->sc_command_addr = xhci_ring_trbp(cr, 0);
3207 else
3208 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep);
3209
3210 sc->sc_resultpending = true;
3211
3212 mutex_enter(&cr->xr_lock);
3213 xhci_ring_put(sc, cr, NULL, trb, 1);
3214 mutex_exit(&cr->xr_lock);
3215
3216 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0);
3217
3218 while (sc->sc_resultpending) {
3219 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock,
3220 MAX(1, mstohz(timeout))) == EWOULDBLOCK) {
3221 xhci_abort_command(sc);
3222 err = USBD_TIMEOUT;
3223 goto timedout;
3224 }
3225 }
3226
3227 trb->trb_0 = sc->sc_result_trb.trb_0;
3228 trb->trb_2 = sc->sc_result_trb.trb_2;
3229 trb->trb_3 = sc->sc_result_trb.trb_3;
3230
3231 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx",
3232 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3233
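	/*
	 * Completion codes 192-223 are vendor defined errors and 224-255
	 * are vendor defined information; treat the former like any other
	 * error and the latter as success.
	 */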
3234 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) {
3235 case XHCI_TRB_ERROR_SUCCESS:
3236 err = USBD_NORMAL_COMPLETION;
3237 break;
3238 default:
3239 case 192 ... 223:
3240 DPRINTFN(5, "error %#jx",
3241 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0);
3242 err = USBD_IOERROR;
3243 break;
3244 case 224 ... 255:
3245 err = USBD_NORMAL_COMPLETION;
3246 break;
3247 }
3248
3249 timedout:
3250 sc->sc_resultpending = false;
3251 sc->sc_command_addr = 0;
3252 cv_broadcast(&sc->sc_cmdbusy_cv);
3253
3254 return err;
3255 }
3256
3257 static usbd_status
3258 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb,
3259 int timeout)
3260 {
3261
3262 mutex_enter(&sc->sc_lock);
3263 usbd_status ret = xhci_do_command_locked(sc, trb, timeout);
3264 mutex_exit(&sc->sc_lock);
3265
3266 return ret;
3267 }
3268
3269 static usbd_status
3270 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp)
3271 {
3272 struct xhci_soft_trb trb;
3273 usbd_status err;
3274
3275 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3276
3277 trb.trb_0 = 0;
3278 trb.trb_2 = 0;
3279 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
3280
3281 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3282 if (err != USBD_NORMAL_COMPLETION) {
3283 return err;
3284 }
3285
3286 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3);
3287
3288 return err;
3289 }
3290
3291 /*
3292 * xHCI 4.6.4
3293  * Deallocate rings and device/input context DMA buffers, and disable the slot.
3294 * All endpoints in the slot should be stopped.
3295 * Should be called with sc_lock held.
3296 */
3297 static usbd_status
3298 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot)
3299 {
3300 struct xhci_soft_trb trb;
3301 struct xhci_slot *xs;
3302 usbd_status err;
3303
3304 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3305
3306 if (sc->sc_dying)
3307 return USBD_IOERROR;
3308
3309 trb.trb_0 = 0;
3310 trb.trb_2 = 0;
3311 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) |
3312 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT);
3313
3314 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
3315
3316 if (!err) {
3317 xs = &sc->sc_slots[slot];
3318 if (xs->xs_idx != 0) {
3319 xhci_free_slot(sc, xs);
3320 xhci_set_dcba(sc, 0, slot);
3321 memset(xs, 0, sizeof(*xs));
3322 }
3323 }
3324
3325 return err;
3326 }
3327
3328 /*
3329 * Set address of device and transition slot state from ENABLED to ADDRESSED
3330 * if Block Setaddress Request (BSR) is false.
3331 * If BSR==true, transition slot state from ENABLED to DEFAULT.
3332 * see xHCI 1.1 4.5.3, 3.3.4
3333 * Should be called without sc_lock held.
3334 */
3335 static usbd_status
3336 xhci_address_device(struct xhci_softc * const sc,
3337 uint64_t icp, uint8_t slot_id, bool bsr)
3338 {
3339 struct xhci_soft_trb trb;
3340 usbd_status err;
3341
3342 XHCIHIST_FUNC();
3343 if (bsr) {
3344 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr",
3345 icp, slot_id, 0, 0);
3346 } else {
3347 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr",
3348 icp, slot_id, 0, 0);
3349 }
3350
3351 trb.trb_0 = icp;
3352 trb.trb_2 = 0;
3353 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) |
3354 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) |
3355 (bsr ? XHCI_TRB_3_BSR_BIT : 0);
3356
3357 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3358
3359 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS)
3360 err = USBD_NO_ADDR;
3361
3362 return err;
3363 }
3364
3365 static usbd_status
3366 xhci_update_ep0_mps(struct xhci_softc * const sc,
3367 struct xhci_slot * const xs, u_int mps)
3368 {
3369 struct xhci_soft_trb trb;
3370 usbd_status err;
3371 uint32_t * cp;
3372
3373 XHCIHIST_FUNC();
3374 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0);
3375
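	/*
	 * Evaluate Context (xHCI 4.6.7) only examines the contexts flagged
	 * in the input control context's add mask; here only the ep0
	 * context is flagged, to update its max packet size.
	 */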
3376 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3377 cp[0] = htole32(0);
3378 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL));
3379
3380 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL));
3381 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps));
3382
3383 /* sync input contexts before they are read from memory */
3384 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3385 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3386 sc->sc_ctxsz * 4);
3387
3388 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
3389 trb.trb_2 = 0;
3390 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
3391 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX);
3392
3393 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3394 return err;
3395 }
3396
3397 static void
3398 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si)
3399 {
3400 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0);
3401
3402 XHCIHIST_FUNC();
3403 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd",
3404 (uintptr_t)&dcbaa[si], dcba, si, 0);
3405
3406 dcbaa[si] = htole64(dcba);
3407 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t),
3408 BUS_DMASYNC_PREWRITE);
3409 }
3410
3411 /*
3412 * Allocate device and input context DMA buffer, and
3413 * TRB DMA buffer for each endpoint.
3414 */
3415 static usbd_status
3416 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3417 {
3418 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3419 struct xhci_slot *xs;
3420
3421 XHCIHIST_FUNC();
3422 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3423
3424 xs = &sc->sc_slots[slot];
3425
3426 /* allocate contexts */
3427 int err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3428 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_dc_dma);
3429 if (err) {
3430 DPRINTFN(1, "failed to allocmem output device context %jd",
3431 err, 0, 0, 0);
3432 return USBD_NOMEM;
3433 }
3434
3435 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3436 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_ic_dma);
3437 if (err) {
3438 DPRINTFN(1, "failed to allocmem input device context %jd",
3439 err, 0, 0, 0);
3440 goto bad1;
3441 }
3442
3443 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr));
3444 xs->xs_idx = slot;
3445
3446 return USBD_NORMAL_COMPLETION;
3447
3448 bad1:
3449 usb_freemem(&xs->xs_dc_dma);
3450 xs->xs_idx = 0;
3451 return USBD_NOMEM;
3452 }
3453
3454 static void
3455 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs)
3456 {
3457 u_int dci;
3458
3459 XHCIHIST_FUNC();
3460 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
3461
3462 /* deallocate all allocated rings in the slot */
3463 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
3464 if (xs->xs_xr[dci] != NULL)
3465 xhci_ring_free(sc, &xs->xs_xr[dci]);
3466 }
3467 usb_freemem(&xs->xs_ic_dma);
3468 usb_freemem(&xs->xs_dc_dma);
3469 xs->xs_idx = 0;
3470 }
3471
3472 /*
3473 * Setup slot context, set Device Context Base Address, and issue
3474 * Set Address Device command.
3475 */
3476 static usbd_status
3477 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3478 {
3479 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3480 struct xhci_slot *xs;
3481 usbd_status err;
3482
3483 XHCIHIST_FUNC();
3484 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3485
3486 xs = &sc->sc_slots[slot];
3487
3488 xhci_setup_ctx(dev->ud_pipe0);
3489
3490 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3491 sc->sc_ctxsz * 3);
3492
3493 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3494
3495 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3496
3497 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3498 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3499 sc->sc_ctxsz * 2);
3500
3501 return err;
3502 }
3503
3504 /*
3505 * 4.8.2, 6.2.3.2
3506 * construct slot/endpoint context parameters and do syncmem
3507 */
3508 static void
3509 xhci_setup_ctx(struct usbd_pipe *pipe)
3510 {
3511 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3512 struct usbd_device *dev = pipe->up_dev;
3513 struct xhci_slot * const xs = dev->ud_hcpriv;
3514 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3515 const u_int dci = xhci_ep_get_dci(ed);
3516 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3517 uint32_t *cp;
3518 uint16_t mps = UGETW(ed->wMaxPacketSize);
3519 uint8_t speed = dev->ud_speed;
3520 uint8_t ival = ed->bInterval;
3521
3522 XHCIHIST_FUNC();
3523 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3524 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3525
3526 /* set up initial input control context */
3527 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3528 cp[0] = htole32(0);
3529 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3530 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT));
3531 cp[7] = htole32(0);
3532
3533 /* set up input slot context */
3534 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
3535 cp[0] =
3536 XHCI_SCTX_0_CTX_NUM_SET(dci) |
3537 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed));
3538 cp[1] = 0;
3539 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0);
3540 cp[3] = 0;
3541 xhci_setup_route(pipe, cp);
3542 xhci_setup_tthub(pipe, cp);
3543
3544 cp[0] = htole32(cp[0]);
3545 cp[1] = htole32(cp[1]);
3546 cp[2] = htole32(cp[2]);
3547 cp[3] = htole32(cp[3]);
3548
3549 /* set up input endpoint context */
3550 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci));
3551 cp[0] =
3552 XHCI_EPCTX_0_EPSTATE_SET(0) |
3553 XHCI_EPCTX_0_MULT_SET(0) |
3554 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) |
3555 XHCI_EPCTX_0_LSA_SET(0) |
3556 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0);
3557 cp[1] =
3558 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) |
3559 XHCI_EPCTX_1_HID_SET(0) |
3560 XHCI_EPCTX_1_MAXB_SET(0);
3561
3562 if (xfertype != UE_ISOCHRONOUS)
3563 cp[1] |= XHCI_EPCTX_1_CERR_SET(3);
3564
3565 if (xfertype == UE_CONTROL)
3566 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); /* 6.2.3 */
3567 else if (USB_IS_SS(speed))
3568 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(mps);
3569 else
3570 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(UE_GET_SIZE(mps));
3571
3572 xhci_setup_maxburst(pipe, cp);
3573
3574 switch (xfertype) {
3575 case UE_CONTROL:
3576 break;
3577 case UE_BULK:
3578 /* XXX Set MaxPStreams, HID, and LSA if streams enabled */
3579 break;
3580 case UE_INTERRUPT:
3581 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3582 ival = pipe->up_interval;
3583
3584 ival = xhci_bival2ival(ival, speed);
3585 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3586 break;
3587 case UE_ISOCHRONOUS:
3588 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3589 ival = pipe->up_interval;
3590
3591 /* xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 */
3592 if (speed == USB_SPEED_FULL)
3593 ival += 3; /* 1ms -> 125us */
3594 ival--;
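		/*
		 * The context interval is 2^IVAL * 125us; e.g. a full-speed
		 * isoc endpoint with bInterval 1 (one 1ms frame) yields
		 * IVAL 3, i.e. 2^3 * 125us = 1ms.
		 */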
3595 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3596 break;
3597 default:
3598 break;
3599 }
3600 DPRINTFN(4, "setting ival %ju MaxBurst %#jx",
3601 XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_1_MAXB_GET(cp[1]), 0, 0);
3602
3603 /* rewind TR dequeue pointer in xHC */
3604 /* can't use xhci_ep_get_dci() yet? */
3605 *(uint64_t *)(&cp[2]) = htole64(
3606 xhci_ring_trbp(xs->xs_xr[dci], 0) |
3607 XHCI_EPCTX_2_DCS_SET(1));
3608
3609 cp[0] = htole32(cp[0]);
3610 cp[1] = htole32(cp[1]);
3611 cp[4] = htole32(cp[4]);
3612
3613 /* rewind TR dequeue pointer in driver */
3614 struct xhci_ring *xr = xs->xs_xr[dci];
3615 mutex_enter(&xr->xr_lock);
3616 xhci_host_dequeue(xr);
3617 mutex_exit(&xr->xr_lock);
3618
3619 /* sync input contexts before they are read from memory */
3620 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3621 }
3622
3623 /*
3624 * Setup route string and roothub port of given device for slot context
3625 */
3626 static void
3627 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3628 {
3629 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3630 struct usbd_device *dev = pipe->up_dev;
3631 struct usbd_port *up = dev->ud_powersrc;
3632 struct usbd_device *hub;
3633 struct usbd_device *adev;
3634 uint8_t rhport = 0;
3635 uint32_t route = 0;
3636
3637 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3638
3639 	/* Locate the root hub port and determine the route string */
3640 /* 4.3.3 route string does not include roothub port */
3641 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3642 uint32_t dep;
3643
3644 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd",
3645 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3646 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3647 -1);
3648
3649 if (hub->ud_powersrc == NULL)
3650 break;
3651 dep = hub->ud_depth;
3652 if (dep == 0)
3653 break;
3654 rhport = hub->ud_powersrc->up_portno;
3655 if (dep > USB_HUB_MAX_DEPTH)
3656 continue;
3657
3658 route |=
3659 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3660 << ((dep - 1) * 4);
3661 }
3662 route = route >> 4;
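	/*
	 * Worked example: a device on port 2 of a hub on port 3 of a hub
	 * plugged into root port 1 accumulates 0x231 in the loop above;
	 * shifting out the root port nibble leaves route 0x23, with
	 * rhport 1.
	 */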
3663 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
3664
3665 /* Locate port on upstream high speed hub */
3666 for (adev = dev, hub = up->up_parent;
3667 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3668 adev = hub, hub = hub->ud_myhub)
3669 ;
3670 if (hub) {
3671 int p;
3672 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3673 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) {
3674 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1];
3675 goto found;
3676 }
3677 }
3678 panic("%s: cannot find HS port", __func__);
3679 found:
3680 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3681 } else {
3682 dev->ud_myhsport = NULL;
3683 }
3684
3685 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3686
3687 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3688 ctlrport, route, (uintptr_t)hub);
3689
3690 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3691 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3692 }
3693
3694 /*
3695  * Set up whether the device is a hub, whether it uses MTT, and the
3696  * TT information if it does.
3697 */
3698 static void
3699 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3700 {
3701 struct usbd_device *dev = pipe->up_dev;
3702 struct usbd_port *myhsport = dev->ud_myhsport;
3703 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3704 uint32_t speed = dev->ud_speed;
3705 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3706 uint8_t tthubslot, ttportnum;
3707 bool ishub;
3708 bool usemtt;
3709
3710 XHCIHIST_FUNC();
3711
3712 /*
3713 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3714 * tthubslot:
3715 * This is the slot ID of parent HS hub
3716 * if LS/FS device is connected && connected through HS hub.
3717 * This is 0 if device is not LS/FS device ||
3718 * parent hub is not HS hub ||
3719 * attached to root hub.
3720 * ttportnum:
3721 * This is the downstream facing port of parent HS hub
3722 * if LS/FS device is connected.
3723 * This is 0 if device is not LS/FS device ||
3724 * parent hub is not HS hub ||
3725 * attached to root hub.
3726 */
3727 if (myhsport &&
3728 myhsport->up_parent->ud_addr != rhaddr &&
3729 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3730 ttportnum = myhsport->up_portno;
3731 tthubslot = myhsport->up_parent->ud_addr;
3732 } else {
3733 ttportnum = 0;
3734 tthubslot = 0;
3735 }
3736 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd",
3737 (uintptr_t)myhsport, ttportnum, tthubslot, 0);
3738
3739 /* ishub is valid after reading UDESC_DEVICE */
3740 ishub = (dd->bDeviceClass == UDCLASS_HUB);
3741
3742 /* dev->ud_hub is valid after reading UDESC_HUB */
3743 if (ishub && dev->ud_hub) {
3744 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc;
3745 uint8_t ttt =
3746 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK);
3747
3748 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts);
3749 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt);
3750 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0);
3751 }
3752
3753 #define IS_MTTHUB(dd) \
3754 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT)
3755
/*
 * The MTT flag is set if either
 * 1. this is a HS hub and MTTs are supported and enabled; or
 * 2. this is a LS or FS device and its parent HS hub supports and
 *    has enabled MTTs.
 *
 * XXX "enabled" is not tested yet
 */
3764 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd))
3765 usemtt = true;
3766 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) &&
3767 myhsport &&
3768 myhsport->up_parent->ud_addr != rhaddr &&
3769 IS_MTTHUB(&myhsport->up_parent->ud_ddesc))
3770 usemtt = true;
3771 else
3772 usemtt = false;
3773 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd",
3774 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt);
3775
3776 #undef IS_MTTHUB
3777
3778 cp[0] |=
3779 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) |
3780 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0);
3781 cp[2] |=
3782 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) |
3783 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum);
3784 }
3785
/* set up max packet size, max burst size, and mult for an endpoint */
3787 static void
3788 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp)
3789 {
3790 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
3791 struct usbd_device *dev = pipe->up_dev;
3792 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3793 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3794 usbd_desc_iter_t iter;
3795 const usb_cdc_descriptor_t *cdcd;
3796 uint32_t maxb = 0;
3797 uint16_t mps = UGETW(ed->wMaxPacketSize);
3798 uint8_t speed = dev->ud_speed;
3799 uint8_t mult = 0;
3800 uint8_t ep;
3801
3802 /* config desc is NULL when opening ep0 */
3803 if (dev == NULL || dev->ud_cdesc == NULL)
3804 goto no_cdcd;
3805 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev,
3806 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY);
3807 if (cdcd == NULL)
3808 goto no_cdcd;
3809 usb_desc_iter_init(dev, &iter);
3810 iter.cur = (const void *)cdcd;
3811
3812 /* find endpoint_ss_comp desc for ep of this pipe */
3813 for (ep = 0;;) {
3814 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter);
3815 if (cdcd == NULL)
3816 break;
3817 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) {
3818 ep = ((const usb_endpoint_descriptor_t *)cdcd)->
3819 bEndpointAddress;
3820 if (UE_GET_ADDR(ep) ==
3821 UE_GET_ADDR(ed->bEndpointAddress)) {
3822 cdcd = (const usb_cdc_descriptor_t *)
3823 usb_desc_iter_next(&iter);
3824 break;
3825 }
3826 ep = 0;
3827 }
3828 }
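/*
 * If the walk above stopped at this pipe's endpoint descriptor, the
 * next descriptor is expected to be its SuperSpeed endpoint companion,
 * which supplies bMaxBurst and, for isochronous endpoints, the Mult
 * field (USB 3.x 9.6.7).
 */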
3829 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
3830 const usb_endpoint_ss_comp_descriptor_t * esscd =
3831 (const usb_endpoint_ss_comp_descriptor_t *)cdcd;
3832 maxb = esscd->bMaxBurst;
3833 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes);
3834 }
3835
3836 no_cdcd:
3837 /* 6.2.3.4, 4.8.2.4 */
3838 if (USB_IS_SS(speed)) {
3839 /* USB 3.1 9.6.6 */
3840 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps);
3841 /* USB 3.1 9.6.7 */
3842 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3843 #ifdef notyet
3844 if (xfertype == UE_ISOCHRONOUS) {
3845 }
3846 if (XHCI_HCC2_LEC(sc->sc_hcc2) != 0) {
3847 /* use ESIT */
3848 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x);
3849 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(x);
3850
3851 /* XXX if LEC = 1, set ESIT instead */
3852 cp[0] |= XHCI_EPCTX_0_MULT_SET(0);
3853 } else {
3854 /* use ival */
3855 }
3856 #endif
3857 } else {
3858 /* USB 2.0 9.6.6 */
3859 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(UE_GET_SIZE(mps));
3860
3861 /* 6.2.3.4 */
3862 if (speed == USB_SPEED_HIGH &&
3863 (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)) {
3864 maxb = UE_GET_TRANS(mps);
3865 } else {
3866 /* LS/FS or HS CTRL or HS BULK */
3867 maxb = 0;
3868 }
3869 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3870 }
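/*
 * Examples (hypothetical endpoints): a SS bulk endpoint whose companion
 * descriptor reports bMaxBurst = 3 gets Max Burst Size 3 (bursts of up
 * to 4 packets).  A HS high-bandwidth isochronous endpoint with
 * wMaxPacketSize = 0x1400 encodes a 1024-byte packet plus 2 additional
 * transactions per microframe, so UE_GET_SIZE() gives mps = 1024 and
 * UE_GET_TRANS() gives maxb = 2.
 */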
3871 xpipe->xp_maxb = maxb + 1;
3872 xpipe->xp_mult = mult + 1;
3873 }
3874
/*
 * Convert an endpoint bInterval value to the endpoint context interval
 * value for an interrupt pipe.
 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
 */
3880 static uint32_t
3881 xhci_bival2ival(uint32_t ival, uint32_t speed)
3882 {
3883 if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
3884 int i;
3885
3886 /*
3887 * round ival down to "the nearest base 2 multiple of
3888 * bInterval * 8".
3889 * bInterval is at most 255 as its type is uByte.
3890 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
3891 */
3892 for (i = 10; i > 0; i--) {
3893 if ((ival * 8) >= (1 << i))
3894 break;
3895 }
3896 ival = i;
3897 } else {
3898 /* Interval = bInterval-1 for SS/HS */
3899 ival--;
3900 }
3901
3902 return ival;
3903 }
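/*
 * Examples (hypothetical bInterval values): a FS interrupt endpoint with
 * bInterval = 10 (ms) gives 10 * 8 = 80 125-us units; the largest power
 * of two not exceeding 80 is 2^6, so xhci_bival2ival() returns 6 (an
 * 8 ms period).  A HS or SS endpoint with bInterval = 4 simply becomes
 * 4 - 1 = 3, i.e. 2^3 * 125 us = 1 ms.
 */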
3904
3905 /* ----- */
3906
3907 static void
3908 xhci_noop(struct usbd_pipe *pipe)
3909 {
3910 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3911 }
3912
3913 /*
3914 * Process root hub request.
3915 */
3916 static int
3917 xhci_roothub_ctrl_locked(struct usbd_bus *bus, usb_device_request_t *req,
3918 void *buf, int buflen)
3919 {
3920 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
3921 usb_port_status_t ps;
3922 int l, totlen = 0;
3923 uint16_t len, value, index;
3924 int port, i;
3925 uint32_t v;
3926
3927 XHCIHIST_FUNC();
3928
3929 KASSERT(mutex_owned(&sc->sc_rhlock));
3930
3931 if (sc->sc_dying)
3932 return -1;
3933
3934 size_t bn = bus == &sc->sc_bus ? 0 : 1;
3935
3936 len = UGETW(req->wLength);
3937 value = UGETW(req->wValue);
3938 index = UGETW(req->wIndex);
3939
3940 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
3941 req->bmRequestType | (req->bRequest << 8), value, index, len);
3942
3943 #define C(x,y) ((x) | ((y) << 8))
3944 switch (C(req->bRequest, req->bmRequestType)) {
3945 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3946 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
3947 if (len == 0)
3948 break;
3949 switch (value) {
3950 #define sd ((usb_string_descriptor_t *)buf)
3951 case C(2, UDESC_STRING):
3952 /* Product */
3953 totlen = usb_makestrdesc(sd, len, "xHCI root hub");
3954 break;
3955 #undef sd
3956 default:
3957 /* default from usbroothub */
3958 return buflen;
3959 }
3960 break;
3961
3962 /* Hub requests */
3963 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
3964 break;
3965 /* Clear Port Feature request */
3966 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
3967 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
3968
3969 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
3970 index, value, bn, cp);
3971 if (index < 1 || index > sc->sc_rhportcount[bn]) {
3972 return -1;
3973 }
3974 port = XHCI_PORTSC(cp);
3975 v = xhci_op_read_4(sc, port);
3976 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
3977 v &= ~XHCI_PS_CLEAR;
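/*
 * PORTSC mixes status with write-1-to-clear bits, so mask out
 * everything that takes effect when written back as 1 (XHCI_PS_CLEAR)
 * and OR in only the bit this request is meant to acknowledge.
 */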
3978 switch (value) {
3979 case UHF_PORT_ENABLE:
3980 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
3981 break;
3982 case UHF_PORT_SUSPEND:
3983 return -1;
3984 case UHF_PORT_POWER:
3985 break;
3986 case UHF_PORT_TEST:
3987 case UHF_PORT_INDICATOR:
3988 return -1;
3989 case UHF_C_PORT_CONNECTION:
3990 xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
3991 break;
3992 case UHF_C_PORT_ENABLE:
3993 case UHF_C_PORT_SUSPEND:
3994 case UHF_C_PORT_OVER_CURRENT:
3995 return -1;
3996 case UHF_C_BH_PORT_RESET:
3997 xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
3998 break;
3999 case UHF_C_PORT_RESET:
4000 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4001 break;
4002 case UHF_C_PORT_LINK_STATE:
4003 xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
4004 break;
4005 case UHF_C_PORT_CONFIG_ERROR:
4006 xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
4007 break;
4008 default:
4009 return -1;
4010 }
4011 break;
4012 }
4013 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
4014 if (len == 0)
4015 break;
4016 if ((value & 0xff) != 0) {
4017 return -1;
4018 }
4019 usb_hub_descriptor_t hubd;
4020
4021 totlen = uimin(buflen, sizeof(hubd));
4022 memcpy(&hubd, buf, totlen);
4023 hubd.bNbrPorts = sc->sc_rhportcount[bn];
4024 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
4025 hubd.bPwrOn2PwrGood = 200;
4026 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
4027 /* XXX can't find out? */
4028 hubd.DeviceRemovable[i++] = 0;
4029 }
4030 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
4031 totlen = uimin(totlen, hubd.bDescLength);
4032 memcpy(buf, &hubd, totlen);
4033 break;
4034 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
4035 if (len != 4) {
4036 return -1;
4037 }
4038 memset(buf, 0, len); /* ? XXX */
4039 totlen = len;
4040 break;
4041 /* Get Port Status request */
4042 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
4043 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4044
4045 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
4046 bn, index, cp, 0);
4047 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4048 DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
4049 "portcount=%jd",
4050 index, bn, sc->sc_rhportcount[bn], 0);
4051 return -1;
4052 }
4053 if (len != 4) {
4054 DPRINTFN(5, "bad get port status: len %jd != 4",
4055 len, 0, 0, 0);
4056 return -1;
4057 }
4058 v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
4059 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
4060 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
4061 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
4062 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
4063 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
4064 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
4065 if (v & XHCI_PS_PR) i |= UPS_RESET;
4066 if (v & XHCI_PS_PP) {
4067 if (i & UPS_OTHER_SPEED)
4068 i |= UPS_PORT_POWER_SS;
4069 else
4070 i |= UPS_PORT_POWER;
4071 }
4072 if (i & UPS_OTHER_SPEED)
4073 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
4074 if (sc->sc_vendor_port_status)
4075 i = sc->sc_vendor_port_status(sc, v, i);
4076 USETW(ps.wPortStatus, i);
4077 i = 0;
4078 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
4079 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
4080 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
4081 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
4082 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
4083 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
4084 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
4085 USETW(ps.wPortChange, i);
4086 totlen = uimin(len, sizeof(ps));
4087 memcpy(buf, &ps, totlen);
4088 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
4089 " totlen %jd",
4090 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
4091 break;
4092 }
4093 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
4094 return -1;
4095 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
4096 break;
4097 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
4098 break;
4099 /* Set Port Feature request */
4100 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
4101 int optval = (index >> 8) & 0xff;
4102 index &= 0xff;
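/*
 * For the U1/U2 timeout features the timeout value is carried in the
 * high byte of wIndex (USB 3.x hub class); the low byte is the port
 * number, as for every other port feature.
 */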
4103 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4104 return -1;
4105 }
4106
4107 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4108
4109 port = XHCI_PORTSC(cp);
4110 v = xhci_op_read_4(sc, port);
4111 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
4112 v &= ~XHCI_PS_CLEAR;
4113 switch (value) {
4114 case UHF_PORT_ENABLE:
4115 xhci_op_write_4(sc, port, v | XHCI_PS_PED);
4116 break;
4117 case UHF_PORT_SUSPEND:
4118 /* XXX suspend */
4119 break;
4120 case UHF_PORT_RESET:
4121 xhci_op_write_4(sc, port, v | XHCI_PS_PR);
4122 /* Wait for reset to complete. */
4123 for (i = 0; i < USB_PORT_ROOT_RESET_DELAY / 10; i++) {
4124 if (sc->sc_dying) {
4125 return -1;
4126 }
4127 v = xhci_op_read_4(sc, port);
4128 if ((v & XHCI_PS_PR) == 0) {
4129 break;
4130 }
4131 usb_delay_ms(&sc->sc_bus, 10);
4132 }
4133 break;
4134 case UHF_PORT_POWER:
4135 /* XXX power control */
4136 break;
4137 /* XXX more */
4138 case UHF_C_PORT_RESET:
4139 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4140 break;
4141 case UHF_PORT_U1_TIMEOUT:
4142 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4143 return -1;
4144 }
4145 port = XHCI_PORTPMSC(cp);
4146 v = xhci_op_read_4(sc, port);
4147 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4148 index, cp, v, 0);
4149 v &= ~XHCI_PM3_U1TO_SET(0xff);
4150 v |= XHCI_PM3_U1TO_SET(optval);
4151 xhci_op_write_4(sc, port, v);
4152 break;
4153 case UHF_PORT_U2_TIMEOUT:
4154 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4155 return -1;
4156 }
4157 port = XHCI_PORTPMSC(cp);
4158 v = xhci_op_read_4(sc, port);
4159 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4160 index, cp, v, 0);
4161 v &= ~XHCI_PM3_U2TO_SET(0xff);
4162 v |= XHCI_PM3_U2TO_SET(optval);
4163 xhci_op_write_4(sc, port, v);
4164 break;
4165 default:
4166 return -1;
4167 }
4168 }
4169 break;
4170 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
4171 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
4172 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
4173 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
4174 break;
4175 default:
4176 /* default from usbroothub */
4177 return buflen;
4178 }
4179
4180 return totlen;
4181 }
4182
4183 static int
4184 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
4185 void *buf, int buflen)
4186 {
4187 struct xhci_softc *sc = XHCI_BUS2SC(bus);
4188 int actlen;
4189
4190 mutex_enter(&sc->sc_rhlock);
4191 actlen = xhci_roothub_ctrl_locked(bus, req, buf, buflen);
4192 mutex_exit(&sc->sc_rhlock);
4193
4194 return actlen;
4195 }
4196
4197 /* root hub interrupt */
4198
4199 static usbd_status
4200 xhci_root_intr_transfer(struct usbd_xfer *xfer)
4201 {
4202 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4203
/* Pipe isn't running, so start it first. */
4205 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4206 }
4207
4208 /* Wait for roothub port status/change */
4209 static usbd_status
4210 xhci_root_intr_start(struct usbd_xfer *xfer)
4211 {
4212 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4213 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4214
4215 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4216
4217 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4218
4219 if (sc->sc_dying)
4220 return USBD_IOERROR;
4221
4222 KASSERT(sc->sc_intrxfer[bn] == NULL);
4223 sc->sc_intrxfer[bn] = xfer;
4224 xfer->ux_status = USBD_IN_PROGRESS;
4225
4226 return USBD_IN_PROGRESS;
4227 }
4228
4229 static void
4230 xhci_root_intr_abort(struct usbd_xfer *xfer)
4231 {
4232 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4233 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4234
4235 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4236
4237 KASSERT(mutex_owned(&sc->sc_lock));
4238 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
4239
4240 /* If xfer has already completed, nothing to do here. */
4241 if (sc->sc_intrxfer[bn] == NULL)
4242 return;
4243
4244 /*
4245 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
4246 * Cancel it.
4247 */
4248 KASSERT(sc->sc_intrxfer[bn] == xfer);
4249 xfer->ux_status = USBD_CANCELLED;
4250 usb_transfer_complete(xfer);
4251 }
4252
4253 static void
4254 xhci_root_intr_close(struct usbd_pipe *pipe)
4255 {
4256 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
4257 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
4258 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4259
4260 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4261
4262 KASSERT(mutex_owned(&sc->sc_lock));
4263
4264 /*
4265 * Caller must guarantee the xfer has completed first, by
4266 * closing the pipe only after normal completion or an abort.
4267 */
4268 KASSERT(sc->sc_intrxfer[bn] == NULL);
4269 }
4270
4271 static void
4272 xhci_root_intr_done(struct usbd_xfer *xfer)
4273 {
4274 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4275 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4276
4277 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4278
4279 KASSERT(mutex_owned(&sc->sc_lock));
4280
4281 /* Claim the xfer so it doesn't get completed again. */
4282 KASSERT(sc->sc_intrxfer[bn] == xfer);
4283 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
4284 sc->sc_intrxfer[bn] = NULL;
4285 }
4286
4287 /* -------------- */
4288 /* device control */
4289
4290 static usbd_status
4291 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
4292 {
4293 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4294
/* Pipe isn't running, so start it first. */
4296 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4297 }
4298
4299 static usbd_status
4300 xhci_device_ctrl_start(struct usbd_xfer *xfer)
4301 {
4302 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4303 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4304 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4305 struct xhci_ring * const tr = xs->xs_xr[dci];
4306 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4307 usb_device_request_t * const req = &xfer->ux_request;
4308 const bool isread = usbd_xfer_isread(xfer);
4309 const uint32_t len = UGETW(req->wLength);
4310 usb_dma_t * const dma = &xfer->ux_dmabuf;
4311 uint64_t parameter;
4312 uint32_t status;
4313 uint32_t control;
4314 u_int i;
4315 const bool polling = xhci_polling_p(sc);
4316
4317 XHCIHIST_FUNC();
4318 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
4319 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
4320 UGETW(req->wIndex), UGETW(req->wLength));
4321
4322 KASSERT(polling || mutex_owned(&sc->sc_lock));
4323
4324 /* we rely on the bottom bits for extra info */
4325 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %zx",
4326 (uintptr_t) xfer);
4327
4328 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);
4329
4330 if (tr->is_halted)
4331 goto out;
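/*
 * A control transfer is queued as a single TD of two or three TRBs:
 * a Setup Stage TRB carrying the 8-byte request immediately in the TRB
 * (IDT), an optional Data Stage TRB pointing at the xfer buffer, and a
 * Status Stage TRB in the opposite direction (xHCI 4.11.2.2, 6.4.1.2).
 * E.g. a GET_DESCRIPTOR with wLength = 18 becomes Setup (TRT = IN),
 * Data (DIR = IN, 18 bytes), Status (DIR = OUT).
 */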
4332
4333 i = 0;
4334
4335 /* setup phase */
4336 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */
4337 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
4338 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
4339 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
4340 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
4341 XHCI_TRB_3_IDT_BIT;
4342 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4343
4344 if (len != 0) {
4345 /* data phase */
4346 parameter = DMAADDR(dma, 0);
4347 KASSERTMSG(len <= 0x10000, "len %d", len);
4348 status = XHCI_TRB_2_IRQ_SET(0) |
4349 XHCI_TRB_2_TDSZ_SET(0) |
4350 XHCI_TRB_2_BYTES_SET(len);
4351 control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
4352 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
4353 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4354 XHCI_TRB_3_IOC_BIT;
4355 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4356
4357 usb_syncmem(dma, 0, len,
4358 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4359 }
4360
4361 parameter = 0;
4362 status = XHCI_TRB_2_IRQ_SET(0);
/* The status stage direction is the opposite of the data stage, or IN when there is no data stage. */
4364 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
4365 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
4366 XHCI_TRB_3_IOC_BIT;
4367 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4368
4369 if (!polling)
4370 mutex_enter(&tr->xr_lock);
4371 xhci_ring_put_xfer(sc, tr, xx, i);
4372 if (!polling)
4373 mutex_exit(&tr->xr_lock);
4374
4375 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4376
4377 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4378 usbd_xfer_schedule_timeout(xfer);
4379 xfer->ux_status = USBD_IN_PROGRESS;
4380 } else {
4381 /*
4382 * We must be coming from xhci_pipe_restart -- timeout
4383 * already set up, nothing to do.
4384 */
4385 }
4386 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4387
4388 return USBD_IN_PROGRESS;
4389 }
4390
4391 static void
4392 xhci_device_ctrl_done(struct usbd_xfer *xfer)
4393 {
4394 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4395 usb_device_request_t *req = &xfer->ux_request;
4396 int len = UGETW(req->wLength);
4397 int rd = req->bmRequestType & UT_READ;
4398
4399 if (len)
4400 usb_syncmem(&xfer->ux_dmabuf, 0, len,
4401 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4402 }
4403
4404 static void
4405 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
4406 {
4407 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4408
4409 usbd_xfer_abort(xfer);
4410 }
4411
4412 static void
4413 xhci_device_ctrl_close(struct usbd_pipe *pipe)
4414 {
4415 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4416
4417 xhci_close_pipe(pipe);
4418 }
4419
4420 /* ------------------ */
4421 /* device isochronous */
4422
4423 static usbd_status
4424 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
4425 {
4426 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4427
4428 return xhci_device_isoc_enter(xfer);
4429 }
4430
4431 static usbd_status
4432 xhci_device_isoc_enter(struct usbd_xfer *xfer)
4433 {
4434 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4435 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4436 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4437 struct xhci_ring * const tr = xs->xs_xr[dci];
4438 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4439 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
4440 uint32_t len = xfer->ux_length;
4441 usb_dma_t * const dma = &xfer->ux_dmabuf;
4442 uint64_t parameter;
4443 uint32_t status;
4444 uint32_t control;
4445 uint32_t mfindex;
4446 uint32_t offs;
4447 int i, ival;
4448 const bool polling = xhci_polling_p(sc);
4449 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
4450 const uint16_t mps = UE_GET_SIZE(MPS);
4451 const uint8_t maxb = xpipe->xp_maxb;
4452 u_int tdpc, tbc, tlbpc;
4453
4454 XHCIHIST_FUNC();
4455 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4456 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4457
4458 KASSERT(polling || mutex_owned(&sc->sc_lock));
4459
4460 if (sc->sc_dying)
4461 return USBD_IOERROR;
4462
4463 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
4464 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4465
4466 const bool isread = usbd_xfer_isread(xfer);
4467 if (xfer->ux_length)
4468 usb_syncmem(dma, 0, xfer->ux_length,
4469 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4470
4471 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
4472 if (ival >= 1 && ival <= 16)
4473 ival = 1 << (ival - 1);
4474 else
4475 ival = 1; /* fake something up */
4476
4477 if (xpipe->xp_isoc_next == -1) {
4478 mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
4479 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
4480 mfindex = XHCI_MFINDEX_GET(mfindex + 1);
4481 mfindex /= USB_UFRAMES_PER_FRAME;
4482 mfindex += 7; /* 7 frames is max possible IST */
4483 xpipe->xp_isoc_next = roundup2(mfindex, ival);
4484 }
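/*
 * MFINDEX counts 125-us microframes; dividing by USB_UFRAMES_PER_FRAME
 * converts it to 1-ms frames.  Adding 7 frames stays beyond the largest
 * possible isochronous scheduling threshold, and rounding up to the
 * endpoint interval keeps the starting frame ID interval-aligned.
 */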
4485
4486 offs = 0;
4487 for (i = 0; i < xfer->ux_nframes; i++) {
4488 len = xfer->ux_frlengths[i];
4489
4490 tdpc = howmany(len, mps);
4491 tbc = howmany(tdpc, maxb) - 1;
4492 tlbpc = tdpc % maxb;
4493 tlbpc = tlbpc ? tlbpc - 1 : maxb - 1;
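/*
 * Example (hypothetical frame): len = 3000 and mps = 1024 give
 * tdpc = 3 packets; with maxb = 3 this is TBC = howmany(3, 3) - 1 = 0
 * (a single burst) and TLBPC = 2 (three packets in that last burst),
 * per xHCI 4.11.2.3.
 */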
4494
4495 KASSERTMSG(len <= 0x10000, "len %d", len);
4496 parameter = DMAADDR(dma, offs);
4497 status = XHCI_TRB_2_IRQ_SET(0) |
4498 XHCI_TRB_2_TDSZ_SET(0) |
4499 XHCI_TRB_2_BYTES_SET(len);
4500 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
4501 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4502 XHCI_TRB_3_TBC_SET(tbc) |
4503 XHCI_TRB_3_TLBPC_SET(tlbpc) |
4504 XHCI_TRB_3_IOC_BIT;
4505 if (XHCI_HCC_CFC(sc->sc_hcc)) {
4506 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4507 #if 0
4508 } else if (xpipe->xp_isoc_next == -1) {
4509 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4510 #endif
4511 } else {
4512 control |= XHCI_TRB_3_ISO_SIA_BIT;
4513 }
4514 #if 0
4515 if (i != xfer->ux_nframes - 1)
4516 control |= XHCI_TRB_3_BEI_BIT;
4517 #endif
4518 xhci_xfer_put_trb(xx, i, parameter, status, control);
4519
4520 xpipe->xp_isoc_next += ival;
4521 offs += len;
4522 }
4523
4524 xx->xx_isoc_done = 0;
4525
4526 if (!polling)
4527 mutex_enter(&tr->xr_lock);
4528 xhci_ring_put_xfer(sc, tr, xx, i);
4529 if (!polling)
4530 mutex_exit(&tr->xr_lock);
4531
4532 xfer->ux_status = USBD_IN_PROGRESS;
4533 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4534 usbd_xfer_schedule_timeout(xfer);
4535
4536 return USBD_IN_PROGRESS;
4537 }
4538
4539 static void
4540 xhci_device_isoc_abort(struct usbd_xfer *xfer)
4541 {
4542 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4543
4544 usbd_xfer_abort(xfer);
4545 }
4546
4547 static void
4548 xhci_device_isoc_close(struct usbd_pipe *pipe)
4549 {
4550 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4551
4552 xhci_close_pipe(pipe);
4553 }
4554
4555 static void
4556 xhci_device_isoc_done(struct usbd_xfer *xfer)
4557 {
4558 #ifdef USB_DEBUG
4559 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4560 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4561 #endif
4562 const bool isread = usbd_xfer_isread(xfer);
4563
4564 XHCIHIST_FUNC();
4565 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4566 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4567
4568 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4569 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4570 }
4571
4572 /* ----------- */
4573 /* device bulk */
4574
4575 static usbd_status
4576 xhci_device_bulk_transfer(struct usbd_xfer *xfer)
4577 {
4578 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4579
4580 /* Pipe isn't running, so start it first. */
4581 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4582 }
4583
4584 static usbd_status
4585 xhci_device_bulk_start(struct usbd_xfer *xfer)
4586 {
4587 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4588 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4589 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4590 struct xhci_ring * const tr = xs->xs_xr[dci];
4591 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4592 const uint32_t len = xfer->ux_length;
4593 usb_dma_t * const dma = &xfer->ux_dmabuf;
4594 uint64_t parameter;
4595 uint32_t status;
4596 uint32_t control;
4597 u_int i = 0;
4598 const bool polling = xhci_polling_p(sc);
4599
4600 XHCIHIST_FUNC();
4601 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4602 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4603
4604 KASSERT(polling || mutex_owned(&sc->sc_lock));
4605
4606 if (sc->sc_dying)
4607 return USBD_IOERROR;
4608
4609 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4610
4611 if (tr->is_halted)
4612 goto out;
4613
4614 parameter = DMAADDR(dma, 0);
4615 const bool isread = usbd_xfer_isread(xfer);
4616 if (len)
4617 usb_syncmem(dma, 0, len,
4618 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4619
/*
 * XXX: (dsl) The physical buffer must not cross a 64k boundary.
 * If the user-supplied buffer crosses such a boundary then two
 * (or more) TRBs should be used.
 * If multiple TRBs are used, the td_size field must be set correctly.
 * For v1.0 controllers (like Ivy Bridge) this is the number of USB data
 * blocks needed to complete the transfer.
 * Setting it to 1 in the last TRB causes an extra zero-length
 * data block to be sent.
 * The earlier documentation differs; I don't know how it behaves.
 */
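/*
 * Note: the code below currently queues a single Normal TRB with
 * td_size 0 and asserts that the transfer fits in 64k; the split
 * described above is not implemented.
 */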
4631 KASSERTMSG(len <= 0x10000, "len %d", len);
4632 status = XHCI_TRB_2_IRQ_SET(0) |
4633 XHCI_TRB_2_TDSZ_SET(0) |
4634 XHCI_TRB_2_BYTES_SET(len);
4635 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4636 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4637 XHCI_TRB_3_IOC_BIT;
4638 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4639
4640 if (!polling)
4641 mutex_enter(&tr->xr_lock);
4642 xhci_ring_put_xfer(sc, tr, xx, i);
4643 if (!polling)
4644 mutex_exit(&tr->xr_lock);
4645
4646 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4647
4648 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4649 xfer->ux_status = USBD_IN_PROGRESS;
4650 usbd_xfer_schedule_timeout(xfer);
4651 } else {
4652 /*
4653 * We must be coming from xhci_pipe_restart -- timeout
4654 * already set up, nothing to do.
4655 */
4656 }
4657 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4658
4659 return USBD_IN_PROGRESS;
4660 }
4661
4662 static void
4663 xhci_device_bulk_done(struct usbd_xfer *xfer)
4664 {
4665 #ifdef USB_DEBUG
4666 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4667 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4668 #endif
4669 const bool isread = usbd_xfer_isread(xfer);
4670
4671 XHCIHIST_FUNC();
4672 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4673 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4674
4675 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4676 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4677 }
4678
4679 static void
4680 xhci_device_bulk_abort(struct usbd_xfer *xfer)
4681 {
4682 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4683
4684 usbd_xfer_abort(xfer);
4685 }
4686
4687 static void
4688 xhci_device_bulk_close(struct usbd_pipe *pipe)
4689 {
4690 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4691
4692 xhci_close_pipe(pipe);
4693 }
4694
4695 /* ---------------- */
4696 /* device interrupt */
4697
4698 static usbd_status
4699 xhci_device_intr_transfer(struct usbd_xfer *xfer)
4700 {
4701 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4702
4703 /* Pipe isn't running, so start it first. */
4704 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4705 }
4706
4707 static usbd_status
4708 xhci_device_intr_start(struct usbd_xfer *xfer)
4709 {
4710 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4711 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4712 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4713 struct xhci_ring * const tr = xs->xs_xr[dci];
4714 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4715 const uint32_t len = xfer->ux_length;
4716 const bool polling = xhci_polling_p(sc);
4717 usb_dma_t * const dma = &xfer->ux_dmabuf;
4718 uint64_t parameter;
4719 uint32_t status;
4720 uint32_t control;
4721 u_int i = 0;
4722
4723 XHCIHIST_FUNC();
4724 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4725 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4726
4727 KASSERT(polling || mutex_owned(&sc->sc_lock));
4728
4729 if (sc->sc_dying)
4730 return USBD_IOERROR;
4731
4732 if (tr->is_halted)
4733 goto out;
4734
4735 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4736
4737 const bool isread = usbd_xfer_isread(xfer);
4738 if (len)
4739 usb_syncmem(dma, 0, len,
4740 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4741
4742 parameter = DMAADDR(dma, 0);
4743 KASSERTMSG(len <= 0x10000, "len %d", len);
4744 status = XHCI_TRB_2_IRQ_SET(0) |
4745 XHCI_TRB_2_TDSZ_SET(0) |
4746 XHCI_TRB_2_BYTES_SET(len);
4747 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4748 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
4749 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4750
4751 if (!polling)
4752 mutex_enter(&tr->xr_lock);
4753 xhci_ring_put_xfer(sc, tr, xx, i);
4754 if (!polling)
4755 mutex_exit(&tr->xr_lock);
4756
4757 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4758
4759 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4760 xfer->ux_status = USBD_IN_PROGRESS;
4761 usbd_xfer_schedule_timeout(xfer);
4762 } else {
4763 /*
4764 * We must be coming from xhci_pipe_restart -- timeout
4765 * already set up, nothing to do.
4766 */
4767 }
4768 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4769
4770 return USBD_IN_PROGRESS;
4771 }
4772
4773 static void
4774 xhci_device_intr_done(struct usbd_xfer *xfer)
4775 {
4776 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4777 #ifdef USB_DEBUG
4778 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4779 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4780 #endif
4781 const bool isread = usbd_xfer_isread(xfer);
4782
4783 XHCIHIST_FUNC();
4784 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4785 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4786
4787 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4788
4789 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4790 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4791 }
4792
4793 static void
4794 xhci_device_intr_abort(struct usbd_xfer *xfer)
4795 {
4796 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4797
4798 XHCIHIST_FUNC();
4799 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);
4800
4801 KASSERT(mutex_owned(&sc->sc_lock));
4802 usbd_xfer_abort(xfer);
4803 }
4804
4805 static void
4806 xhci_device_intr_close(struct usbd_pipe *pipe)
4807 {
4808 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
4809
4810 XHCIHIST_FUNC();
4811 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);
4812
4813 xhci_close_pipe(pipe);
4814 }
4815