1 /*	$NetBSD: xhci.c,v 1.135 2020/10/15 09:37:40 jmcneill Exp $	*/
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * USB rev 2.0 and rev 3.1 specification
31 * http://www.usb.org/developers/docs/
32 * xHCI rev 1.1 specification
33 * http://www.intel.com/technology/usb/spec.htm
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.135 2020/10/15 09:37:40 jmcneill Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_usb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/kmem.h>
47 #include <sys/device.h>
48 #include <sys/select.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/sysctl.h>
56
57 #include <machine/endian.h>
58
59 #include <dev/usb/usb.h>
60 #include <dev/usb/usbdi.h>
61 #include <dev/usb/usbdivar.h>
62 #include <dev/usb/usbdi_util.h>
63 #include <dev/usb/usbhist.h>
64 #include <dev/usb/usb_mem.h>
65 #include <dev/usb/usb_quirks.h>
66
67 #include <dev/usb/xhcireg.h>
68 #include <dev/usb/xhcivar.h>
69 #include <dev/usb/usbroothub.h>
70
71
72 #ifdef USB_DEBUG
73 #ifndef XHCI_DEBUG
74 #define xhcidebug 0
75 #else /* !XHCI_DEBUG */
76 #define HEXDUMP(a, b, c) \
77 do { \
78 if (xhcidebug > 0) \
79 hexdump(printf, a, b, c); \
80 } while (/*CONSTCOND*/0)
81 static int xhcidebug = 0;
82
83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
84 {
85 int err;
86 const struct sysctlnode *rnode;
87 const struct sysctlnode *cnode;
88
89 err = sysctl_createv(clog, 0, NULL, &rnode,
90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
91 SYSCTL_DESCR("xhci global controls"),
92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
93
94 if (err)
95 goto fail;
96
97 /* control debugging printfs */
98 err = sysctl_createv(clog, 0, &rnode, &cnode,
99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
100 "debug", SYSCTL_DESCR("Enable debugging output"),
101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
102 if (err)
103 goto fail;
104
105 return;
106 fail:
107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
108 }
109
110 #endif /* !XHCI_DEBUG */
111 #endif /* USB_DEBUG */
112
113 #ifndef HEXDUMP
114 #define HEXDUMP(a, b, c)
115 #endif
116
117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
119 #define XHCIHIST_FUNC() USBHIST_FUNC()
120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug)
121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)
123
124 #define XHCI_DCI_SLOT 0
125 #define XHCI_DCI_EP_CONTROL 1
126
127 #define XHCI_ICI_INPUT_CONTROL 0
128
129 struct xhci_pipe {
130 struct usbd_pipe xp_pipe;
131 struct usb_task xp_async_task;
132 int16_t xp_isoc_next; /* next frame */
133 uint8_t xp_maxb; /* max burst */
134 uint8_t xp_mult;
135 };
136
137 #define XHCI_COMMAND_RING_TRBS 256
138 #define XHCI_EVENT_RING_TRBS 256
139 #define XHCI_EVENT_RING_SEGMENTS 1
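/*
 * Note (assumption from the xHCI 1.1 TRB layouts): the Event Data (ED)
 * flag of a Transfer Event TRB occupies the same bit position (bit 2 of
 * TRB word 3) as the ISP flag of a Normal TRB, so the ISP definition is
 * reused below instead of adding a separate register definition.
 */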
140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT
141
142 static usbd_status xhci_open(struct usbd_pipe *);
143 static void xhci_close_pipe(struct usbd_pipe *);
144 static int xhci_intr1(struct xhci_softc * const);
145 static void xhci_softintr(void *);
146 static void xhci_poll(struct usbd_bus *);
147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
149 static void xhci_abortx(struct usbd_xfer *);
150 static bool xhci_dying(struct usbd_bus *);
151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
153 struct usbd_port *);
154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
155 void *, int);
156
157 static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
158 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
159 static usbd_status xhci_reset_endpoint(struct usbd_pipe *);
160 static usbd_status xhci_stop_endpoint(struct usbd_pipe *);
161
162 static void xhci_host_dequeue(struct xhci_ring * const);
163 static usbd_status xhci_set_dequeue(struct usbd_pipe *);
164
165 static usbd_status xhci_do_command(struct xhci_softc * const,
166 struct xhci_soft_trb * const, int);
167 static usbd_status xhci_do_command_locked(struct xhci_softc * const,
168 struct xhci_soft_trb * const, int);
169 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
170 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *);
171 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
172 static usbd_status xhci_enable_slot(struct xhci_softc * const,
173 uint8_t * const);
174 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
175 static usbd_status xhci_address_device(struct xhci_softc * const,
176 uint64_t, uint8_t, bool);
177 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
178 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
179 struct xhci_slot * const, u_int);
180 static usbd_status xhci_ring_init(struct xhci_softc * const,
181 struct xhci_ring **, size_t, size_t);
182 static void xhci_ring_free(struct xhci_softc * const,
183 struct xhci_ring ** const);
184
185 static void xhci_setup_ctx(struct usbd_pipe *);
186 static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
187 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
188 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
189 static uint32_t xhci_bival2ival(uint32_t, uint32_t);
190
191 static void xhci_noop(struct usbd_pipe *);
192
193 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
194 static usbd_status xhci_root_intr_start(struct usbd_xfer *);
195 static void xhci_root_intr_abort(struct usbd_xfer *);
196 static void xhci_root_intr_close(struct usbd_pipe *);
197 static void xhci_root_intr_done(struct usbd_xfer *);
198
199 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
200 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
201 static void xhci_device_ctrl_abort(struct usbd_xfer *);
202 static void xhci_device_ctrl_close(struct usbd_pipe *);
203 static void xhci_device_ctrl_done(struct usbd_xfer *);
204
205 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
206 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *);
207 static void xhci_device_isoc_abort(struct usbd_xfer *);
208 static void xhci_device_isoc_close(struct usbd_pipe *);
209 static void xhci_device_isoc_done(struct usbd_xfer *);
210
211 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
212 static usbd_status xhci_device_intr_start(struct usbd_xfer *);
213 static void xhci_device_intr_abort(struct usbd_xfer *);
214 static void xhci_device_intr_close(struct usbd_pipe *);
215 static void xhci_device_intr_done(struct usbd_xfer *);
216
217 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
218 static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
219 static void xhci_device_bulk_abort(struct usbd_xfer *);
220 static void xhci_device_bulk_close(struct usbd_pipe *);
221 static void xhci_device_bulk_done(struct usbd_xfer *);
222
223 static const struct usbd_bus_methods xhci_bus_methods = {
224 .ubm_open = xhci_open,
225 .ubm_softint = xhci_softintr,
226 .ubm_dopoll = xhci_poll,
227 .ubm_allocx = xhci_allocx,
228 .ubm_freex = xhci_freex,
229 .ubm_abortx = xhci_abortx,
230 .ubm_dying = xhci_dying,
231 .ubm_getlock = xhci_get_lock,
232 .ubm_newdev = xhci_new_device,
233 .ubm_rhctrl = xhci_roothub_ctrl,
234 };
235
236 static const struct usbd_pipe_methods xhci_root_intr_methods = {
237 .upm_transfer = xhci_root_intr_transfer,
238 .upm_start = xhci_root_intr_start,
239 .upm_abort = xhci_root_intr_abort,
240 .upm_close = xhci_root_intr_close,
241 .upm_cleartoggle = xhci_noop,
242 .upm_done = xhci_root_intr_done,
243 };
244
245
246 static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
247 .upm_transfer = xhci_device_ctrl_transfer,
248 .upm_start = xhci_device_ctrl_start,
249 .upm_abort = xhci_device_ctrl_abort,
250 .upm_close = xhci_device_ctrl_close,
251 .upm_cleartoggle = xhci_noop,
252 .upm_done = xhci_device_ctrl_done,
253 };
254
255 static const struct usbd_pipe_methods xhci_device_isoc_methods = {
256 .upm_transfer = xhci_device_isoc_transfer,
257 .upm_abort = xhci_device_isoc_abort,
258 .upm_close = xhci_device_isoc_close,
259 .upm_cleartoggle = xhci_noop,
260 .upm_done = xhci_device_isoc_done,
261 };
262
263 static const struct usbd_pipe_methods xhci_device_bulk_methods = {
264 .upm_transfer = xhci_device_bulk_transfer,
265 .upm_start = xhci_device_bulk_start,
266 .upm_abort = xhci_device_bulk_abort,
267 .upm_close = xhci_device_bulk_close,
268 .upm_cleartoggle = xhci_noop,
269 .upm_done = xhci_device_bulk_done,
270 };
271
272 static const struct usbd_pipe_methods xhci_device_intr_methods = {
273 .upm_transfer = xhci_device_intr_transfer,
274 .upm_start = xhci_device_intr_start,
275 .upm_abort = xhci_device_intr_abort,
276 .upm_close = xhci_device_intr_close,
277 .upm_cleartoggle = xhci_noop,
278 .upm_done = xhci_device_intr_done,
279 };
280
281 static inline uint32_t
282 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
283 {
284 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
285 }
286
287 static inline uint32_t
288 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset)
289 {
290 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
291 }
292
293 static inline uint32_t
294 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
295 {
296 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
297 }
298
299 static inline void
300 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
301 uint32_t value)
302 {
303 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
304 }
305
306 #if 0 /* unused */
307 static inline void
308 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
309 uint32_t value)
310 {
311 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
312 }
313 #endif /* unused */
314
315 static inline void
316 xhci_barrier(const struct xhci_softc * const sc, int flags)
317 {
318 bus_space_barrier(sc->sc_iot, sc->sc_ioh, 0, sc->sc_ios, flags);
319 }
320
321 static inline uint32_t
322 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
323 {
324 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
325 }
326
327 static inline uint32_t
328 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
329 {
330 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
331 }
332
333 static inline void
334 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
335 uint32_t value)
336 {
337 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
338 }
339
340 static inline uint64_t
341 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
342 {
343 uint64_t value;
344
345 if (XHCI_HCC_AC64(sc->sc_hcc)) {
346 #ifdef XHCI_USE_BUS_SPACE_8
347 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
348 #else
349 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
350 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
351 offset + 4) << 32;
352 #endif
353 } else {
354 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
355 }
356
357 return value;
358 }
359
360 static inline void
361 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
362 uint64_t value)
363 {
364 if (XHCI_HCC_AC64(sc->sc_hcc)) {
365 #ifdef XHCI_USE_BUS_SPACE_8
366 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
367 #else
368 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
369 (value >> 0) & 0xffffffff);
370 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
371 (value >> 32) & 0xffffffff);
372 #endif
373 } else {
374 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
375 }
376 }
377
378 static inline uint32_t
379 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
380 {
381 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
382 }
383
384 static inline void
385 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
386 uint32_t value)
387 {
388 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
389 }
390
391 #if 0 /* unused */
392 static inline uint64_t
393 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
394 {
395 uint64_t value;
396
397 if (XHCI_HCC_AC64(sc->sc_hcc)) {
398 #ifdef XHCI_USE_BUS_SPACE_8
399 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
400 #else
401 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
402 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
403 offset + 4) << 32;
404 #endif
405 } else {
406 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
407 }
408
409 return value;
410 }
411 #endif /* unused */
412
413 static inline void
414 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
415 uint64_t value)
416 {
417 if (XHCI_HCC_AC64(sc->sc_hcc)) {
418 #ifdef XHCI_USE_BUS_SPACE_8
419 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
420 #else
421 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
422 (value >> 0) & 0xffffffff);
423 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
424 (value >> 32) & 0xffffffff);
425 #endif
426 } else {
427 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
428 }
429 }
430
431 #if 0 /* unused */
432 static inline uint32_t
433 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
434 {
435 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
436 }
437 #endif /* unused */
438
439 static inline void
440 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
441 uint32_t value)
442 {
443 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
444 }
445
446 /* --- */
447
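/*
 * Convert an endpoint descriptor to the xHCI Endpoint Context "EP Type"
 * encoding (xHCI 1.1 section 6.2.3): 1-3 are Isoch/Bulk/Interrupt OUT,
 * 5-7 are the corresponding IN types, and 4 is the bidirectional Control
 * type.
 */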
448 static inline uint8_t
449 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
450 {
451 u_int eptype = 0;
452
453 switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
454 case UE_CONTROL:
455 eptype = 0x0;
456 break;
457 case UE_ISOCHRONOUS:
458 eptype = 0x1;
459 break;
460 case UE_BULK:
461 eptype = 0x2;
462 break;
463 case UE_INTERRUPT:
464 eptype = 0x3;
465 break;
466 }
467
468 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
469 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
470 return eptype | 0x4;
471 else
472 return eptype;
473 }
474
475 static u_int
476 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
477 {
478 /* xHCI 1.0 section 4.5.1 */
479 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
480 u_int in = 0;
481
482 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
483 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
484 in = 1;
485
486 return epaddr * 2 + in;
487 }
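/*
 * For example: the default control endpoint (EP0) gets DCI 1, EP1 OUT
 * gets DCI 2, EP1 IN gets DCI 3, EP2 OUT gets DCI 4, and so on up to
 * EP15 IN at DCI 31.
 */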
488
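/*
 * The input context array starts with the Input Control Context at index
 * 0 (XHCI_ICI_INPUT_CONTROL), so the slot and endpoint contexts sit one
 * index above their DCIs.
 */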
489 static inline u_int
490 xhci_dci_to_ici(const u_int i)
491 {
492 return i + 1;
493 }
494
495 static inline void *
496 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
497 const u_int dci)
498 {
499 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
500 }
501
502 #if 0 /* unused */
503 static inline bus_addr_t
504 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
505 const u_int dci)
506 {
507 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
508 }
509 #endif /* unused */
510
511 static inline void *
512 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
513 const u_int ici)
514 {
515 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
516 }
517
518 static inline bus_addr_t
519 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
520 const u_int ici)
521 {
522 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
523 }
524
525 static inline struct xhci_trb *
526 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
527 {
528 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
529 }
530
531 static inline bus_addr_t
532 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
533 {
534 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
535 }
536
537 static inline void
538 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx,
539 uint64_t parameter, uint32_t status, uint32_t control)
540 {
541 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb);
542 xx->xx_trb[idx].trb_0 = parameter;
543 xx->xx_trb[idx].trb_2 = status;
544 xx->xx_trb[idx].trb_3 = control;
545 }
546
547 static inline void
548 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
549 uint32_t control)
550 {
551 trb->trb_0 = htole64(parameter);
552 trb->trb_2 = htole32(status);
553 trb->trb_3 = htole32(control);
554 }
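/*
 * Note: xhci_xfer_put_trb() stages TRB words in host byte order in the
 * xfer's soft copy; they are converted to little-endian when they are
 * later copied onto the transfer ring. xhci_trb_put() writes
 * little-endian values directly, as the controller expects.
 */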
555
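/*
 * Map the 64-bit TRB pointer reported in an event back to an index into
 * the given ring. Returns nonzero if the pointer does not fall on a TRB
 * boundary within the ring.
 */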
556 static int
557 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
558 {
559 /* base address of TRBs */
560 bus_addr_t trbp = xhci_ring_trbp(xr, 0);
561
562 /* trb_0 range sanity check */
563 if (trb_0 == 0 || trb_0 < trbp ||
564 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
565 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
566 return 1;
567 }
568 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
569 return 0;
570 }
571
572 static unsigned int
573 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
574 u_int dci)
575 {
576 uint32_t *cp;
577
578 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
579 cp = xhci_slot_get_dcv(sc, xs, dci);
580 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
581 }
582
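/*
 * Return the bus number (0 for the USB 3 root hub, 1 for the USB 2 root
 * hub) that a controller port belongs to. sc_ctlrportbus keeps one bit
 * per controller port; the bit is set for USB 2 ports and clear for
 * USB 3 ports (see xhci_id_protocols()).
 */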
583 static inline unsigned int
584 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
585 {
586 const unsigned int port = ctlrport - 1;
587 const uint8_t bit = __BIT(port % NBBY);
588
589 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
590 }
591
592 /*
593 * Return the roothub port for a controller port. Both are 1..n.
594 */
595 static inline unsigned int
596 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
597 {
598
599 return sc->sc_ctlrportmap[ctrlport - 1];
600 }
601
602 /*
603 * Return the controller port for a bus roothub port. Both are 1..n.
604 */
605 static inline unsigned int
606 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
607 unsigned int rhport)
608 {
609
610 return sc->sc_rhportmap[bn][rhport - 1];
611 }
612
613 /* --- */
614
615 void
616 xhci_childdet(device_t self, device_t child)
617 {
618 struct xhci_softc * const sc = device_private(self);
619
620 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
621 if (child == sc->sc_child2)
622 sc->sc_child2 = NULL;
623 else if (child == sc->sc_child)
624 sc->sc_child = NULL;
625 }
626
627 int
628 xhci_detach(struct xhci_softc *sc, int flags)
629 {
630 int rv = 0;
631
632 if (sc->sc_child2 != NULL) {
633 rv = config_detach(sc->sc_child2, flags);
634 if (rv != 0)
635 return rv;
636 KASSERT(sc->sc_child2 == NULL);
637 }
638
639 if (sc->sc_child != NULL) {
640 rv = config_detach(sc->sc_child, flags);
641 if (rv != 0)
642 return rv;
643 KASSERT(sc->sc_child == NULL);
644 }
645
646 /* XXX unconfigure/free slots */
647
648 /* verify: */
649 xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
650 xhci_op_write_4(sc, XHCI_USBCMD, 0);
651 /* do we need to wait for stop? */
652
653 xhci_op_write_8(sc, XHCI_CRCR, 0);
654 xhci_ring_free(sc, &sc->sc_cr);
655 cv_destroy(&sc->sc_command_cv);
656 cv_destroy(&sc->sc_cmdbusy_cv);
657
658 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
659 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
660 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY);
661 xhci_ring_free(sc, &sc->sc_er);
662
663 usb_freemem(&sc->sc_bus, &sc->sc_eventst_dma);
664
665 xhci_op_write_8(sc, XHCI_DCBAAP, 0);
666 usb_freemem(&sc->sc_bus, &sc->sc_dcbaa_dma);
667
668 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);
669
670 kmem_free(sc->sc_ctlrportbus,
671 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
672 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));
673
674 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
675 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
676 }
677
678 mutex_destroy(&sc->sc_lock);
679 mutex_destroy(&sc->sc_intr_lock);
680
681 pool_cache_destroy(sc->sc_xferpool);
682
683 return rv;
684 }
685
686 int
687 xhci_activate(device_t self, enum devact act)
688 {
689 struct xhci_softc * const sc = device_private(self);
690
691 switch (act) {
692 case DVACT_DEACTIVATE:
693 sc->sc_dying = true;
694 return 0;
695 default:
696 return EOPNOTSUPP;
697 }
698 }
699
700 bool
701 xhci_suspend(device_t dv, const pmf_qual_t *qual)
702 {
703 return false;
704 }
705
706 bool
707 xhci_resume(device_t dv, const pmf_qual_t *qual)
708 {
709 return false;
710 }
711
712 bool
713 xhci_shutdown(device_t self, int flags)
714 {
715 return false;
716 }
717
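/*
 * Halt and reset the host controller: wait for the Controller Not Ready
 * (CNR) flag to clear, halt the controller, issue HCRST, then wait for
 * HCRST and CNR to clear again.
 */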
718 static int
719 xhci_hc_reset(struct xhci_softc * const sc)
720 {
721 uint32_t usbcmd, usbsts;
722 int i;
723
724 	/* Wait for the Controller Not Ready (CNR) flag to clear */
725 for (i = 0; i < XHCI_WAIT_CNR; i++) {
726 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
727 if ((usbsts & XHCI_STS_CNR) == 0)
728 break;
729 usb_delay_ms(&sc->sc_bus, 1);
730 }
731 if (i >= XHCI_WAIT_CNR) {
732 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
733 return EIO;
734 }
735
736 /* Halt controller */
737 usbcmd = 0;
738 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
739 usb_delay_ms(&sc->sc_bus, 1);
740
741 /* Reset controller */
742 usbcmd = XHCI_CMD_HCRST;
743 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
744 for (i = 0; i < XHCI_WAIT_HCRST; i++) {
745 /*
746 	 * Wait 1ms first. Some existing Intel xHCI controllers require
747 	 * a 1ms delay to prevent a system hang (errata).
748 */
749 usb_delay_ms(&sc->sc_bus, 1);
750 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
751 if ((usbcmd & XHCI_CMD_HCRST) == 0)
752 break;
753 }
754 if (i >= XHCI_WAIT_HCRST) {
755 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
756 return EIO;
757 }
758
759 	/* Wait for the Controller Not Ready (CNR) flag to clear again */
760 for (i = 0; i < XHCI_WAIT_CNR; i++) {
761 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
762 if ((usbsts & XHCI_STS_CNR) == 0)
763 break;
764 usb_delay_ms(&sc->sc_bus, 1);
765 }
766 if (i >= XHCI_WAIT_CNR) {
767 aprint_error_dev(sc->sc_dev,
768 "controller not ready timeout after reset\n");
769 return EIO;
770 }
771
772 return 0;
773 }
774
775
776 /* 7.2 xHCI Support Protocol Capability */
777 static void
778 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
779 {
780 XHCIHIST_FUNC(); XHCIHIST_CALLED();
781
782 /* XXX Cache this lot */
783
784 const uint32_t w0 = xhci_read_4(sc, ecp);
785 const uint32_t w4 = xhci_read_4(sc, ecp + 4);
786 const uint32_t w8 = xhci_read_4(sc, ecp + 8);
787 const uint32_t wc = xhci_read_4(sc, ecp + 0xc);
788
789 aprint_debug_dev(sc->sc_dev,
790 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc);
791
792 if (w4 != XHCI_XECP_USBID)
793 return;
794
795 const int major = XHCI_XECP_SP_W0_MAJOR(w0);
796 const int minor = XHCI_XECP_SP_W0_MINOR(w0);
797 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
798 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);
799
800 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
801 switch (mm) {
802 case 0x0200:
803 case 0x0300:
804 case 0x0301:
805 case 0x0310:
806 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
807 major == 3 ? "ss" : "hs", cpo, cpo + cpc -1);
808 break;
809 default:
810 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
811 major, minor);
812 return;
813 }
814
815 const size_t bus = (major == 3) ? 0 : 1;
816
817 /* Index arrays with 0..n-1 where ports are numbered 1..n */
818 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
819 if (sc->sc_ctlrportmap[cp] != 0) {
820 aprint_error_dev(sc->sc_dev, "controller port %zu "
821 "already assigned", cp);
822 continue;
823 }
824
825 sc->sc_ctlrportbus[cp / NBBY] |=
826 bus == 0 ? 0 : __BIT(cp % NBBY);
827
828 const size_t rhp = sc->sc_rhportcount[bus]++;
829
830 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
831 "bus %zu rhp %zu is %d", bus, rhp,
832 sc->sc_rhportmap[bus][rhp]);
833
834 sc->sc_rhportmap[bus][rhp] = cp + 1;
835 sc->sc_ctlrportmap[cp] = rhp + 1;
836 }
837 }
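/*
 * Illustrative example (hypothetical port layout): if the USB 3 capability
 * covers controller ports 1-2 and the USB 2 capability covers controller
 * ports 3-6, and the USB 3 capability is processed first, then ports 1-2
 * become root hub ports 1-2 on bus 0 (usb3) and ports 3-6 become root hub
 * ports 1-4 on bus 1 (usb2).
 */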
838
839 /* Process extended capabilities */
840 static void
841 xhci_ecp(struct xhci_softc *sc)
842 {
843 XHCIHIST_FUNC(); XHCIHIST_CALLED();
844
845 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4;
846 while (ecp != 0) {
847 uint32_t ecr = xhci_read_4(sc, ecp);
848 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
849 switch (XHCI_XECP_ID(ecr)) {
850 case XHCI_ID_PROTOCOLS: {
851 xhci_id_protocols(sc, ecp);
852 break;
853 }
854 case XHCI_ID_USB_LEGACY: {
855 uint8_t bios_sem;
856
857 /* Take host controller ownership from BIOS */
858 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
859 if (bios_sem) {
860 /* sets xHCI to be owned by OS */
861 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
862 aprint_debug_dev(sc->sc_dev,
863 "waiting for BIOS to give up control\n");
864 for (int i = 0; i < 5000; i++) {
865 bios_sem = xhci_read_1(sc, ecp +
866 XHCI_XECP_BIOS_SEM);
867 if (bios_sem == 0)
868 break;
869 DELAY(1000);
870 }
871 if (bios_sem) {
872 aprint_error_dev(sc->sc_dev,
873 "timed out waiting for BIOS\n");
874 }
875 }
876 break;
877 }
878 default:
879 break;
880 }
881 ecr = xhci_read_4(sc, ecp);
882 if (XHCI_XECP_NEXT(ecr) == 0) {
883 ecp = 0;
884 } else {
885 ecp += XHCI_XECP_NEXT(ecr) * 4;
886 }
887 }
888 }
889
890 #define XHCI_HCCPREV1_BITS \
891 "\177\020" /* New bitmask */ \
892 "f\020\020XECP\0" \
893 "f\014\4MAXPSA\0" \
894 "b\013CFC\0" \
895 "b\012SEC\0" \
896 "b\011SBD\0" \
897 "b\010FSE\0" \
898 "b\7NSS\0" \
899 "b\6LTC\0" \
900 "b\5LHRC\0" \
901 "b\4PIND\0" \
902 "b\3PPC\0" \
903 "b\2CZC\0" \
904 "b\1BNC\0" \
905 "b\0AC64\0" \
906 "\0"
907 #define XHCI_HCCV1_x_BITS \
908 "\177\020" /* New bitmask */ \
909 "f\020\020XECP\0" \
910 "f\014\4MAXPSA\0" \
911 "b\013CFC\0" \
912 "b\012SEC\0" \
913 "b\011SPC\0" \
914 "b\010PAE\0" \
915 "b\7NSS\0" \
916 "b\6LTC\0" \
917 "b\5LHRC\0" \
918 "b\4PIND\0" \
919 "b\3PPC\0" \
920 "b\2CSZ\0" \
921 "b\1BNC\0" \
922 "b\0AC64\0" \
923 "\0"
924
925 #define XHCI_HCC2_BITS \
926 "\177\020" /* New bitmask */ \
927 "b\7ETC_TSC\0" \
928 "b\6ETC\0" \
929 "b\5CIC\0" \
930 "b\4LEC\0" \
931 "b\3CTC\0" \
932 "b\2FSC\0" \
933 "b\1CMC\0" \
934 "b\0U3C\0" \
935 "\0"
936
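/*
 * Enable interrupter 0, program its interrupt moderation interval, and set
 * Run/Stop (with interrupts enabled) to start the controller.
 */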
937 void
938 xhci_start(struct xhci_softc *sc)
939 {
940 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
941 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
942 /* Intel xhci needs interrupt rate moderated. */
943 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
944 else
945 xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
946 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
947 xhci_rt_read_4(sc, XHCI_IMOD(0)));
948
949 /* Go! */
950 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
951 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n",
952 xhci_op_read_4(sc, XHCI_USBCMD));
953 }
954
955 int
956 xhci_init(struct xhci_softc *sc)
957 {
958 bus_size_t bsz;
959 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff;
960 uint32_t pagesize, config;
961 int i = 0;
962 uint16_t hciversion;
963 uint8_t caplength;
964
965 XHCIHIST_FUNC(); XHCIHIST_CALLED();
966
967 /* Set up the bus struct for the usb 3 and usb 2 buses */
968 sc->sc_bus.ub_methods = &xhci_bus_methods;
969 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
970 sc->sc_bus.ub_usedma = true;
971 sc->sc_bus.ub_hcpriv = sc;
972
973 sc->sc_bus2.ub_methods = &xhci_bus_methods;
974 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
975 sc->sc_bus2.ub_revision = USBREV_2_0;
976 sc->sc_bus2.ub_usedma = true;
977 sc->sc_bus2.ub_hcpriv = sc;
978 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;
979
980 caplength = xhci_read_1(sc, XHCI_CAPLENGTH);
981 hciversion = xhci_read_2(sc, XHCI_HCIVERSION);
982
983 if (hciversion < XHCI_HCIVERSION_0_96 ||
984 hciversion >= 0x0200) {
985 aprint_normal_dev(sc->sc_dev,
986 "xHCI version %x.%x not known to be supported\n",
987 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
988 } else {
989 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
990 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
991 }
992
993 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
994 &sc->sc_cbh) != 0) {
995 aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
996 return ENOMEM;
997 }
998
999 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
1000 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
1001 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
1002 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
1003 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
1004 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
1005 aprint_debug_dev(sc->sc_dev,
1006 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);
1007
1008 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
1009 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32;
1010
1011 char sbuf[128];
1012 if (hciversion < XHCI_HCIVERSION_1_0)
1013 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc);
1014 else
1015 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc);
1016 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
1017 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n",
1018 XHCI_HCC_XECP(sc->sc_hcc) * 4);
1019 if (hciversion >= XHCI_HCIVERSION_1_1) {
1020 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
1021 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2);
1022 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
1023 }
1024
1025 /* default all ports to bus 0, i.e. usb 3 */
1026 sc->sc_ctlrportbus = kmem_zalloc(
1027 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
1028 sc->sc_ctlrportmap = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1029
1030 /* controller port to bus roothub port map */
1031 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
1032 sc->sc_rhportmap[j] = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1033 }
1034
1035 /*
1036 * Process all Extended Capabilities
1037 */
1038 xhci_ecp(sc);
1039
1040 bsz = XHCI_PORTSC(sc->sc_maxports);
1041 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
1042 &sc->sc_obh) != 0) {
1043 aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
1044 return ENOMEM;
1045 }
1046
1047 dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
1048 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
1049 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
1050 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
1051 return ENOMEM;
1052 }
1053
1054 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
1055 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
1056 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
1057 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
1058 return ENOMEM;
1059 }
1060
1061 int rv;
1062 rv = xhci_hc_reset(sc);
1063 if (rv != 0) {
1064 return rv;
1065 }
1066
1067 if (sc->sc_vendor_init)
1068 sc->sc_vendor_init(sc);
1069
1070 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
1071 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
1072 pagesize = ffs(pagesize);
1073 if (pagesize == 0) {
1074 aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
1075 return EIO;
1076 }
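	/*
	 * xHCI 1.1 section 5.4.3: if bit n of PAGESIZE is set, the
	 * controller supports a page size of 2^(n+12) bytes; e.g. a
	 * value of 0x1 means 4096-byte pages.
	 */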
1077 sc->sc_pgsz = 1 << (12 + (pagesize - 1));
1078 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
1079 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
1080 (uint32_t)sc->sc_maxslots);
1081 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);
1082
1083 usbd_status err;
1084
1085 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
1086 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
1087 if (sc->sc_maxspbuf != 0) {
1088 err = usb_allocmem(&sc->sc_bus,
1089 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
1090 USBMALLOC_COHERENT, &sc->sc_spbufarray_dma);
1091 if (err) {
1092 aprint_error_dev(sc->sc_dev,
1093 "spbufarray init fail, err %d\n", err);
1094 return ENOMEM;
1095 }
1096
1097 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
1098 sc->sc_maxspbuf, KM_SLEEP);
1099 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
1100 for (i = 0; i < sc->sc_maxspbuf; i++) {
1101 usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
1102 			/* allocate a scratchpad buffer page */
1103 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz,
1104 sc->sc_pgsz, USBMALLOC_COHERENT, dma);
1105 if (err) {
1106 aprint_error_dev(sc->sc_dev,
1107 "spbufarray_dma init fail, err %d\n", err);
1108 rv = ENOMEM;
1109 goto bad1;
1110 }
1111 spbufarray[i] = htole64(DMAADDR(dma, 0));
1112 usb_syncmem(dma, 0, sc->sc_pgsz,
1113 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1114 }
1115
1116 usb_syncmem(&sc->sc_spbufarray_dma, 0,
1117 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
1118 }
1119
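	/*
	 * Program MaxSlotsEn (the low byte of CONFIG) with the number of
	 * device slots to enable.
	 */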
1120 config = xhci_op_read_4(sc, XHCI_CONFIG);
1121 config &= ~0xFF;
1122 config |= sc->sc_maxslots & 0xFF;
1123 xhci_op_write_4(sc, XHCI_CONFIG, config);
1124
1125 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
1126 XHCI_COMMAND_RING_SEGMENTS_ALIGN);
1127 if (err) {
1128 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n",
1129 err);
1130 rv = ENOMEM;
1131 goto bad1;
1132 }
1133
1134 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
1135 XHCI_EVENT_RING_SEGMENTS_ALIGN);
1136 if (err) {
1137 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
1138 err);
1139 rv = ENOMEM;
1140 goto bad2;
1141 }
1142
1143 usb_dma_t *dma;
1144 size_t size;
1145 size_t align;
1146
1147 dma = &sc->sc_eventst_dma;
1148 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
1149 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
1150 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
1151 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
1152 err = usb_allocmem(&sc->sc_bus, size, align, USBMALLOC_COHERENT, dma);
1153 if (err) {
1154 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
1155 err);
1156 rv = ENOMEM;
1157 goto bad3;
1158 }
1159
1160 memset(KERNADDR(dma, 0), 0, size);
1161 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1162 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
1163 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
1164 KERNADDR(&sc->sc_eventst_dma, 0),
1165 sc->sc_eventst_dma.udma_block->size);
1166
1167 dma = &sc->sc_dcbaa_dma;
1168 size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
1169 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
1170 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
1171 err = usb_allocmem(&sc->sc_bus, size, align, USBMALLOC_COHERENT, dma);
1172 if (err) {
1173 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
1174 rv = ENOMEM;
1175 goto bad4;
1176 }
1177 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
1178 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
1179 KERNADDR(&sc->sc_dcbaa_dma, 0),
1180 sc->sc_dcbaa_dma.udma_block->size);
1181
1182 memset(KERNADDR(dma, 0), 0, size);
1183 if (sc->sc_maxspbuf != 0) {
1184 /*
1185 	 * DCBAA entry 0 holds the scratchpad buffer array pointer.
1186 */
1187 *(uint64_t *)KERNADDR(dma, 0) =
1188 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
1189 }
1190 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1191
1192 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
1193 KM_SLEEP);
1194 if (sc->sc_slots == NULL) {
1195 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
1196 rv = ENOMEM;
1197 goto bad;
1198 }
1199
1200 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
1201 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
1202 if (sc->sc_xferpool == NULL) {
1203 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
1204 err);
1205 rv = ENOMEM;
1206 goto bad;
1207 }
1208
1209 cv_init(&sc->sc_command_cv, "xhcicmd");
1210 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
1211 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
1212 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
1213
1214 struct xhci_erste *erst;
1215 erst = KERNADDR(&sc->sc_eventst_dma, 0);
1216 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0));
1217 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb);
1218 erst[0].erste_3 = htole32(0);
1219 usb_syncmem(&sc->sc_eventst_dma, 0,
1220 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);
1221
1222 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
1223 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
1224 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) |
1225 XHCI_ERDP_BUSY);
1226
1227 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
1228 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1229 sc->sc_cr->xr_cs);
1230
1231 xhci_barrier(sc, BUS_SPACE_BARRIER_WRITE);
1232
1233 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
1234 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);
1235
1236 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
1237 xhci_start(sc);
1238
1239 return 0;
1240
1241 bad:
1242 if (sc->sc_xferpool) {
1243 pool_cache_destroy(sc->sc_xferpool);
1244 sc->sc_xferpool = NULL;
1245 }
1246
1247 if (sc->sc_slots) {
1248 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
1249 sc->sc_maxslots);
1250 sc->sc_slots = NULL;
1251 }
1252
1253 usb_freemem(&sc->sc_bus, &sc->sc_dcbaa_dma);
1254 bad4:
1255 usb_freemem(&sc->sc_bus, &sc->sc_eventst_dma);
1256 bad3:
1257 xhci_ring_free(sc, &sc->sc_er);
1258 bad2:
1259 xhci_ring_free(sc, &sc->sc_cr);
1260 i = sc->sc_maxspbuf;
1261 bad1:
1262 for (int j = 0; j < i; j++)
1263 usb_freemem(&sc->sc_bus, &sc->sc_spbuf_dma[j]);
1264 usb_freemem(&sc->sc_bus, &sc->sc_spbufarray_dma);
1265
1266 return rv;
1267 }
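/*
 * Note: attachment glue (e.g. xhci_pci.c or xhci_fdt.c) is expected to map
 * the register space into sc_iot/sc_ioh/sc_ios, set up sc_bus.ub_dmatag,
 * establish the interrupt with xhci_intr() as the handler, and then call
 * xhci_init() before attaching the usb child buses.
 */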
1268
1269 static inline bool
1270 xhci_polling_p(struct xhci_softc * const sc)
1271 {
1272 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
1273 }
1274
1275 int
1276 xhci_intr(void *v)
1277 {
1278 struct xhci_softc * const sc = v;
1279 int ret = 0;
1280
1281 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1282
1283 if (sc == NULL)
1284 return 0;
1285
1286 mutex_spin_enter(&sc->sc_intr_lock);
1287
1288 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1289 goto done;
1290
1291 /* If we get an interrupt while polling, then just ignore it. */
1292 if (xhci_polling_p(sc)) {
1293 #ifdef DIAGNOSTIC
1294 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1295 #endif
1296 goto done;
1297 }
1298
1299 ret = xhci_intr1(sc);
1300 if (ret) {
1301 KASSERT(sc->sc_child || sc->sc_child2);
1302
1303 /*
1304 		 * One of the child buses may already be detached. It doesn't
1305 * matter on which of the two the softintr is scheduled.
1306 */
1307 if (sc->sc_child)
1308 usb_schedsoftintr(&sc->sc_bus);
1309 else
1310 usb_schedsoftintr(&sc->sc_bus2);
1311 }
1312 done:
1313 mutex_spin_exit(&sc->sc_intr_lock);
1314 return ret;
1315 }
1316
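/*
 * Acknowledge a pending interrupt: clear EINT and the IMAN interrupt
 * pending bit. Returns nonzero if the interrupt was raised by this
 * controller, in which case the caller schedules the usb soft interrupt
 * to service the event ring.
 */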
1317 int
1318 xhci_intr1(struct xhci_softc * const sc)
1319 {
1320 uint32_t usbsts;
1321 uint32_t iman;
1322
1323 XHCIHIST_FUNC();
1324
1325 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1326 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
1327 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
1328 XHCI_STS_HCE)) == 0) {
1329 DPRINTFN(16, "ignored intr not for %jd",
1330 device_unit(sc->sc_dev), 0, 0, 0);
1331 return 0;
1332 }
1333
1334 /*
1335 	 * Clear EINT and other transient flags so that the next shared
1336 	 * interrupt is not misinterpreted. Also, to avoid a race, EINT must
1337 	 * be cleared before XHCI_IMAN_INTR_PEND is cleared.
1338 */
1339 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & XHCI_STS_RSVDP0);
1340
1341 #ifdef XHCI_DEBUG
1342 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1343 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1344 #endif
1345
1346 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1347 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1348 iman |= XHCI_IMAN_INTR_PEND;
1349 xhci_rt_write_4(sc, XHCI_IMAN(0), iman);
1350
1351 #ifdef XHCI_DEBUG
1352 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1353 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1354 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1355 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1356 #endif
1357
1358 return 1;
1359 }
1360
1361 /*
1362 * 3 port speed types used in USB stack
1363 *
1364 * usbdi speed
1365 * definition: USB_SPEED_* in usb.h
1366 * They are used in struct usbd_device in USB stack.
1367 * ioctl interface uses these values too.
1368 * port_status speed
1369 * definition: UPS_*_SPEED in usb.h
1370 * They are used in usb_port_status_t and valid only for USB 2.0.
1371 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus
1372 * of usb_port_status_ext_t indicates port speed.
1373 * Note that some 3.0 values overlap with 2.0 values.
1374  *   (e.g. 0x200 means UPS_PORT_POWER_SS on a SuperSpeed port and
1375  *    UPS_LOW_SPEED on a high-speed port.)
1376 * port status returned from hub also uses these values.
1377 * On NetBSD UPS_OTHER_SPEED indicates port speed is super speed
1378 * or more.
1379 * xspeed:
1380 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
1381  *   They are used only in the slot context and the PORTSC registers of xhci.
1382 * The difference between usbdi speed and xspeed is
1383 * that FS and LS values are swapped.
1384 */
1385
1386 /* convert usbdi speed to xspeed */
1387 static int
1388 xhci_speed2xspeed(int speed)
1389 {
1390 switch (speed) {
1391 case USB_SPEED_LOW: return 2;
1392 case USB_SPEED_FULL: return 1;
1393 default: return speed;
1394 }
1395 }
1396
1397 #if 0
1398 /* convert xspeed to usbdi speed */
1399 static int
1400 xhci_xspeed2speed(int xspeed)
1401 {
1402 switch (xspeed) {
1403 case 1: return USB_SPEED_FULL;
1404 case 2: return USB_SPEED_LOW;
1405 default: return xspeed;
1406 }
1407 }
1408 #endif
1409
1410 /* convert xspeed to port status speed */
1411 static int
1412 xhci_xspeed2psspeed(int xspeed)
1413 {
1414 switch (xspeed) {
1415 case 0: return 0;
1416 case 1: return UPS_FULL_SPEED;
1417 case 2: return UPS_LOW_SPEED;
1418 case 3: return UPS_HIGH_SPEED;
1419 default: return UPS_OTHER_SPEED;
1420 }
1421 }
1422
1423 /*
1424  * Construct the input contexts and issue a Configure Endpoint command to
1425  * open the pipe.
1425 */
1426 static usbd_status
1427 xhci_configure_endpoint(struct usbd_pipe *pipe)
1428 {
1429 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1430 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1431 #ifdef USB_DEBUG
1432 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1433 #endif
1434 struct xhci_soft_trb trb;
1435 usbd_status err;
1436
1437 XHCIHIST_FUNC();
1438 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
1439 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
1440 pipe->up_endpoint->ue_edesc->bmAttributes);
1441
1442 /* XXX ensure input context is available? */
1443
1444 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz);
1445
1446 /* set up context */
1447 xhci_setup_ctx(pipe);
1448
1449 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0),
1450 sc->sc_ctxsz * 1);
1451 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs,
1452 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1);
1453
1454 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1455 trb.trb_2 = 0;
1456 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1457 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1458
1459 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
1460
1461 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1462 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci),
1463 sc->sc_ctxsz * 1);
1464
1465 return err;
1466 }
1467
1468 #if 0
1469 static usbd_status
1470 xhci_unconfigure_endpoint(struct usbd_pipe *pipe)
1471 {
1472 #ifdef USB_DEBUG
1473 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1474 #endif
1475
1476 XHCIHIST_FUNC();
1477 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
1478
1479 return USBD_NORMAL_COMPLETION;
1480 }
1481 #endif
1482
1483 /* 4.6.8, 6.4.3.7 */
1484 static usbd_status
1485 xhci_reset_endpoint_locked(struct usbd_pipe *pipe)
1486 {
1487 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1488 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1489 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1490 struct xhci_soft_trb trb;
1491 usbd_status err;
1492
1493 XHCIHIST_FUNC();
1494 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1495
1496 KASSERT(mutex_owned(&sc->sc_lock));
1497
1498 trb.trb_0 = 0;
1499 trb.trb_2 = 0;
1500 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1501 XHCI_TRB_3_EP_SET(dci) |
1502 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP);
1503
1504 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1505
1506 return err;
1507 }
1508
1509 static usbd_status
1510 xhci_reset_endpoint(struct usbd_pipe *pipe)
1511 {
1512 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1513
1514 mutex_enter(&sc->sc_lock);
1515 usbd_status ret = xhci_reset_endpoint_locked(pipe);
1516 mutex_exit(&sc->sc_lock);
1517
1518 return ret;
1519 }
1520
1521 /*
1522 * 4.6.9, 6.4.3.8
1523  * Stop execution of TDs on the transfer ring.
1524 * Should be called with sc_lock held.
1525 */
1526 static usbd_status
1527 xhci_stop_endpoint(struct usbd_pipe *pipe)
1528 {
1529 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1530 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1531 struct xhci_soft_trb trb;
1532 usbd_status err;
1533 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1534
1535 XHCIHIST_FUNC();
1536 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1537
1538 KASSERT(mutex_owned(&sc->sc_lock));
1539
1540 trb.trb_0 = 0;
1541 trb.trb_2 = 0;
1542 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1543 XHCI_TRB_3_EP_SET(dci) |
1544 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP);
1545
1546 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1547
1548 return err;
1549 }
1550
1551 /*
1552 * Set TR Dequeue Pointer.
1553 * xHCI 1.1 4.6.10 6.4.3.9
1554  * Purge all of the TRBs on the ring and reinitialize the ring.
1555  * Set the TR Dequeue Pointer to the start of the ring and the Cycle State to 1.
1556  * The EPSTATE of the endpoint must be ERROR or STOPPED, otherwise a
1557  * CONTEXT_STATE error will be generated.
1558 */
1559 static usbd_status
1560 xhci_set_dequeue_locked(struct usbd_pipe *pipe)
1561 {
1562 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1563 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1564 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1565 struct xhci_ring * const xr = xs->xs_xr[dci];
1566 struct xhci_soft_trb trb;
1567 usbd_status err;
1568
1569 XHCIHIST_FUNC();
1570 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1571
1572 KASSERT(mutex_owned(&sc->sc_lock));
1573 KASSERT(xr != NULL);
1574
1575 xhci_host_dequeue(xr);
1576
1577 /* set DCS */
1578 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
1579 trb.trb_2 = 0;
1580 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1581 XHCI_TRB_3_EP_SET(dci) |
1582 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
1583
1584 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1585
1586 return err;
1587 }
1588
1589 static usbd_status
1590 xhci_set_dequeue(struct usbd_pipe *pipe)
1591 {
1592 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1593
1594 mutex_enter(&sc->sc_lock);
1595 usbd_status ret = xhci_set_dequeue_locked(pipe);
1596 mutex_exit(&sc->sc_lock);
1597
1598 return ret;
1599 }
1600
1601 /*
1602  * Open a new pipe: called from usbd_setup_pipe_flags.
1603  * Fills in the pipe methods and, for device pipes, allocates the transfer
1604  * ring; if the pipe is not for ep0, also calls xhci_configure_endpoint.
1605 */
1606 static usbd_status
1607 xhci_open(struct usbd_pipe *pipe)
1608 {
1609 struct usbd_device * const dev = pipe->up_dev;
1610 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
1611 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
1612 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1613 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
1614 const u_int dci = xhci_ep_get_dci(ed);
1615 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
1616 usbd_status err;
1617
1618 XHCIHIST_FUNC();
1619 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr,
1620 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed);
1621 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx",
1622 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress,
1623 ed->bmAttributes);
1624 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize),
1625 ed->bInterval, 0, 0);
1626
1627 if (sc->sc_dying)
1628 return USBD_IOERROR;
1629
1630 /* Root Hub */
1631 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) {
1632 switch (ed->bEndpointAddress) {
1633 case USB_CONTROL_ENDPOINT:
1634 pipe->up_methods = &roothub_ctrl_methods;
1635 break;
1636 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
1637 pipe->up_methods = &xhci_root_intr_methods;
1638 break;
1639 default:
1640 pipe->up_methods = NULL;
1641 DPRINTFN(0, "bad bEndpointAddress 0x%02jx",
1642 ed->bEndpointAddress, 0, 0, 0);
1643 return USBD_INVAL;
1644 }
1645 return USBD_NORMAL_COMPLETION;
1646 }
1647
1648 switch (xfertype) {
1649 case UE_CONTROL:
1650 pipe->up_methods = &xhci_device_ctrl_methods;
1651 break;
1652 case UE_ISOCHRONOUS:
1653 pipe->up_methods = &xhci_device_isoc_methods;
1654 pipe->up_serialise = false;
1655 xpipe->xp_isoc_next = -1;
1656 break;
1657 case UE_BULK:
1658 pipe->up_methods = &xhci_device_bulk_methods;
1659 break;
1660 case UE_INTERRUPT:
1661 pipe->up_methods = &xhci_device_intr_methods;
1662 break;
1663 default:
1664 return USBD_IOERROR;
1665 break;
1666 }
1667
1668 KASSERT(xs != NULL);
1669 KASSERT(xs->xs_xr[dci] == NULL);
1670
1671 /* allocate transfer ring */
1672 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS,
1673 XHCI_TRB_ALIGN);
1674 if (err) {
1675 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0);
1676 return err;
1677 }
1678
1679 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT)
1680 return xhci_configure_endpoint(pipe);
1681
1682 return USBD_NORMAL_COMPLETION;
1683 }
1684
1685 /*
1686  * Close a pipe: called from usbd_kill_pipe via the close methods.
1687  * If the endpoint being closed is ep0, the whole slot is disabled.
1688 * Should be called with sc_lock held.
1689 */
1690 static void
1691 xhci_close_pipe(struct usbd_pipe *pipe)
1692 {
1693 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1694 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1695 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
1696 const u_int dci = xhci_ep_get_dci(ed);
1697 struct xhci_soft_trb trb;
1698 uint32_t *cp;
1699
1700 XHCIHIST_FUNC();
1701
1702 if (sc->sc_dying)
1703 return;
1704
1705 /* xs is uninitialized before xhci_init_slot */
1706 if (xs == NULL || xs->xs_idx == 0)
1707 return;
1708
1709 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
1710 (uintptr_t)pipe, xs->xs_idx, dci, 0);
1711
1712 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
1713 KASSERT(mutex_owned(&sc->sc_lock));
1714
1715 if (pipe->up_dev->ud_depth == 0)
1716 return;
1717
1718 if (dci == XHCI_DCI_EP_CONTROL) {
1719 DPRINTFN(4, "closing ep0", 0, 0, 0, 0);
1720 /* This frees all rings */
1721 xhci_disable_slot(sc, xs->xs_idx);
1722 return;
1723 }
1724
1725 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED)
1726 (void)xhci_stop_endpoint(pipe);
1727
1728 /*
1729 	 * Set the appropriate drop-context bit for this endpoint.
1730 	 * Don't set the DC bit to 1, otherwise all endpoints
1731 	 * would be deconfigured.
1732 */
1733 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
1734 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));
1735 cp[1] = htole32(0);
1736
1737 /* XXX should be most significant one, not dci? */
1738 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
1739 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci));
1740
1741 /* configure ep context performs an implicit dequeue */
1742 xhci_host_dequeue(xs->xs_xr[dci]);
1743
1744 /* sync input contexts before they are read from memory */
1745 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
1746
1747 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1748 trb.trb_2 = 0;
1749 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1750 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1751
1752 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1753 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1754
1755 xhci_ring_free(sc, &xs->xs_xr[dci]);
1756 }
1757
1758 /*
1759 * Abort transfer.
1760 * Should be called with sc_lock held.
1761 */
1762 static void
1763 xhci_abortx(struct usbd_xfer *xfer)
1764 {
1765 XHCIHIST_FUNC();
1766 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
1767 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
1768 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
1769
1770 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
1771 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
1772
1773 KASSERT(mutex_owned(&sc->sc_lock));
1774 ASSERT_SLEEPABLE();
1775
1776 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
1777 xfer->ux_status == USBD_TIMEOUT),
1778 "bad abort status: %d", xfer->ux_status);
1779
1780 /*
1781 * If we're dying, skip the hardware action and just notify the
1782 * software that we're done.
1783 */
1784 if (sc->sc_dying) {
1785 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer,
1786 xfer->ux_status, 0, 0);
1787 goto dying;
1788 }
1789
1790 /*
1791 * HC Step 1: Stop execution of TD on the ring.
1792 */
1793 switch (xhci_get_epstate(sc, xs, dci)) {
1794 case XHCI_EPSTATE_HALTED:
1795 (void)xhci_reset_endpoint_locked(xfer->ux_pipe);
1796 break;
1797 case XHCI_EPSTATE_STOPPED:
1798 break;
1799 default:
1800 (void)xhci_stop_endpoint(xfer->ux_pipe);
1801 break;
1802 }
1803 #ifdef DIAGNOSTIC
1804 uint32_t epst = xhci_get_epstate(sc, xs, dci);
1805 if (epst != XHCI_EPSTATE_STOPPED)
1806 DPRINTFN(4, "dci %ju not stopped %ju", dci, epst, 0, 0);
1807 #endif
1808
1809 /*
1810 * HC Step 2: Remove any vestiges of the xfer from the ring.
1811 */
1812 xhci_set_dequeue_locked(xfer->ux_pipe);
1813
1814 /*
1815 * Final Step: Notify completion to waiting xfers.
1816 */
1817 dying:
1818 usb_transfer_complete(xfer);
1819 DPRINTFN(14, "end", 0, 0, 0, 0);
1820
1821 KASSERT(mutex_owned(&sc->sc_lock));
1822 }
1823
1824 static void
1825 xhci_host_dequeue(struct xhci_ring * const xr)
1826 {
1827 /* When dequeueing the controller, update our struct copy too */
1828 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE);
1829 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE,
1830 BUS_DMASYNC_PREWRITE);
1831 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies));
1832
1833 xr->xr_ep = 0;
1834 xr->xr_cs = 1;
1835 }
1836
1837 /*
1838  * Recover a STALLed endpoint.
1839  * xHCI 1.1 sect 4.10.2.1
1840  * Issue RESET_EP to recover from the halt condition and SET_TR_DEQUEUE to
1841  * remove all transfers from the transfer ring.
1842  * These are done asynchronously in thread context.
1843 */
1844 static void
1845 xhci_clear_endpoint_stall_async_task(void *cookie)
1846 {
1847 struct usbd_xfer * const xfer = cookie;
1848 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
1849 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
1850 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
1851 struct xhci_ring * const tr = xs->xs_xr[dci];
1852
1853 XHCIHIST_FUNC();
1854 XHCIHIST_CALLARGS("xfer %#jx slot %ju dci %ju", (uintptr_t)xfer, xs->xs_idx,
1855 dci, 0);
1856
1857 /*
1858 * XXXMRG: Stall task can run after slot is disabled when yanked.
1859 * This hack notices that the xs has been memset() in
1860 * xhci_disable_slot() and returns. Both xhci_reset_endpoint()
1861 * and xhci_set_dequeue() rely upon a valid ring setup for correct
1862 * operation, and the latter will fault, as would
1863 * usb_transfer_complete() if it got that far.
1864 */
1865 if (xs->xs_idx == 0) {
1866 DPRINTFN(4, "ends xs_idx is 0", 0, 0, 0, 0);
1867 return;
1868 }
1869
1870 KASSERT(tr != NULL);
1871
1872 xhci_reset_endpoint(xfer->ux_pipe);
1873 xhci_set_dequeue(xfer->ux_pipe);
1874
1875 mutex_enter(&sc->sc_lock);
1876 tr->is_halted = false;
1877 usb_transfer_complete(xfer);
1878 mutex_exit(&sc->sc_lock);
1879 DPRINTFN(4, "ends", 0, 0, 0, 0);
1880 }
1881
1882 static usbd_status
1883 xhci_clear_endpoint_stall_async(struct usbd_xfer *xfer)
1884 {
1885 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
1886 struct xhci_pipe * const xp = (struct xhci_pipe *)xfer->ux_pipe;
1887
1888 XHCIHIST_FUNC();
1889 XHCIHIST_CALLARGS("xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
1890
1891 if (sc->sc_dying) {
1892 return USBD_IOERROR;
1893 }
1894
1895 usb_init_task(&xp->xp_async_task,
1896 xhci_clear_endpoint_stall_async_task, xfer, USB_TASKQ_MPSAFE);
1897 usb_add_task(xfer->ux_pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC);
1898 DPRINTFN(4, "ends", 0, 0, 0, 0);
1899
1900 return USBD_NORMAL_COMPLETION;
1901 }
1902
1903 /* Process root hub port status/change events and notify uhub_intr. */
1904 static void
1905 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport)
1906 {
1907 XHCIHIST_FUNC();
1908 XHCIHIST_CALLARGS("xhci%jd: port %ju status change",
1909 device_unit(sc->sc_dev), ctlrport, 0, 0);
1910
1911 if (ctlrport > sc->sc_maxports)
1912 return;
1913
1914 const size_t bn = xhci_ctlrport2bus(sc, ctlrport);
1915 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport);
1916 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn];
1917
1918 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change",
1919 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer);
1920
1921 if (xfer == NULL)
1922 return;
1923 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
1924
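	/*
	 * Report the change in the hub interrupt bitmap: bit N of the
	 * buffer corresponds to root hub port N.
	 */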
1925 uint8_t *p = xfer->ux_buf;
1926 memset(p, 0, xfer->ux_length);
1927 p[rhp / NBBY] |= 1 << (rhp % NBBY);
1928 xfer->ux_actlen = xfer->ux_length;
1929 xfer->ux_status = USBD_NORMAL_COMPLETION;
1930 usb_transfer_complete(xfer);
1931 }
1932
1933 /* Process Transfer Events */
1934 static void
1935 xhci_event_transfer(struct xhci_softc * const sc,
1936 const struct xhci_trb * const trb)
1937 {
1938 uint64_t trb_0;
1939 uint32_t trb_2, trb_3;
1940 uint8_t trbcode;
1941 u_int slot, dci;
1942 struct xhci_slot *xs;
1943 struct xhci_ring *xr;
1944 struct xhci_xfer *xx;
1945 struct usbd_xfer *xfer;
1946 usbd_status err;
1947
1948 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1949
1950 trb_0 = le64toh(trb->trb_0);
1951 trb_2 = le32toh(trb->trb_2);
1952 trb_3 = le32toh(trb->trb_3);
1953 trbcode = XHCI_TRB_2_ERROR_GET(trb_2);
1954 slot = XHCI_TRB_3_SLOT_GET(trb_3);
1955 dci = XHCI_TRB_3_EP_GET(trb_3);
1956 xs = &sc->sc_slots[slot];
1957 xr = xs->xs_xr[dci];
1958
1959 /* sanity check */
1960 KASSERT(xr != NULL);
1961 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots,
1962 "invalid xs_idx %u slot %u", xs->xs_idx, slot);
1963
1964 int idx = 0;
1965 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
1966 if (xhci_trb_get_idx(xr, trb_0, &idx)) {
1967 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0);
1968 return;
1969 }
1970 xx = xr->xr_cookies[idx];
1971
1972 /* clear cookie of consumed TRB */
1973 xr->xr_cookies[idx] = NULL;
1974
1975 /*
1976		 * xx is NULL if the pipe is open but the xfer has not been started.
1977		 * This happens when stopping an idle pipe.
1978 */
1979 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) {
1980 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju",
1981 idx, (uintptr_t)xx, trbcode, dci);
1982 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0,
1983 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)),
1984 0, 0);
1985 return;
1986 }
1987 } else {
1988 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. */
1989 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
1990 }
1991 /* XXX this may not happen */
1992 if (xx == NULL) {
1993 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
1994 return;
1995 }
1996 xfer = &xx->xx_xfer;
1997 /* XXX this may happen when detaching */
1998 if (xfer == NULL) {
1999 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2000 (uintptr_t)xx, trb_0, 0, 0);
2001 return;
2002 }
2003 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2004	/* XXX it is not clear why this happens */
2005 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2006
2007 if (!xfer->ux_pipe->up_repeat &&
2008 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2009 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2010 0, 0, 0);
2011 return;
2012 }
2013
2014 const uint8_t xfertype =
2015 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2016
2017 /* 4.11.5.2 Event Data TRB */
2018 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2019 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2020 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2021 if ((trb_0 & 0x3) == 0x3) {
2022 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2023 }
2024 }
2025
2026 switch (trbcode) {
2027 case XHCI_TRB_ERROR_SHORT_PKT:
2028 case XHCI_TRB_ERROR_SUCCESS:
2029 /*
2030 * A ctrl transfer can generate two events if it has a Data
2031 * stage. A short data stage can be OK and should not
2032 * complete the transfer as the status stage needs to be
2033 * performed.
2034 *
2035 * Note: Data and Status stage events point at same xfer.
2036 * ux_actlen and ux_dmabuf will be passed to
2037 * usb_transfer_complete after the Status stage event.
2038 *
2039		 * The stage that generated the event can be distinguished:
2040		 * + by checking the least significant 3 bits of trb_0 if ED==1
2041		 *   (see xhci_device_ctrl_start), or
2042		 * + by checking the type of the original TRB if ED==0.
2043 *
2044		 * In addition, intr, bulk, and isoc transfers currently
2045		 * consist of a single TD, so the "skip" is not needed for them.
2046		 * Only ctrl xfers use EVENT_DATA, so the driver can switch
2047		 * the flow by checking the ED bit.
2048 */
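		/*
		 * Illustrative example: a control IN transfer whose Data
		 * stage is short produces a Data-stage event that merely
		 * records ux_actlen here and returns; the xfer is completed
		 * when the Status-stage event arrives.
		 */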
2049 if (xfertype == UE_ISOCHRONOUS) {
2050 xfer->ux_frlengths[xx->xx_isoc_done] -=
2051 XHCI_TRB_2_REM_GET(trb_2);
2052 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2053 if (++xx->xx_isoc_done < xfer->ux_nframes)
2054 return;
2055 } else
2056 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2057 if (xfer->ux_actlen == 0)
2058 xfer->ux_actlen = xfer->ux_length -
2059 XHCI_TRB_2_REM_GET(trb_2);
2060 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2061 == XHCI_TRB_TYPE_DATA_STAGE) {
2062 return;
2063 }
2064 } else if ((trb_0 & 0x3) == 0x3) {
2065 return;
2066 }
2067 err = USBD_NORMAL_COMPLETION;
2068 break;
2069 case XHCI_TRB_ERROR_STOPPED:
2070 case XHCI_TRB_ERROR_LENGTH:
2071 case XHCI_TRB_ERROR_STOPPED_SHORT:
2072 err = USBD_IOERROR;
2073 break;
2074 case XHCI_TRB_ERROR_STALL:
2075 case XHCI_TRB_ERROR_BABBLE:
2076 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2077 xr->is_halted = true;
2078 /*
2079 * Try to claim this xfer for completion. If it has already
2080 * completed or aborted, drop it on the floor.
2081 */
2082 if (!usbd_xfer_trycomplete(xfer))
2083 return;
2084
2085 /*
2086		 * Stalled endpoints can be recovered by issuing
2087		 * a TYPE_RESET_EP command TRB to the xHCI instead of
2088		 * issuing a CLEAR_FEATURE UF_ENDPOINT_HALT request
2089		 * to the endpoint.  However, this function may be
2090		 * called from softint context (e.g. from umass);
2091		 * in that case the driver would hit the KASSERT in
2092		 * cv_timedwait in xhci_do_command.
2093		 * To avoid this, run reset_endpoint and
2094		 * usb_transfer_complete asynchronously in the usb
2095		 * task thread (and then umass issues the clear
2096		 * UF_ENDPOINT_HALT).
2097 */
2098
2099 /* Override the status. */
2100 xfer->ux_status = USBD_STALLED;
2101
2102 xhci_clear_endpoint_stall_async(xfer);
2103 return;
2104 default:
2105 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2106 err = USBD_IOERROR;
2107 break;
2108 }
2109
2110 /*
2111 * Try to claim this xfer for completion. If it has already
2112 * completed or aborted, drop it on the floor.
2113 */
2114 if (!usbd_xfer_trycomplete(xfer))
2115 return;
2116
2117 /* Set the status. */
2118 xfer->ux_status = err;
2119
2120 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
2121 (trb_0 & 0x3) == 0x0) {
2122 usb_transfer_complete(xfer);
2123 }
2124 }
2125
2126 /* Process Command complete events */
2127 static void
2128 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb)
2129 {
2130 uint64_t trb_0;
2131 uint32_t trb_2, trb_3;
2132
2133 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2134
2135 KASSERT(mutex_owned(&sc->sc_lock));
2136
2137 trb_0 = le64toh(trb->trb_0);
2138 trb_2 = le32toh(trb->trb_2);
2139 trb_3 = le32toh(trb->trb_3);
2140
2141 if (trb_0 == sc->sc_command_addr) {
2142 sc->sc_resultpending = false;
2143
2144 sc->sc_result_trb.trb_0 = trb_0;
2145 sc->sc_result_trb.trb_2 = trb_2;
2146 sc->sc_result_trb.trb_3 = trb_3;
2147 if (XHCI_TRB_2_ERROR_GET(trb_2) !=
2148 XHCI_TRB_ERROR_SUCCESS) {
2149 DPRINTFN(1, "command completion "
2150 "failure: 0x%016jx 0x%08jx 0x%08jx",
2151 trb_0, trb_2, trb_3, 0);
2152 }
2153 cv_signal(&sc->sc_command_cv);
2154 } else {
2155 DPRINTFN(1, "spurious event: %#jx 0x%016jx "
2156 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3);
2157 }
2158 }
2159
2160 /*
2161 * Process events.
2162  * Called from xhci_softintr.
2163 */
2164 static void
2165 xhci_handle_event(struct xhci_softc * const sc,
2166 const struct xhci_trb * const trb)
2167 {
2168 uint64_t trb_0;
2169 uint32_t trb_2, trb_3;
2170
2171 XHCIHIST_FUNC();
2172
2173 trb_0 = le64toh(trb->trb_0);
2174 trb_2 = le32toh(trb->trb_2);
2175 trb_3 = le32toh(trb->trb_3);
2176
2177 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx",
2178 (uintptr_t)trb, trb_0, trb_2, trb_3);
2179
2180 /*
2181 * 4.11.3.1, 6.4.2.1
2182 * TRB Pointer is invalid for these completion codes.
2183 */
2184 switch (XHCI_TRB_2_ERROR_GET(trb_2)) {
2185 case XHCI_TRB_ERROR_RING_UNDERRUN:
2186 case XHCI_TRB_ERROR_RING_OVERRUN:
2187 case XHCI_TRB_ERROR_VF_RING_FULL:
2188 return;
2189 default:
2190 if (trb_0 == 0) {
2191 return;
2192 }
2193 break;
2194 }
2195
2196 switch (XHCI_TRB_3_TYPE_GET(trb_3)) {
2197 case XHCI_TRB_EVENT_TRANSFER:
2198 xhci_event_transfer(sc, trb);
2199 break;
2200 case XHCI_TRB_EVENT_CMD_COMPLETE:
2201 xhci_event_cmd(sc, trb);
2202 break;
2203 case XHCI_TRB_EVENT_PORT_STS_CHANGE:
2204 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff));
2205 break;
2206 default:
2207 break;
2208 }
2209 }
2210
2211 static void
2212 xhci_softintr(void *v)
2213 {
2214 struct usbd_bus * const bus = v;
2215 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2216 struct xhci_ring * const er = sc->sc_er;
2217 struct xhci_trb *trb;
2218 int i, j, k;
2219
2220 XHCIHIST_FUNC();
2221
2222 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2223
2224 i = er->xr_ep;
2225 j = er->xr_cs;
2226
2227 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0);
2228
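	/*
	 * Consume events while the TRB's cycle bit matches our consumer
	 * cycle state; when the dequeue index wraps around the ring, the
	 * expected cycle state is toggled.
	 */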
2229 while (1) {
2230 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE,
2231 BUS_DMASYNC_POSTREAD);
2232 trb = &er->xr_trb[i];
2233 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0;
2234
2235 if (j != k)
2236 break;
2237
2238 xhci_handle_event(sc, trb);
2239
2240 i++;
2241 if (i == er->xr_ntrb) {
2242 i = 0;
2243 j ^= 1;
2244 }
2245 }
2246
2247 er->xr_ep = i;
2248 er->xr_cs = j;
2249
2250 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) |
2251 XHCI_ERDP_BUSY);
2252
2253 DPRINTFN(16, "ends", 0, 0, 0, 0);
2254
2255 return;
2256 }
2257
2258 static void
2259 xhci_poll(struct usbd_bus *bus)
2260 {
2261 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2262
2263 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2264
2265 mutex_enter(&sc->sc_intr_lock);
2266 int ret = xhci_intr1(sc);
2267 if (ret) {
2268 xhci_softintr(bus);
2269 }
2270 mutex_exit(&sc->sc_intr_lock);
2271
2272 return;
2273 }
2274
2275 static struct usbd_xfer *
2276 xhci_allocx(struct usbd_bus *bus, unsigned int nframes)
2277 {
2278 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2279 struct xhci_xfer *xx;
2280 u_int ntrbs;
2281
2282 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2283
2284 ntrbs = uimax(3, nframes);
2285 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs;
2286
2287 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
2288 if (xx != NULL) {
2289 memset(xx, 0, sizeof(*xx));
2290 if (ntrbs > 0) {
2291 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP);
2292 xx->xx_ntrb = ntrbs;
2293 }
2294 #ifdef DIAGNOSTIC
2295 xx->xx_xfer.ux_state = XFER_BUSY;
2296 #endif
2297 }
2298
2299 return &xx->xx_xfer;
2300 }
2301
2302 static void
2303 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
2304 {
2305 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2306 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
2307
2308 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2309
2310 #ifdef DIAGNOSTIC
2311 if (xfer->ux_state != XFER_BUSY &&
2312 xfer->ux_status != USBD_NOT_STARTED) {
2313 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx",
2314 (uintptr_t)xfer, xfer->ux_state, 0, 0);
2315 }
2316 xfer->ux_state = XFER_FREE;
2317 #endif
2318 if (xx->xx_ntrb > 0) {
2319 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb));
2320 xx->xx_trb = NULL;
2321 xx->xx_ntrb = 0;
2322 }
2323 pool_cache_put(sc->sc_xferpool, xx);
2324 }
2325
2326 static bool
2327 xhci_dying(struct usbd_bus *bus)
2328 {
2329 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2330
2331 return sc->sc_dying;
2332 }
2333
2334 static void
2335 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2336 {
2337 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2338
2339 *lock = &sc->sc_lock;
2340 }
2341
2342 extern uint32_t usb_cookie_no;
2343
2344 /*
2345 * xHCI 4.3
2346 * Called when uhub_explore finds a new device (via usbd_new_device).
2347 * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2348 * This function does:
2349  *   Allocate and construct the dev structure and its default endpoint (ep0).
2350  *   Allocate and open the pipe for ep0.
2351 * Enable slot and initialize slot context.
2352 * Set Address.
2353 * Read initial device descriptor.
2354 * Determine initial MaxPacketSize (mps) by speed.
2355 * Read full device descriptor.
2356 * Register this device.
2357  * Finally, the device state transitions to ADDRESSED.
2358 */
2359 static usbd_status
2360 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2361 int speed, int port, struct usbd_port *up)
2362 {
2363 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2364 struct usbd_device *dev;
2365 usbd_status err;
2366 usb_device_descriptor_t *dd;
2367 struct xhci_slot *xs;
2368 uint32_t *cp;
2369
2370 XHCIHIST_FUNC();
2371 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2372 port, depth, speed, (uintptr_t)up);
2373
2374 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2375 dev->ud_bus = bus;
2376 dev->ud_quirks = &usbd_no_quirk;
2377 dev->ud_addr = 0;
2378 dev->ud_ddesc.bMaxPacketSize = 0;
2379 dev->ud_depth = depth;
2380 dev->ud_powersrc = up;
2381 dev->ud_myhub = up->up_parent;
2382 dev->ud_speed = speed;
2383 dev->ud_langid = USBD_NOLANG;
2384 dev->ud_cookie.cookie = ++usb_cookie_no;
2385
2386 /* Set up default endpoint handle. */
2387 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2388 	/* doesn't matter, just don't leave it uninitialized */
2389 dev->ud_ep0.ue_toggle = 0;
2390
2391 /* Set up default endpoint descriptor. */
2392 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2393 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2394 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2395 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2396 dev->ud_ep0desc.bInterval = 0;
2397
2398 /* 4.3, 4.8.2.1 */
2399 switch (speed) {
2400 case USB_SPEED_SUPER:
2401 case USB_SPEED_SUPER_PLUS:
2402 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2403 break;
2404 case USB_SPEED_FULL:
2405 /* XXX using 64 as initial mps of ep0 in FS */
2406 case USB_SPEED_HIGH:
2407 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2408 break;
2409 case USB_SPEED_LOW:
2410 default:
2411 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2412 break;
2413 }
2414
2415 up->up_dev = dev;
2416
2417 dd = &dev->ud_ddesc;
2418
2419 if (depth == 0 && port == 0) {
2420 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2421 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2422
2423 /* Establish the default pipe. */
2424 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2425 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2426 if (err) {
2427 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0);
2428 goto bad;
2429 }
2430 err = usbd_get_initial_ddesc(dev, dd);
2431 if (err) {
2432 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2433 goto bad;
2434 }
2435 } else {
2436 uint8_t slot = 0;
2437
2438 /* 4.3.2 */
2439 err = xhci_enable_slot(sc, &slot);
2440 if (err) {
2441 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0);
2442 goto bad;
2443 }
2444
2445 xs = &sc->sc_slots[slot];
2446 dev->ud_hcpriv = xs;
2447
2448 /* 4.3.3 initialize slot structure */
2449 err = xhci_init_slot(dev, slot);
2450 if (err) {
2451 DPRINTFN(1, "init slot %ju", err, 0, 0, 0);
2452 dev->ud_hcpriv = NULL;
2453 /*
2454 			 * We have to call disable_slot here because
2455 			 * xs->xs_idx == 0 when xhci_init_slot fails,
2456 			 * and in that case usbd_remove_device won't work.
2457 */
2458 mutex_enter(&sc->sc_lock);
2459 xhci_disable_slot(sc, slot);
2460 mutex_exit(&sc->sc_lock);
2461 goto bad;
2462 }
2463
2464 /*
2465 * We have to establish the default pipe _after_ slot
2466 * structure has been prepared.
2467 */
2468 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2469 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2470 if (err) {
2471 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0,
2472 0);
2473 goto bad;
2474 }
2475
2476 /* 4.3.4 Address Assignment */
2477 err = xhci_set_address(dev, slot, false);
2478 if (err) {
2479 			DPRINTFN(1, "failed to set address: %ju", err, 0, 0, 0);
2480 goto bad;
2481 }
2482
2483 /* Allow device time to set new address */
2484 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE);
2485
2486 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2487 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT);
2488 HEXDUMP("slot context", cp, sc->sc_ctxsz);
2489 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3]));
2490 DPRINTFN(4, "device address %ju", addr, 0, 0, 0);
2491 /*
2492 * XXX ensure we know when the hardware does something
2493 * we can't yet cope with
2494 */
2495 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr);
2496 dev->ud_addr = addr;
2497
2498 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL,
2499 "addr %d already allocated", dev->ud_addr);
2500 /*
2501 * The root hub is given its own slot
2502 */
2503 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev;
2504
2505 err = usbd_get_initial_ddesc(dev, dd);
2506 if (err) {
2507 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2508 goto bad;
2509 }
2510
2511 /* 4.8.2.1 */
2512 if (USB_IS_SS(speed)) {
2513 if (dd->bMaxPacketSize != 9) {
2514 printf("%s: invalid mps 2^%u for SS ep0,"
2515 " using 512\n",
2516 device_xname(sc->sc_dev),
2517 dd->bMaxPacketSize);
2518 dd->bMaxPacketSize = 9;
2519 }
2520 USETW(dev->ud_ep0desc.wMaxPacketSize,
2521 (1 << dd->bMaxPacketSize));
2522 } else
2523 USETW(dev->ud_ep0desc.wMaxPacketSize,
2524 dd->bMaxPacketSize);
2525 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0);
2526 err = xhci_update_ep0_mps(sc, xs,
2527 UGETW(dev->ud_ep0desc.wMaxPacketSize));
2528 if (err) {
2529 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0);
2530 goto bad;
2531 }
2532 }
2533
2534 err = usbd_reload_device_desc(dev);
2535 if (err) {
2536 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
2537 goto bad;
2538 }
2539
2540 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,",
2541 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0);
2542 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,",
2543 dd->bDeviceClass, dd->bDeviceSubClass,
2544 dd->bDeviceProtocol, 0);
2545 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd",
2546 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations,
2547 dev->ud_speed);
2548
2549 usbd_get_device_strings(dev);
2550
2551 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev);
2552
2553 if (depth == 0 && port == 0) {
2554 usbd_attach_roothub(parent, dev);
2555 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0);
2556 return USBD_NORMAL_COMPLETION;
2557 }
2558
2559 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr);
2560 bad:
2561 if (err != USBD_NORMAL_COMPLETION) {
2562 usbd_remove_device(dev, up);
2563 }
2564
2565 return err;
2566 }
2567
2568 static usbd_status
2569 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp,
2570 size_t ntrb, size_t align)
2571 {
2572 usbd_status err;
2573 size_t size = ntrb * XHCI_TRB_SIZE;
2574 struct xhci_ring *xr;
2575
2576 XHCIHIST_FUNC();
2577 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx",
2578 (uintptr_t)*xrp, ntrb, align, 0);
2579
2580 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP);
2581 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0);
2582
2583 err = usb_allocmem(&sc->sc_bus, size, align, USBMALLOC_COHERENT,
2584 &xr->xr_dma);
2585 if (err) {
2586 kmem_free(xr, sizeof(struct xhci_ring));
2587 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0);
2588 return err;
2589 }
2590 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
2591 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP);
2592 xr->xr_trb = xhci_ring_trbv(xr, 0);
2593 xr->xr_ntrb = ntrb;
2594 xr->is_halted = false;
2595 xhci_host_dequeue(xr);
2596 *xrp = xr;
2597
2598 return USBD_NORMAL_COMPLETION;
2599 }
2600
2601 static void
2602 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr)
2603 {
2604 if (*xr == NULL)
2605 return;
2606
2607 usb_freemem(&sc->sc_bus, &(*xr)->xr_dma);
2608 mutex_destroy(&(*xr)->xr_lock);
2609 kmem_free((*xr)->xr_cookies,
2610 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb);
2611 kmem_free(*xr, sizeof(struct xhci_ring));
2612 *xr = NULL;
2613 }
2614
2615 static void
2616 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr,
2617 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs)
2618 {
2619 size_t i;
2620 u_int ri;
2621 u_int cs;
2622 uint64_t parameter;
2623 uint32_t status;
2624 uint32_t control;
2625
2626 XHCIHIST_FUNC();
2627 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju",
2628 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0);
2629
2630 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u",
2631 ntrbs, xr->xr_ntrb);
2632 for (i = 0; i < ntrbs; i++) {
2633 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr,
2634 (uintptr_t)trbs, i, 0);
2635 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx",
2636 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0);
2637 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) !=
2638 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3);
2639 }
2640
2641 ri = xr->xr_ep;
2642 cs = xr->xr_cs;
2643
2644 /*
2645 * Although the xhci hardware can do scatter/gather dma from
2646 * arbitrary sized buffers, there is a non-obvious restriction
2647 * that a LINK trb is only allowed at the end of a burst of
2648 * transfers - which might be 16kB.
2649 	 * Arbitrarily aligned LINK trbs definitely fail on Ivy Bridge.
2650 * The simple solution is not to allow a LINK trb in the middle
2651 * of anything - as here.
2652 * XXX: (dsl) There are xhci controllers out there (eg some made by
2653 * ASMedia) that seem to lock up if they process a LINK trb but
2654 * cannot process the linked-to trb yet.
2655 * The code should write the 'cycle' bit on the link trb AFTER
2656 * adding the other trb.
2657 */
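	/*
	 * To implement that, the loop below writes the first TRB with an
	 * inverted (i.e. invalid) cycle bit and only flips it to the valid
	 * value after all other TRBs, including any LINK TRB, are in place.
	 */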
2658 u_int firstep = xr->xr_ep;
2659 u_int firstcs = xr->xr_cs;
2660
2661 for (i = 0; i < ntrbs; ) {
2662 u_int oldri = ri;
2663 u_int oldcs = cs;
2664
2665 if (ri >= (xr->xr_ntrb - 1)) {
2666 /* Put Link TD at the end of ring */
2667 parameter = xhci_ring_trbp(xr, 0);
2668 status = 0;
2669 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) |
2670 XHCI_TRB_3_TC_BIT;
2671 xr->xr_cookies[ri] = NULL;
2672 xr->xr_ep = 0;
2673 xr->xr_cs ^= 1;
2674 ri = xr->xr_ep;
2675 cs = xr->xr_cs;
2676 } else {
2677 parameter = trbs[i].trb_0;
2678 status = trbs[i].trb_2;
2679 control = trbs[i].trb_3;
2680
2681 xr->xr_cookies[ri] = cookie;
2682 ri++;
2683 i++;
2684 }
2685 /*
2686 		 * If this is the first TRB, mark it invalid to prevent the
2687 		 * xHC from running it immediately.
2688 */
2689 if (oldri == firstep) {
2690 if (oldcs) {
2691 control &= ~XHCI_TRB_3_CYCLE_BIT;
2692 } else {
2693 control |= XHCI_TRB_3_CYCLE_BIT;
2694 }
2695 } else {
2696 if (oldcs) {
2697 control |= XHCI_TRB_3_CYCLE_BIT;
2698 } else {
2699 control &= ~XHCI_TRB_3_CYCLE_BIT;
2700 }
2701 }
2702 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control);
2703 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri,
2704 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
2705 }
2706
2707 /* Now invert cycle bit of first TRB */
2708 if (firstcs) {
2709 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT);
2710 } else {
2711 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT);
2712 }
2713 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep,
2714 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
2715
2716 xr->xr_ep = ri;
2717 xr->xr_cs = cs;
2718
2719 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep,
2720 xr->xr_cs, 0);
2721 }
2722
2723 static inline void
2724 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr,
2725 struct xhci_xfer *xx, u_int ntrb)
2726 {
2727 KASSERT(ntrb <= xx->xx_ntrb);
2728 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb);
2729 }
2730
2731 /*
2732  * Stop execution of commands, purge all commands on the command ring, and
2733  * rewind the dequeue pointer.
2734 */
2735 static void
2736 xhci_abort_command(struct xhci_softc *sc)
2737 {
2738 struct xhci_ring * const cr = sc->sc_cr;
2739 uint64_t crcr;
2740 int i;
2741
2742 XHCIHIST_FUNC();
2743 XHCIHIST_CALLARGS("command %#jx timeout, aborting",
2744 sc->sc_command_addr, 0, 0, 0);
2745
2746 mutex_enter(&cr->xr_lock);
2747
2748 /* 4.6.1.2 Aborting a Command */
2749 crcr = xhci_op_read_8(sc, XHCI_CRCR);
2750 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA);
2751
2752 for (i = 0; i < 500; i++) {
2753 crcr = xhci_op_read_8(sc, XHCI_CRCR);
2754 if ((crcr & XHCI_CRCR_LO_CRR) == 0)
2755 break;
2756 usb_delay_ms(&sc->sc_bus, 1);
2757 }
2758 if ((crcr & XHCI_CRCR_LO_CRR) != 0) {
2759 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0);
2760 /* reset HC here? */
2761 }
2762
2763 /* reset command ring dequeue pointer */
2764 cr->xr_ep = 0;
2765 cr->xr_cs = 1;
2766 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs);
2767
2768 mutex_exit(&cr->xr_lock);
2769 }
2770
2771 /*
2772  * Put a command on the command ring, ring the doorbell, and cv_timedwait
2773  * with a timeout.  Command completion is notified by cv_signal from
2774  * xhci_event_cmd() (called from xhci_softintr), or the wait times out.
2775 * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(),
2776 * then do_command examines it.
2777 */
2778 static usbd_status
2779 xhci_do_command_locked(struct xhci_softc * const sc,
2780 struct xhci_soft_trb * const trb, int timeout)
2781 {
2782 struct xhci_ring * const cr = sc->sc_cr;
2783 usbd_status err;
2784
2785 XHCIHIST_FUNC();
2786 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx",
2787 trb->trb_0, trb->trb_2, trb->trb_3, 0);
2788
2789 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
2790 KASSERT(mutex_owned(&sc->sc_lock));
2791
2792 while (sc->sc_command_addr != 0)
2793 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
2794
2795 /*
2796 	 * If the enqueue pointer points at the last entry of the ring, it is
2797 	 * the Link TRB, so the command TRB will be stored in the 0th TRB.
2798 */
2799 if (cr->xr_ep == cr->xr_ntrb - 1)
2800 sc->sc_command_addr = xhci_ring_trbp(cr, 0);
2801 else
2802 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep);
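	/*
	 * xhci_event_cmd() matches the completion event's TRB pointer
	 * against sc_command_addr to recognize this command.
	 */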
2803
2804 sc->sc_resultpending = true;
2805
2806 mutex_enter(&cr->xr_lock);
2807 xhci_ring_put(sc, cr, NULL, trb, 1);
2808 mutex_exit(&cr->xr_lock);
2809
2810 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0);
2811
2812 while (sc->sc_resultpending) {
2813 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock,
2814 MAX(1, mstohz(timeout))) == EWOULDBLOCK) {
2815 xhci_abort_command(sc);
2816 err = USBD_TIMEOUT;
2817 goto timedout;
2818 }
2819 }
2820
2821 trb->trb_0 = sc->sc_result_trb.trb_0;
2822 trb->trb_2 = sc->sc_result_trb.trb_2;
2823 trb->trb_3 = sc->sc_result_trb.trb_3;
2824
2825 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx",
2826 trb->trb_0, trb->trb_2, trb->trb_3, 0);
2827
2828 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) {
2829 case XHCI_TRB_ERROR_SUCCESS:
2830 err = USBD_NORMAL_COMPLETION;
2831 break;
2832 default:
2833 case 192 ... 223:
2834 DPRINTFN(5, "error %#jx",
2835 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0);
2836 err = USBD_IOERROR;
2837 break;
2838 case 224 ... 255:
2839 err = USBD_NORMAL_COMPLETION;
2840 break;
2841 }
2842
2843 timedout:
2844 sc->sc_resultpending = false;
2845 sc->sc_command_addr = 0;
2846 cv_broadcast(&sc->sc_cmdbusy_cv);
2847
2848 return err;
2849 }
2850
2851 static usbd_status
2852 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb,
2853 int timeout)
2854 {
2855
2856 mutex_enter(&sc->sc_lock);
2857 usbd_status ret = xhci_do_command_locked(sc, trb, timeout);
2858 mutex_exit(&sc->sc_lock);
2859
2860 return ret;
2861 }
2862
2863 static usbd_status
2864 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp)
2865 {
2866 struct xhci_soft_trb trb;
2867 usbd_status err;
2868
2869 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2870
2871 trb.trb_0 = 0;
2872 trb.trb_2 = 0;
2873 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
2874
2875 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
2876 if (err != USBD_NORMAL_COMPLETION) {
2877 return err;
2878 }
2879
2880 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3);
2881
2882 return err;
2883 }
2884
2885 /*
2886 * xHCI 4.6.4
2887  * Disable the slot and deallocate its rings and device/input context
2888  * DMA buffers.  All endpoints in the slot should already be stopped.
2889 * Should be called with sc_lock held.
2890 */
2891 static usbd_status
2892 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot)
2893 {
2894 struct xhci_soft_trb trb;
2895 struct xhci_slot *xs;
2896 usbd_status err;
2897
2898 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2899
2900 if (sc->sc_dying)
2901 return USBD_IOERROR;
2902
2903 trb.trb_0 = 0;
2904 trb.trb_2 = 0;
2905 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) |
2906 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT);
2907
2908 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
2909
2910 if (!err) {
2911 xs = &sc->sc_slots[slot];
2912 if (xs->xs_idx != 0) {
2913 xhci_free_slot(sc, xs);
2914 xhci_set_dcba(sc, 0, slot);
2915 memset(xs, 0, sizeof(*xs));
2916 }
2917 }
2918
2919 return err;
2920 }
2921
2922 /*
2923  * Set the address of the device and transition the slot state from
2924  * ENABLED to ADDRESSED if Block Set Address Request (BSR) is false.
2925  * If BSR==true, transition the slot state from ENABLED to DEFAULT.
2926 * see xHCI 1.1 4.5.3, 3.3.4
2927 * Should be called without sc_lock held.
2928 */
2929 static usbd_status
2930 xhci_address_device(struct xhci_softc * const sc,
2931 uint64_t icp, uint8_t slot_id, bool bsr)
2932 {
2933 struct xhci_soft_trb trb;
2934 usbd_status err;
2935
2936 XHCIHIST_FUNC();
2937 if (bsr) {
2938 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr",
2939 icp, slot_id, 0, 0);
2940 } else {
2941 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr",
2942 icp, slot_id, 0, 0);
2943 }
2944
2945 trb.trb_0 = icp;
2946 trb.trb_2 = 0;
2947 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) |
2948 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) |
2949 (bsr ? XHCI_TRB_3_BSR_BIT : 0);
2950
2951 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
2952
2953 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS)
2954 err = USBD_NO_ADDR;
2955
2956 return err;
2957 }
2958
2959 static usbd_status
2960 xhci_update_ep0_mps(struct xhci_softc * const sc,
2961 struct xhci_slot * const xs, u_int mps)
2962 {
2963 struct xhci_soft_trb trb;
2964 usbd_status err;
2965 uint32_t * cp;
2966
2967 XHCIHIST_FUNC();
2968 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0);
2969
2970 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
2971 cp[0] = htole32(0);
2972 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL));
2973
2974 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL));
2975 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps));
2976
2977 /* sync input contexts before they are read from memory */
2978 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
2979 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
2980 sc->sc_ctxsz * 4);
2981
2982 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
2983 trb.trb_2 = 0;
2984 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2985 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX);
2986
2987 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
2988 return err;
2989 }
2990
2991 static void
2992 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si)
2993 {
2994 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0);
2995
2996 XHCIHIST_FUNC();
2997 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd",
2998 (uintptr_t)&dcbaa[si], dcba, si, 0);
2999
3000 dcbaa[si] = htole64(dcba);
3001 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t),
3002 BUS_DMASYNC_PREWRITE);
3003 }
3004
3005 /*
3006  * Allocate the device and input context DMA buffers for a slot.
3007  * (Per-endpoint TRB rings are allocated separately.)
3008 */
3009 static usbd_status
3010 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3011 {
3012 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3013 struct xhci_slot *xs;
3014 usbd_status err;
3015
3016 XHCIHIST_FUNC();
3017 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3018
3019 xs = &sc->sc_slots[slot];
3020
3021 /* allocate contexts */
3022 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, sc->sc_pgsz,
3023 USBMALLOC_COHERENT, &xs->xs_dc_dma);
3024 if (err) {
3025 DPRINTFN(1, "failed to allocmem output device context %jd",
3026 err, 0, 0, 0);
3027 return err;
3028 }
3029 memset(KERNADDR(&xs->xs_dc_dma, 0), 0, sc->sc_pgsz);
3030
3031 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, sc->sc_pgsz,
3032 USBMALLOC_COHERENT, &xs->xs_ic_dma);
3033 if (err) {
3034 DPRINTFN(1, "failed to allocmem input device context %jd",
3035 err, 0, 0, 0);
3036 goto bad1;
3037 }
3038 memset(KERNADDR(&xs->xs_ic_dma, 0), 0, sc->sc_pgsz);
3039
3040 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr));
3041 xs->xs_idx = slot;
3042
3043 return USBD_NORMAL_COMPLETION;
3044
3045 bad1:
3046 usb_freemem(&sc->sc_bus, &xs->xs_dc_dma);
3047 xs->xs_idx = 0;
3048 return err;
3049 }
3050
3051 static void
3052 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs)
3053 {
3054 u_int dci;
3055
3056 XHCIHIST_FUNC();
3057 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
3058
3059 /* deallocate all allocated rings in the slot */
3060 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
3061 if (xs->xs_xr[dci] != NULL)
3062 xhci_ring_free(sc, &xs->xs_xr[dci]);
3063 }
3064 usb_freemem(&sc->sc_bus, &xs->xs_ic_dma);
3065 usb_freemem(&sc->sc_bus, &xs->xs_dc_dma);
3066 xs->xs_idx = 0;
3067 }
3068
3069 /*
3070 * Setup slot context, set Device Context Base Address, and issue
3071 * Set Address Device command.
3072 */
3073 static usbd_status
3074 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3075 {
3076 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3077 struct xhci_slot *xs;
3078 usbd_status err;
3079
3080 XHCIHIST_FUNC();
3081 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3082
3083 xs = &sc->sc_slots[slot];
3084
3085 xhci_setup_ctx(dev->ud_pipe0);
3086
3087 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3088 sc->sc_ctxsz * 3);
3089
3090 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3091
3092 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3093
3094 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3095 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3096 sc->sc_ctxsz * 2);
3097
3098 return err;
3099 }
3100
3101 /*
3102 * 4.8.2, 6.2.3.2
3103 * construct slot/endpoint context parameters and do syncmem
3104 */
3105 static void
3106 xhci_setup_ctx(struct usbd_pipe *pipe)
3107 {
3108 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3109 struct usbd_device *dev = pipe->up_dev;
3110 struct xhci_slot * const xs = dev->ud_hcpriv;
3111 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3112 const u_int dci = xhci_ep_get_dci(ed);
3113 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3114 uint32_t *cp;
3115 uint16_t mps = UGETW(ed->wMaxPacketSize);
3116 uint8_t speed = dev->ud_speed;
3117 uint8_t ival = ed->bInterval;
3118
3119 XHCIHIST_FUNC();
3120 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3121 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3122
3123 /* set up initial input control context */
3124 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3125 cp[0] = htole32(0);
3126 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3127 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT));
3128 cp[7] = htole32(0);
3129
3130 /* set up input slot context */
3131 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
3132 cp[0] =
3133 XHCI_SCTX_0_CTX_NUM_SET(dci) |
3134 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed));
3135 cp[1] = 0;
3136 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0);
3137 cp[3] = 0;
3138 xhci_setup_route(pipe, cp);
3139 xhci_setup_tthub(pipe, cp);
3140
3141 cp[0] = htole32(cp[0]);
3142 cp[1] = htole32(cp[1]);
3143 cp[2] = htole32(cp[2]);
3144 cp[3] = htole32(cp[3]);
3145
3146 /* set up input endpoint context */
3147 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci));
3148 cp[0] =
3149 XHCI_EPCTX_0_EPSTATE_SET(0) |
3150 XHCI_EPCTX_0_MULT_SET(0) |
3151 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) |
3152 XHCI_EPCTX_0_LSA_SET(0) |
3153 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0);
3154 cp[1] =
3155 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) |
3156 XHCI_EPCTX_1_HID_SET(0) |
3157 XHCI_EPCTX_1_MAXB_SET(0);
3158
3159 if (xfertype != UE_ISOCHRONOUS)
3160 cp[1] |= XHCI_EPCTX_1_CERR_SET(3);
3161
3162 if (xfertype == UE_CONTROL)
3163 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); /* 6.2.3 */
3164 else if (USB_IS_SS(speed))
3165 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(mps);
3166 else
3167 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(UE_GET_SIZE(mps));
3168
3169 xhci_setup_maxburst(pipe, cp);
3170
3171 switch (xfertype) {
3172 case UE_CONTROL:
3173 break;
3174 case UE_BULK:
3175 /* XXX Set MaxPStreams, HID, and LSA if streams enabled */
3176 break;
3177 case UE_INTERRUPT:
3178 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3179 ival = pipe->up_interval;
3180
3181 ival = xhci_bival2ival(ival, speed);
3182 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3183 break;
3184 case UE_ISOCHRONOUS:
3185 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3186 ival = pipe->up_interval;
3187
3188 /* xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 */
3189 if (speed == USB_SPEED_FULL)
3190 ival += 3; /* 1ms -> 125us */
3191 ival--;
3192 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3193 break;
3194 default:
3195 break;
3196 }
3197 DPRINTFN(4, "setting ival %ju MaxBurst %#jx",
3198 XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_1_MAXB_GET(cp[1]), 0, 0);
3199
3200 /* rewind TR dequeue pointer in xHC */
3201 /* can't use xhci_ep_get_dci() yet? */
3202 *(uint64_t *)(&cp[2]) = htole64(
3203 xhci_ring_trbp(xs->xs_xr[dci], 0) |
3204 XHCI_EPCTX_2_DCS_SET(1));
3205
3206 cp[0] = htole32(cp[0]);
3207 cp[1] = htole32(cp[1]);
3208 cp[4] = htole32(cp[4]);
3209
3210 /* rewind TR dequeue pointer in driver */
3211 struct xhci_ring *xr = xs->xs_xr[dci];
3212 mutex_enter(&xr->xr_lock);
3213 xhci_host_dequeue(xr);
3214 mutex_exit(&xr->xr_lock);
3215
3216 /* sync input contexts before they are read from memory */
3217 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3218 }
3219
3220 /*
3221  * Set up the route string and root hub port of the device for the slot context
3222 */
3223 static void
3224 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3225 {
3226 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3227 struct usbd_device *dev = pipe->up_dev;
3228 struct usbd_port *up = dev->ud_powersrc;
3229 struct usbd_device *hub;
3230 struct usbd_device *adev;
3231 uint8_t rhport = 0;
3232 uint32_t route = 0;
3233
3234 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3235
3236 	/* Locate the root hub port and determine the route string */
3237 /* 4.3.3 route string does not include roothub port */
3238 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3239 uint32_t dep;
3240
3241 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd",
3242 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3243 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3244 -1);
3245
3246 if (hub->ud_powersrc == NULL)
3247 break;
3248 dep = hub->ud_depth;
3249 if (dep == 0)
3250 break;
3251 rhport = hub->ud_powersrc->up_portno;
3252 if (dep > USB_HUB_MAX_DEPTH)
3253 continue;
3254
3255 route |=
3256 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3257 << ((dep - 1) * 4);
3258 }
3259 route = route >> 4;
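	/*
	 * Illustrative example: a device on port 3 of a hub that sits on
	 * root hub port 2 accumulates 0x32 in the loop above; the final
	 * shift drops the root-hub nibble, leaving route 0x3 and rhport 2.
	 */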
3260 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
3261
3262 /* Locate port on upstream high speed hub */
3263 for (adev = dev, hub = up->up_parent;
3264 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3265 adev = hub, hub = hub->ud_myhub)
3266 ;
3267 if (hub) {
3268 int p;
3269 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3270 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) {
3271 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1];
3272 goto found;
3273 }
3274 }
3275 panic("%s: cannot find HS port", __func__);
3276 found:
3277 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3278 } else {
3279 dev->ud_myhsport = NULL;
3280 }
3281
3282 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3283
3284 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3285 ctlrport, route, (uintptr_t)hub);
3286
3287 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3288 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3289 }
3290
3291 /*
3292  * Set up whether the device is a hub, whether it uses MTT, and the
3293  * TT information if it uses MTT.
3294 */
3295 static void
3296 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3297 {
3298 struct usbd_device *dev = pipe->up_dev;
3299 struct usbd_port *myhsport = dev->ud_myhsport;
3300 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3301 uint32_t speed = dev->ud_speed;
3302 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3303 uint8_t tthubslot, ttportnum;
3304 bool ishub;
3305 bool usemtt;
3306
3307 XHCIHIST_FUNC();
3308
3309 /*
3310 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3311 * tthubslot:
3312 * This is the slot ID of parent HS hub
3313 * if LS/FS device is connected && connected through HS hub.
3314 * This is 0 if device is not LS/FS device ||
3315 * parent hub is not HS hub ||
3316 * attached to root hub.
3317 * ttportnum:
3318 * This is the downstream facing port of parent HS hub
3319 * if LS/FS device is connected.
3320 * This is 0 if device is not LS/FS device ||
3321 * parent hub is not HS hub ||
3322 * attached to root hub.
3323 */
3324 if (myhsport &&
3325 myhsport->up_parent->ud_addr != rhaddr &&
3326 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3327 ttportnum = myhsport->up_portno;
3328 tthubslot = myhsport->up_parent->ud_addr;
3329 } else {
3330 ttportnum = 0;
3331 tthubslot = 0;
3332 }
3333 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd",
3334 (uintptr_t)myhsport, ttportnum, tthubslot, 0);
3335
3336 /* ishub is valid after reading UDESC_DEVICE */
3337 ishub = (dd->bDeviceClass == UDCLASS_HUB);
3338
3339 /* dev->ud_hub is valid after reading UDESC_HUB */
3340 if (ishub && dev->ud_hub) {
3341 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc;
3342 uint8_t ttt =
3343 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK);
3344
3345 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts);
3346 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt);
3347 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0);
3348 }
3349
3350 #define IS_MTTHUB(dd) \
3351 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT)
3352
3353 /*
3354 * MTT flag is set if
3355 * 1. this is HS hub && MTTs are supported and enabled; or
3356 * 2. this is LS or FS device && there is a parent HS hub where MTTs
3357 * are supported and enabled.
3358 *
3359 * XXX enabled is not tested yet
3360 */
3361 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd))
3362 usemtt = true;
3363 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) &&
3364 myhsport &&
3365 myhsport->up_parent->ud_addr != rhaddr &&
3366 IS_MTTHUB(&myhsport->up_parent->ud_ddesc))
3367 usemtt = true;
3368 else
3369 usemtt = false;
3370 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd",
3371 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt);
3372
3373 #undef IS_MTTHUB
3374
3375 cp[0] |=
3376 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) |
3377 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0);
3378 cp[2] |=
3379 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) |
3380 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum);
3381 }
3382
3383 /* set up params for periodic endpoint */
3384 static void
3385 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp)
3386 {
3387 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
3388 struct usbd_device *dev = pipe->up_dev;
3389 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3390 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3391 usbd_desc_iter_t iter;
3392 const usb_cdc_descriptor_t *cdcd;
3393 uint32_t maxb = 0;
3394 uint16_t mps = UGETW(ed->wMaxPacketSize);
3395 uint8_t speed = dev->ud_speed;
3396 uint8_t mult = 0;
3397 uint8_t ep;
3398
3399 /* config desc is NULL when opening ep0 */
3400 if (dev == NULL || dev->ud_cdesc == NULL)
3401 goto no_cdcd;
3402 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev,
3403 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY);
3404 if (cdcd == NULL)
3405 goto no_cdcd;
3406 usb_desc_iter_init(dev, &iter);
3407 iter.cur = (const void *)cdcd;
3408
3409 /* find endpoint_ss_comp desc for ep of this pipe */
3410 for (ep = 0;;) {
3411 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter);
3412 if (cdcd == NULL)
3413 break;
3414 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) {
3415 ep = ((const usb_endpoint_descriptor_t *)cdcd)->
3416 bEndpointAddress;
3417 if (UE_GET_ADDR(ep) ==
3418 UE_GET_ADDR(ed->bEndpointAddress)) {
3419 cdcd = (const usb_cdc_descriptor_t *)
3420 usb_desc_iter_next(&iter);
3421 break;
3422 }
3423 ep = 0;
3424 }
3425 }
3426 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
3427 const usb_endpoint_ss_comp_descriptor_t * esscd =
3428 (const usb_endpoint_ss_comp_descriptor_t *)cdcd;
3429 maxb = esscd->bMaxBurst;
3430 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes);
3431 }
3432
3433 no_cdcd:
3434 /* 6.2.3.4, 4.8.2.4 */
3435 if (USB_IS_SS(speed)) {
3436 /* USB 3.1 9.6.6 */
3437 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps);
3438 /* USB 3.1 9.6.7 */
3439 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3440 #ifdef notyet
3441 if (xfertype == UE_ISOCHRONOUS) {
3442 }
3443 if (XHCI_HCC2_LEC(sc->sc_hcc2) != 0) {
3444 /* use ESIT */
3445 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x);
3446 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(x);
3447
3448 /* XXX if LEC = 1, set ESIT instead */
3449 cp[0] |= XHCI_EPCTX_0_MULT_SET(0);
3450 } else {
3451 /* use ival */
3452 }
3453 #endif
3454 } else {
3455 /* USB 2.0 9.6.6 */
3456 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(UE_GET_SIZE(mps));
3457
3458 /* 6.2.3.4 */
3459 if (speed == USB_SPEED_HIGH &&
3460 (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)) {
3461 maxb = UE_GET_TRANS(mps);
3462 } else {
3463 /* LS/FS or HS CTRL or HS BULK */
3464 maxb = 0;
3465 }
3466 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3467 }
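	/*
	 * Example (illustrative): a high-speed high-bandwidth isoc endpoint
	 * with wMaxPacketSize 0x1400 (1024 bytes, 3 transactions/uframe)
	 * ends up with Max Packet Size 1024 and Max Burst Size 2 above.
	 */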
3468 xpipe->xp_maxb = maxb + 1;
3469 xpipe->xp_mult = mult + 1;
3470 }
3471
3472 /*
3473 * Convert endpoint bInterval value to endpoint context interval value
3474 * for Interrupt pipe.
3475 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
3476 */
3477 static uint32_t
3478 xhci_bival2ival(uint32_t ival, uint32_t speed)
3479 {
3480 if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
3481 int i;
3482
3483 /*
3484 * round ival down to "the nearest base 2 multiple of
3485 * bInterval * 8".
3486 * bInterval is at most 255 as its type is uByte.
3487 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
3488 */
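		/*
		 * Example: a FS interrupt endpoint with bInterval 10 (ms)
		 * gives 10 * 8 = 80 periods of 125us; 2^6 = 64 <= 80 < 128,
		 * so ival becomes 6, i.e. a 64 * 125us = 8ms service interval.
		 */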
3489 for (i = 10; i > 0; i--) {
3490 if ((ival * 8) >= (1 << i))
3491 break;
3492 }
3493 ival = i;
3494 } else {
3495 /* Interval = bInterval-1 for SS/HS */
3496 ival--;
3497 }
3498
3499 return ival;
3500 }
3501
3502 /* ----- */
3503
3504 static void
3505 xhci_noop(struct usbd_pipe *pipe)
3506 {
3507 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3508 }
3509
3510 /*
3511 * Process root hub request.
3512 */
3513 static int
3514 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
3515 void *buf, int buflen)
3516 {
3517 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
3518 usb_port_status_t ps;
3519 int l, totlen = 0;
3520 uint16_t len, value, index;
3521 int port, i;
3522 uint32_t v;
3523
3524 XHCIHIST_FUNC();
3525
3526 if (sc->sc_dying)
3527 return -1;
3528
3529 size_t bn = bus == &sc->sc_bus ? 0 : 1;
3530
3531 len = UGETW(req->wLength);
3532 value = UGETW(req->wValue);
3533 index = UGETW(req->wIndex);
3534
3535 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
3536 req->bmRequestType | (req->bRequest << 8), value, index, len);
3537
3538 #define C(x,y) ((x) | ((y) << 8))
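 /* C() packs bRequest and bmRequestType into a single switch value. */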
3539 switch (C(req->bRequest, req->bmRequestType)) {
3540 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3541 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
3542 if (len == 0)
3543 break;
3544 switch (value) {
3545 #define sd ((usb_string_descriptor_t *)buf)
3546 case C(2, UDESC_STRING):
3547 /* Product */
3548 totlen = usb_makestrdesc(sd, len, "xHCI root hub");
3549 break;
3550 #undef sd
3551 default:
3552 /* default from usbroothub */
3553 return buflen;
3554 }
3555 break;
3556
3557 /* Hub requests */
3558 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
3559 break;
3560 /* Clear Port Feature request */
3561 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
3562 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
3563
3564 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
3565 index, value, bn, cp);
3566 if (index < 1 || index > sc->sc_rhportcount[bn]) {
3567 return -1;
3568 }
3569 port = XHCI_PORTSC(cp);
3570 v = xhci_op_read_4(sc, port);
3571 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
3572 v &= ~XHCI_PS_CLEAR;
3573 switch (value) {
3574 case UHF_PORT_ENABLE:
3575 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
3576 break;
3577 case UHF_PORT_SUSPEND:
3578 return -1;
3579 case UHF_PORT_POWER:
3580 break;
3581 case UHF_PORT_TEST:
3582 case UHF_PORT_INDICATOR:
3583 return -1;
3584 case UHF_C_PORT_CONNECTION:
3585 xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
3586 break;
3587 case UHF_C_PORT_ENABLE:
3588 case UHF_C_PORT_SUSPEND:
3589 case UHF_C_PORT_OVER_CURRENT:
3590 return -1;
3591 case UHF_C_BH_PORT_RESET:
3592 xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
3593 break;
3594 case UHF_C_PORT_RESET:
3595 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
3596 break;
3597 case UHF_C_PORT_LINK_STATE:
3598 xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
3599 break;
3600 case UHF_C_PORT_CONFIG_ERROR:
3601 xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
3602 break;
3603 default:
3604 return -1;
3605 }
3606 break;
3607 }
3608 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
3609 if (len == 0)
3610 break;
3611 if ((value & 0xff) != 0) {
3612 return -1;
3613 }
3614 usb_hub_descriptor_t hubd;
3615
3616 totlen = uimin(buflen, sizeof(hubd));
3617 memcpy(&hubd, buf, totlen);
3618 hubd.bNbrPorts = sc->sc_rhportcount[bn];
3619 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
3620 hubd.bPwrOn2PwrGood = 200;
3621 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
3622 /* XXX can't find out? */
3623 hubd.DeviceRemovable[i++] = 0;
3624 }
3625 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
3626 totlen = uimin(totlen, hubd.bDescLength);
3627 memcpy(buf, &hubd, totlen);
3628 break;
3629 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
3630 if (len != 4) {
3631 return -1;
3632 }
3633 memset(buf, 0, len); /* ? XXX */
3634 totlen = len;
3635 break;
3636 /* Get Port Status request */
3637 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
3638 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
3639
3640 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
3641 bn, index, cp, 0);
3642 if (index < 1 || index > sc->sc_rhportcount[bn]) {
3643 DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
3644 "portcount=%jd",
3645 index, bn, sc->sc_rhportcount[bn], 0);
3646 return -1;
3647 }
3648 if (len != 4) {
3649 DPRINTFN(5, "bad get port status: len %jd != 4",
3650 len, 0, 0, 0);
3651 return -1;
3652 }
3653 v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
3654 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
3655 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
3656 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
3657 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
3658 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
3659 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
3660 if (v & XHCI_PS_PR) i |= UPS_RESET;
3661 if (v & XHCI_PS_PP) {
3662 if (i & UPS_OTHER_SPEED)
3663 i |= UPS_PORT_POWER_SS;
3664 else
3665 i |= UPS_PORT_POWER;
3666 }
3667 if (i & UPS_OTHER_SPEED)
3668 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
3669 if (sc->sc_vendor_port_status)
3670 i = sc->sc_vendor_port_status(sc, v, i);
3671 USETW(ps.wPortStatus, i);
3672 i = 0;
3673 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
3674 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
3675 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
3676 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
3677 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
3678 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
3679 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
3680 USETW(ps.wPortChange, i);
3681 totlen = uimin(len, sizeof(ps));
3682 memcpy(buf, &ps, totlen);
3683 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
3684 " totlen %jd",
3685 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
3686 break;
3687 }
3688 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
3689 return -1;
3690 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
3691 break;
3692 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
3693 break;
3694 /* Set Port Feature request */
3695 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
3696 int optval = (index >> 8) & 0xff;
3697 index &= 0xff;
3698 if (index < 1 || index > sc->sc_rhportcount[bn]) {
3699 return -1;
3700 }
3701
3702 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
3703
3704 port = XHCI_PORTSC(cp);
3705 v = xhci_op_read_4(sc, port);
3706 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
3707 v &= ~XHCI_PS_CLEAR;
3708 switch (value) {
3709 case UHF_PORT_ENABLE:
3710 xhci_op_write_4(sc, port, v | XHCI_PS_PED);
3711 break;
3712 case UHF_PORT_SUSPEND:
3713 /* XXX suspend */
3714 break;
3715 case UHF_PORT_RESET:
3716 v &= ~(XHCI_PS_PED | XHCI_PS_PR);
3717 xhci_op_write_4(sc, port, v | XHCI_PS_PR);
3718 /* Wait for reset to complete. */
3719 usb_delay_ms(&sc->sc_bus, USB_PORT_ROOT_RESET_DELAY);
3720 if (sc->sc_dying) {
3721 return -1;
3722 }
3723 v = xhci_op_read_4(sc, port);
3724 if (v & XHCI_PS_PR) {
3725 xhci_op_write_4(sc, port, v & ~XHCI_PS_PR);
3726 usb_delay_ms(&sc->sc_bus, 10);
3727 /* XXX */
3728 }
3729 break;
3730 case UHF_PORT_POWER:
3731 /* XXX power control */
3732 break;
3733 /* XXX more */
3734 case UHF_C_PORT_RESET:
3735 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
3736 break;
3737 case UHF_PORT_U1_TIMEOUT:
3738 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
3739 return -1;
3740 }
3741 port = XHCI_PORTPMSC(cp);
3742 v = xhci_op_read_4(sc, port);
3743 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
3744 index, cp, v, 0);
3745 v &= ~XHCI_PM3_U1TO_SET(0xff);
3746 v |= XHCI_PM3_U1TO_SET(optval);
3747 xhci_op_write_4(sc, port, v);
3748 break;
3749 case UHF_PORT_U2_TIMEOUT:
3750 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
3751 return -1;
3752 }
3753 port = XHCI_PORTPMSC(cp);
3754 v = xhci_op_read_4(sc, port);
3755 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
3756 index, cp, v, 0);
3757 v &= ~XHCI_PM3_U2TO_SET(0xff);
3758 v |= XHCI_PM3_U2TO_SET(optval);
3759 xhci_op_write_4(sc, port, v);
3760 break;
3761 default:
3762 return -1;
3763 }
3764 }
3765 break;
3766 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
3767 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
3768 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
3769 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
3770 break;
3771 default:
3772 /* default from usbroothub */
3773 return buflen;
3774 }
3775
3776 return totlen;
3777 }
3778
3779 /* root hub interrupt */
3780
3781 static usbd_status
3782 xhci_root_intr_transfer(struct usbd_xfer *xfer)
3783 {
3784 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
3785 usbd_status err;
3786
3787 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3788
3789 /* Insert last in queue. */
3790 mutex_enter(&sc->sc_lock);
3791 err = usb_insert_transfer(xfer);
3792 mutex_exit(&sc->sc_lock);
3793 if (err)
3794 return err;
3795
3796 /* Pipe isn't running, start first */
3797 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
3798 }
3799
3800 /* Wait for roothub port status/change */
3801 static usbd_status
3802 xhci_root_intr_start(struct usbd_xfer *xfer)
3803 {
3804 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
3805 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
3806 const bool polling = xhci_polling_p(sc);
3807
3808 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3809
3810 if (sc->sc_dying)
3811 return USBD_IOERROR;
3812
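	/*
	 * Just record the pending xfer; it is completed later when a
	 * root hub port status change event arrives.
	 */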
3813 if (!polling)
3814 mutex_enter(&sc->sc_lock);
3815 KASSERT(sc->sc_intrxfer[bn] == NULL);
3816 sc->sc_intrxfer[bn] = xfer;
3817 xfer->ux_status = USBD_IN_PROGRESS;
3818 if (!polling)
3819 mutex_exit(&sc->sc_lock);
3820
3821 return USBD_IN_PROGRESS;
3822 }
3823
3824 static void
3825 xhci_root_intr_abort(struct usbd_xfer *xfer)
3826 {
3827 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
3828 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
3829
3830 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3831
3832 KASSERT(mutex_owned(&sc->sc_lock));
3833 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
3834
3835 /* If xfer has already completed, nothing to do here. */
3836 if (sc->sc_intrxfer[bn] == NULL)
3837 return;
3838
3839 /*
3840 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
3841 * Cancel it.
3842 */
3843 KASSERT(sc->sc_intrxfer[bn] == xfer);
3844 xfer->ux_status = USBD_CANCELLED;
3845 usb_transfer_complete(xfer);
3846 }
3847
3848 static void
3849 xhci_root_intr_close(struct usbd_pipe *pipe)
3850 {
3851 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
3852 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
3853 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
3854
3855 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3856
3857 KASSERT(mutex_owned(&sc->sc_lock));
3858
3859 /*
3860 * Caller must guarantee the xfer has completed first, by
3861 * closing the pipe only after normal completion or an abort.
3862 */
3863 KASSERT(sc->sc_intrxfer[bn] == NULL);
3864 }
3865
3866 static void
3867 xhci_root_intr_done(struct usbd_xfer *xfer)
3868 {
3869 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
3870 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
3871
3872 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3873
3874 KASSERT(mutex_owned(&sc->sc_lock));
3875
3876 /* Claim the xfer so it doesn't get completed again. */
3877 KASSERT(sc->sc_intrxfer[bn] == xfer);
3878 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
3879 sc->sc_intrxfer[bn] = NULL;
3880 }
3881
3882 /* -------------- */
3883 /* device control */
3884
3885 static usbd_status
3886 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
3887 {
3888 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
3889 usbd_status err;
3890
3891 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3892
3893 /* Insert last in queue. */
3894 mutex_enter(&sc->sc_lock);
3895 err = usb_insert_transfer(xfer);
3896 mutex_exit(&sc->sc_lock);
3897 if (err)
3898 return err;
3899
3900 	/* Pipe isn't running, so start it first */
3901 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
3902 }
3903
3904 static usbd_status
3905 xhci_device_ctrl_start(struct usbd_xfer *xfer)
3906 {
3907 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
3908 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
3909 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
3910 struct xhci_ring * const tr = xs->xs_xr[dci];
3911 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
3912 usb_device_request_t * const req = &xfer->ux_request;
3913 const bool isread = usbd_xfer_isread(xfer);
3914 const uint32_t len = UGETW(req->wLength);
3915 usb_dma_t * const dma = &xfer->ux_dmabuf;
3916 uint64_t parameter;
3917 uint32_t status;
3918 uint32_t control;
3919 u_int i;
3920 const bool polling = xhci_polling_p(sc);
3921
3922 XHCIHIST_FUNC();
3923 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
3924 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
3925 UGETW(req->wIndex), UGETW(req->wLength));
3926
3927 /* we rely on the bottom bits for extra info */
3928 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %zx",
3929 (uintptr_t) xfer);
3930
3931 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);
3932
3933 i = 0;
3934
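	/*
	 * A control transfer is a Setup/(Data)/Status stage TD.  The
	 * 8-byte request goes inline in the Setup Stage TRB (IDT), so
	 * it needs no DMA buffer of its own.
	 */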
3935 /* setup phase */
3936 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */
3937 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
3938 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
3939 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
3940 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
3941 XHCI_TRB_3_IDT_BIT;
3942 xhci_xfer_put_trb(xx, i++, parameter, status, control);
3943
3944 if (len != 0) {
3945 /* data phase */
3946 parameter = DMAADDR(dma, 0);
3947 KASSERTMSG(len <= 0x10000, "len %d", len);
3948 status = XHCI_TRB_2_IRQ_SET(0) |
3949 XHCI_TRB_2_TDSZ_SET(0) |
3950 XHCI_TRB_2_BYTES_SET(len);
3951 control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
3952 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
3953 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
3954 XHCI_TRB_3_IOC_BIT;
3955 xhci_xfer_put_trb(xx, i++, parameter, status, control);
3956
3957 usb_syncmem(dma, 0, len,
3958 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
3959 }
3960
3961 parameter = 0;
3962 status = XHCI_TRB_2_IRQ_SET(0);
3963 	/* status stage has the opposite direction to the data stage (IN if no data) */
3964 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
3965 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
3966 XHCI_TRB_3_IOC_BIT;
3967 xhci_xfer_put_trb(xx, i++, parameter, status, control);
3968
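	/* Queue the TD on the transfer ring and ring the slot doorbell. */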
3969 if (!polling)
3970 mutex_enter(&tr->xr_lock);
3971 xhci_ring_put_xfer(sc, tr, xx, i);
3972 if (!polling)
3973 mutex_exit(&tr->xr_lock);
3974
3975 if (!polling)
3976 mutex_enter(&sc->sc_lock);
3977 xfer->ux_status = USBD_IN_PROGRESS;
3978 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
3979 usbd_xfer_schedule_timeout(xfer);
3980 if (!polling)
3981 mutex_exit(&sc->sc_lock);
3982
3983 return USBD_IN_PROGRESS;
3984 }
3985
3986 static void
3987 xhci_device_ctrl_done(struct usbd_xfer *xfer)
3988 {
3989 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3990 usb_device_request_t *req = &xfer->ux_request;
3991 int len = UGETW(req->wLength);
3992 int rd = req->bmRequestType & UT_READ;
3993
3994 if (len)
3995 usb_syncmem(&xfer->ux_dmabuf, 0, len,
3996 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3997 }
3998
3999 static void
4000 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
4001 {
4002 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4003
4004 usbd_xfer_abort(xfer);
4005 }
4006
4007 static void
4008 xhci_device_ctrl_close(struct usbd_pipe *pipe)
4009 {
4010 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4011
4012 xhci_close_pipe(pipe);
4013 }
4014
4015 /* ------------------ */
4016 /* device isochronous */
4017
4018 static usbd_status
4019 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
4020 {
4021 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4022 usbd_status err;
4023
4024 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4025
4026 /* Insert last in queue. */
4027 mutex_enter(&sc->sc_lock);
4028 err = usb_insert_transfer(xfer);
4029 mutex_exit(&sc->sc_lock);
4030 if (err)
4031 return err;
4032
4033 return xhci_device_isoc_enter(xfer);
4034 }
4035
4036 static usbd_status
4037 xhci_device_isoc_enter(struct usbd_xfer *xfer)
4038 {
4039 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4040 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4041 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4042 struct xhci_ring * const tr = xs->xs_xr[dci];
4043 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4044 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
4045 uint32_t len = xfer->ux_length;
4046 usb_dma_t * const dma = &xfer->ux_dmabuf;
4047 uint64_t parameter;
4048 uint32_t status;
4049 uint32_t control;
4050 uint32_t mfindex;
4051 uint32_t offs;
4052 int i, ival;
4053 const bool polling = xhci_polling_p(sc);
4054 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
4055 const uint16_t mps = UE_GET_SIZE(MPS);
4056 const uint8_t maxb = xpipe->xp_maxb;
4057 u_int tdpc, tbc, tlbpc;
4058
4059 XHCIHIST_FUNC();
4060 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4061 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4062
4063 if (sc->sc_dying)
4064 return USBD_IOERROR;
4065
4066 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
4067 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4068
4069 const bool isread = usbd_xfer_isread(xfer);
4070 if (xfer->ux_length)
4071 usb_syncmem(dma, 0, xfer->ux_length,
4072 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4073
4074 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
4075 if (ival >= 1 && ival <= 16)
4076 ival = 1 << (ival - 1);
4077 else
4078 ival = 1; /* fake something up */
4079
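	/*
	 * No start frame scheduled yet: derive one from MFINDEX.
	 * MFINDEX counts microframes, so convert to frames, skip past
	 * the worst-case isochronous scheduling threshold and round up
	 * to the endpoint interval.
	 */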
4080 if (xpipe->xp_isoc_next == -1) {
4081 mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
4082 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
4083 mfindex = XHCI_MFINDEX_GET(mfindex + 1);
4084 mfindex /= USB_UFRAMES_PER_FRAME;
4085 mfindex += 7; /* 7 frames is max possible IST */
4086 xpipe->xp_isoc_next = roundup2(mfindex, ival);
4087 }
4088
4089 offs = 0;
4090 for (i = 0; i < xfer->ux_nframes; i++) {
4091 len = xfer->ux_frlengths[i];
4092
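		/*
		 * Burst accounting for the Isoch TRB: packets in this
		 * frame (tdpc), Transfer Burst Count (bursts - 1, tbc)
		 * and Transfer Last Burst Packet Count (packets in the
		 * last burst - 1, tlbpc).
		 */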
4093 tdpc = howmany(len, mps);
4094 tbc = howmany(tdpc, maxb) - 1;
4095 tlbpc = tdpc % maxb;
4096 tlbpc = tlbpc ? tlbpc - 1 : maxb - 1;
4097
4098 KASSERTMSG(len <= 0x10000, "len %d", len);
4099 parameter = DMAADDR(dma, offs);
4100 status = XHCI_TRB_2_IRQ_SET(0) |
4101 XHCI_TRB_2_TDSZ_SET(0) |
4102 XHCI_TRB_2_BYTES_SET(len);
4103 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
4104 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4105 XHCI_TRB_3_TBC_SET(tbc) |
4106 XHCI_TRB_3_TLBPC_SET(tlbpc) |
4107 XHCI_TRB_3_IOC_BIT;
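		/*
		 * If the controller supports Contiguous Frame IDs, hand
		 * it the explicit frame number; otherwise let it start
		 * the TD as soon as possible (SIA).
		 */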
4108 if (XHCI_HCC_CFC(sc->sc_hcc)) {
4109 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4110 #if 0
4111 } else if (xpipe->xp_isoc_next == -1) {
4112 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4113 #endif
4114 } else {
4115 control |= XHCI_TRB_3_ISO_SIA_BIT;
4116 }
4117 #if 0
4118 if (i != xfer->ux_nframes - 1)
4119 control |= XHCI_TRB_3_BEI_BIT;
4120 #endif
4121 xhci_xfer_put_trb(xx, i, parameter, status, control);
4122
4123 xpipe->xp_isoc_next += ival;
4124 offs += len;
4125 }
4126
4127 xx->xx_isoc_done = 0;
4128
4129 if (!polling)
4130 mutex_enter(&tr->xr_lock);
4131 xhci_ring_put_xfer(sc, tr, xx, i);
4132 if (!polling)
4133 mutex_exit(&tr->xr_lock);
4134
4135 if (!polling)
4136 mutex_enter(&sc->sc_lock);
4137 xfer->ux_status = USBD_IN_PROGRESS;
4138 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4139 usbd_xfer_schedule_timeout(xfer);
4140 if (!polling)
4141 mutex_exit(&sc->sc_lock);
4142
4143 return USBD_IN_PROGRESS;
4144 }
4145
4146 static void
4147 xhci_device_isoc_abort(struct usbd_xfer *xfer)
4148 {
4149 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4150
4151 usbd_xfer_abort(xfer);
4152 }
4153
4154 static void
4155 xhci_device_isoc_close(struct usbd_pipe *pipe)
4156 {
4157 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4158
4159 xhci_close_pipe(pipe);
4160 }
4161
4162 static void
4163 xhci_device_isoc_done(struct usbd_xfer *xfer)
4164 {
4165 #ifdef USB_DEBUG
4166 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4167 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4168 #endif
4169 const bool isread = usbd_xfer_isread(xfer);
4170
4171 XHCIHIST_FUNC();
4172 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4173 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4174
4175 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4176 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4177 }
4178
4179 /* ----------- */
4180 /* device bulk */
4181
4182 static usbd_status
4183 xhci_device_bulk_transfer(struct usbd_xfer *xfer)
4184 {
4185 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4186 usbd_status err;
4187
4188 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4189
4190 /* Insert last in queue. */
4191 mutex_enter(&sc->sc_lock);
4192 err = usb_insert_transfer(xfer);
4193 mutex_exit(&sc->sc_lock);
4194 if (err)
4195 return err;
4196
4197 /*
4198 	 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
4199 * so start it first.
4200 */
4201 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4202 }
4203
4204 static usbd_status
4205 xhci_device_bulk_start(struct usbd_xfer *xfer)
4206 {
4207 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4208 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4209 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4210 struct xhci_ring * const tr = xs->xs_xr[dci];
4211 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4212 const uint32_t len = xfer->ux_length;
4213 usb_dma_t * const dma = &xfer->ux_dmabuf;
4214 uint64_t parameter;
4215 uint32_t status;
4216 uint32_t control;
4217 u_int i = 0;
4218 const bool polling = xhci_polling_p(sc);
4219
4220 XHCIHIST_FUNC();
4221 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4222 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4223
4224 if (sc->sc_dying)
4225 return USBD_IOERROR;
4226
4227 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4228
4229 parameter = DMAADDR(dma, 0);
4230 const bool isread = usbd_xfer_isread(xfer);
4231 if (len)
4232 usb_syncmem(dma, 0, len,
4233 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4234
4235 	/*
4236 	 * XXX: (dsl) The physical buffer must not cross a 64k boundary.
4237 	 * If the user-supplied buffer crosses such a boundary then two
4238 	 * (or more) TRBs should be used.
4239 	 * If multiple TRBs are used, the td_size field must be set correctly.
4240 	 * For v1.0 controllers (like Ivy Bridge) this is the number of USB
4241 	 * data blocks needed to complete the transfer.
4242 	 * Setting it to 1 in the last TRB causes an extra zero-length
4243 	 * data block to be sent.
4244 	 * Earlier documentation differs; I don't know how those controllers behave.
4245 	 */
4246 KASSERTMSG(len <= 0x10000, "len %d", len);
4247 status = XHCI_TRB_2_IRQ_SET(0) |
4248 XHCI_TRB_2_TDSZ_SET(0) |
4249 XHCI_TRB_2_BYTES_SET(len);
4250 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4251 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4252 XHCI_TRB_3_IOC_BIT;
4253 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4254
4255 if (!polling)
4256 mutex_enter(&tr->xr_lock);
4257 xhci_ring_put_xfer(sc, tr, xx, i);
4258 if (!polling)
4259 mutex_exit(&tr->xr_lock);
4260
4261 if (!polling)
4262 mutex_enter(&sc->sc_lock);
4263 xfer->ux_status = USBD_IN_PROGRESS;
4264 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4265 usbd_xfer_schedule_timeout(xfer);
4266 if (!polling)
4267 mutex_exit(&sc->sc_lock);
4268
4269 return USBD_IN_PROGRESS;
4270 }
4271
4272 static void
4273 xhci_device_bulk_done(struct usbd_xfer *xfer)
4274 {
4275 #ifdef USB_DEBUG
4276 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4277 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4278 #endif
4279 const bool isread = usbd_xfer_isread(xfer);
4280
4281 XHCIHIST_FUNC();
4282 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4283 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4284
4285 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4286 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4287 }
4288
4289 static void
4290 xhci_device_bulk_abort(struct usbd_xfer *xfer)
4291 {
4292 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4293
4294 usbd_xfer_abort(xfer);
4295 }
4296
4297 static void
4298 xhci_device_bulk_close(struct usbd_pipe *pipe)
4299 {
4300 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4301
4302 xhci_close_pipe(pipe);
4303 }
4304
4305 /* ---------------- */
4306 /* device interrupt */
4307
4308 static usbd_status
4309 xhci_device_intr_transfer(struct usbd_xfer *xfer)
4310 {
4311 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4312 usbd_status err;
4313
4314 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4315
4316 /* Insert last in queue. */
4317 mutex_enter(&sc->sc_lock);
4318 err = usb_insert_transfer(xfer);
4319 mutex_exit(&sc->sc_lock);
4320 if (err)
4321 return err;
4322
4323 /*
4324 	 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
4325 * so start it first.
4326 */
4327 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4328 }
4329
4330 static usbd_status
4331 xhci_device_intr_start(struct usbd_xfer *xfer)
4332 {
4333 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4334 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4335 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4336 struct xhci_ring * const tr = xs->xs_xr[dci];
4337 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4338 const uint32_t len = xfer->ux_length;
4339 const bool polling = xhci_polling_p(sc);
4340 usb_dma_t * const dma = &xfer->ux_dmabuf;
4341 uint64_t parameter;
4342 uint32_t status;
4343 uint32_t control;
4344 u_int i = 0;
4345
4346 XHCIHIST_FUNC();
4347 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4348 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4349
4350 if (sc->sc_dying)
4351 return USBD_IOERROR;
4352
4353 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4354
4355 const bool isread = usbd_xfer_isread(xfer);
4356 if (len)
4357 usb_syncmem(dma, 0, len,
4358 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4359
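	/*
	 * A single Normal TRB covers the whole buffer; ISP is set so
	 * short IN packets are reported.
	 */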
4360 parameter = DMAADDR(dma, 0);
4361 KASSERTMSG(len <= 0x10000, "len %d", len);
4362 status = XHCI_TRB_2_IRQ_SET(0) |
4363 XHCI_TRB_2_TDSZ_SET(0) |
4364 XHCI_TRB_2_BYTES_SET(len);
4365 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4366 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
4367 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4368
4369 if (!polling)
4370 mutex_enter(&tr->xr_lock);
4371 xhci_ring_put_xfer(sc, tr, xx, i);
4372 if (!polling)
4373 mutex_exit(&tr->xr_lock);
4374
4375 if (!polling)
4376 mutex_enter(&sc->sc_lock);
4377 xfer->ux_status = USBD_IN_PROGRESS;
4378 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4379 usbd_xfer_schedule_timeout(xfer);
4380 if (!polling)
4381 mutex_exit(&sc->sc_lock);
4382
4383 return USBD_IN_PROGRESS;
4384 }
4385
4386 static void
4387 xhci_device_intr_done(struct usbd_xfer *xfer)
4388 {
4389 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4390 #ifdef USB_DEBUG
4391 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4392 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4393 #endif
4394 const bool isread = usbd_xfer_isread(xfer);
4395
4396 XHCIHIST_FUNC();
4397 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4398 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4399
4400 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4401
4402 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4403 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4404 }
4405
4406 static void
4407 xhci_device_intr_abort(struct usbd_xfer *xfer)
4408 {
4409 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4410
4411 XHCIHIST_FUNC();
4412 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);
4413
4414 KASSERT(mutex_owned(&sc->sc_lock));
4415 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
4416 usbd_xfer_abort(xfer);
4417 }
4418
4419 static void
4420 xhci_device_intr_close(struct usbd_pipe *pipe)
4421 {
4422 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
4423
4424 XHCIHIST_FUNC();
4425 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);
4426
4427 xhci_close_pipe(pipe);
4428 }
4429