1 /* $NetBSD: xhci.c,v 1.134 2020/08/21 20:46:03 jakllsch Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * USB rev 2.0 and rev 3.1 specification
31 * http://www.usb.org/developers/docs/
32 * xHCI rev 1.1 specification
33 * http://www.intel.com/technology/usb/spec.htm
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.134 2020/08/21 20:46:03 jakllsch Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_usb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/kmem.h>
47 #include <sys/device.h>
48 #include <sys/select.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/sysctl.h>
56
57 #include <machine/endian.h>
58
59 #include <dev/usb/usb.h>
60 #include <dev/usb/usbdi.h>
61 #include <dev/usb/usbdivar.h>
62 #include <dev/usb/usbdi_util.h>
63 #include <dev/usb/usbhist.h>
64 #include <dev/usb/usb_mem.h>
65 #include <dev/usb/usb_quirks.h>
66
67 #include <dev/usb/xhcireg.h>
68 #include <dev/usb/xhcivar.h>
69 #include <dev/usb/usbroothub.h>
70
71
72 #ifdef USB_DEBUG
73 #ifndef XHCI_DEBUG
74 #define xhcidebug 0
75 #else /* !XHCI_DEBUG */
76 #define HEXDUMP(a, b, c) \
77 do { \
78 if (xhcidebug > 0) \
79 hexdump(printf, a, b, c); \
80 } while (/*CONSTCOND*/0)
81 static int xhcidebug = 0;
82
83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
84 {
85 int err;
86 const struct sysctlnode *rnode;
87 const struct sysctlnode *cnode;
88
89 err = sysctl_createv(clog, 0, NULL, &rnode,
90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
91 SYSCTL_DESCR("xhci global controls"),
92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
93
94 if (err)
95 goto fail;
96
97 /* control debugging printfs */
98 err = sysctl_createv(clog, 0, &rnode, &cnode,
99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
100 "debug", SYSCTL_DESCR("Enable debugging output"),
101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
102 if (err)
103 goto fail;
104
105 return;
106 fail:
107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
108 }
109
110 #endif /* !XHCI_DEBUG */
111 #endif /* USB_DEBUG */
112
113 #ifndef HEXDUMP
114 #define HEXDUMP(a, b, c)
115 #endif
116
117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
119 #define XHCIHIST_FUNC() USBHIST_FUNC()
120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug)
121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)
123
124 #define XHCI_DCI_SLOT 0
125 #define XHCI_DCI_EP_CONTROL 1
126
127 #define XHCI_ICI_INPUT_CONTROL 0
128
129 struct xhci_pipe {
130 struct usbd_pipe xp_pipe;
131 struct usb_task xp_async_task;
132 int16_t xp_isoc_next; /* next frame */
133 uint8_t xp_maxb; /* max burst */
134 uint8_t xp_mult;
135 };
136
137 #define XHCI_COMMAND_RING_TRBS 256
138 #define XHCI_EVENT_RING_TRBS 256
139 #define XHCI_EVENT_RING_SEGMENTS 1
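/*
 * The Event Data flag of a Transfer Event TRB occupies the same bit
 * position as the ISP flag of a Normal TRB, hence this alias.
 */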
140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT
141
142 static usbd_status xhci_open(struct usbd_pipe *);
143 static void xhci_close_pipe(struct usbd_pipe *);
144 static int xhci_intr1(struct xhci_softc * const);
145 static void xhci_softintr(void *);
146 static void xhci_poll(struct usbd_bus *);
147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
149 static void xhci_abortx(struct usbd_xfer *);
150 static bool xhci_dying(struct usbd_bus *);
151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
153 struct usbd_port *);
154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
155 void *, int);
156
157 static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
158 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
159 static usbd_status xhci_reset_endpoint(struct usbd_pipe *);
160 static usbd_status xhci_stop_endpoint(struct usbd_pipe *);
161
162 static void xhci_host_dequeue(struct xhci_ring * const);
163 static usbd_status xhci_set_dequeue(struct usbd_pipe *);
164
165 static usbd_status xhci_do_command(struct xhci_softc * const,
166 struct xhci_soft_trb * const, int);
167 static usbd_status xhci_do_command_locked(struct xhci_softc * const,
168 struct xhci_soft_trb * const, int);
169 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
170 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *);
171 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
172 static usbd_status xhci_enable_slot(struct xhci_softc * const,
173 uint8_t * const);
174 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
175 static usbd_status xhci_address_device(struct xhci_softc * const,
176 uint64_t, uint8_t, bool);
177 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
178 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
179 struct xhci_slot * const, u_int);
180 static usbd_status xhci_ring_init(struct xhci_softc * const,
181 struct xhci_ring **, size_t, size_t);
182 static void xhci_ring_free(struct xhci_softc * const,
183 struct xhci_ring ** const);
184
185 static void xhci_setup_ctx(struct usbd_pipe *);
186 static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
187 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
188 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
189 static uint32_t xhci_bival2ival(uint32_t, uint32_t);
190
191 static void xhci_noop(struct usbd_pipe *);
192
193 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
194 static usbd_status xhci_root_intr_start(struct usbd_xfer *);
195 static void xhci_root_intr_abort(struct usbd_xfer *);
196 static void xhci_root_intr_close(struct usbd_pipe *);
197 static void xhci_root_intr_done(struct usbd_xfer *);
198
199 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
200 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
201 static void xhci_device_ctrl_abort(struct usbd_xfer *);
202 static void xhci_device_ctrl_close(struct usbd_pipe *);
203 static void xhci_device_ctrl_done(struct usbd_xfer *);
204
205 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
206 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *);
207 static void xhci_device_isoc_abort(struct usbd_xfer *);
208 static void xhci_device_isoc_close(struct usbd_pipe *);
209 static void xhci_device_isoc_done(struct usbd_xfer *);
210
211 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
212 static usbd_status xhci_device_intr_start(struct usbd_xfer *);
213 static void xhci_device_intr_abort(struct usbd_xfer *);
214 static void xhci_device_intr_close(struct usbd_pipe *);
215 static void xhci_device_intr_done(struct usbd_xfer *);
216
217 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
218 static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
219 static void xhci_device_bulk_abort(struct usbd_xfer *);
220 static void xhci_device_bulk_close(struct usbd_pipe *);
221 static void xhci_device_bulk_done(struct usbd_xfer *);
222
223 static const struct usbd_bus_methods xhci_bus_methods = {
224 .ubm_open = xhci_open,
225 .ubm_softint = xhci_softintr,
226 .ubm_dopoll = xhci_poll,
227 .ubm_allocx = xhci_allocx,
228 .ubm_freex = xhci_freex,
229 .ubm_abortx = xhci_abortx,
230 .ubm_dying = xhci_dying,
231 .ubm_getlock = xhci_get_lock,
232 .ubm_newdev = xhci_new_device,
233 .ubm_rhctrl = xhci_roothub_ctrl,
234 };
235
236 static const struct usbd_pipe_methods xhci_root_intr_methods = {
237 .upm_transfer = xhci_root_intr_transfer,
238 .upm_start = xhci_root_intr_start,
239 .upm_abort = xhci_root_intr_abort,
240 .upm_close = xhci_root_intr_close,
241 .upm_cleartoggle = xhci_noop,
242 .upm_done = xhci_root_intr_done,
243 };
244
245
246 static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
247 .upm_transfer = xhci_device_ctrl_transfer,
248 .upm_start = xhci_device_ctrl_start,
249 .upm_abort = xhci_device_ctrl_abort,
250 .upm_close = xhci_device_ctrl_close,
251 .upm_cleartoggle = xhci_noop,
252 .upm_done = xhci_device_ctrl_done,
253 };
254
255 static const struct usbd_pipe_methods xhci_device_isoc_methods = {
256 .upm_transfer = xhci_device_isoc_transfer,
257 .upm_abort = xhci_device_isoc_abort,
258 .upm_close = xhci_device_isoc_close,
259 .upm_cleartoggle = xhci_noop,
260 .upm_done = xhci_device_isoc_done,
261 };
262
263 static const struct usbd_pipe_methods xhci_device_bulk_methods = {
264 .upm_transfer = xhci_device_bulk_transfer,
265 .upm_start = xhci_device_bulk_start,
266 .upm_abort = xhci_device_bulk_abort,
267 .upm_close = xhci_device_bulk_close,
268 .upm_cleartoggle = xhci_noop,
269 .upm_done = xhci_device_bulk_done,
270 };
271
272 static const struct usbd_pipe_methods xhci_device_intr_methods = {
273 .upm_transfer = xhci_device_intr_transfer,
274 .upm_start = xhci_device_intr_start,
275 .upm_abort = xhci_device_intr_abort,
276 .upm_close = xhci_device_intr_close,
277 .upm_cleartoggle = xhci_noop,
278 .upm_done = xhci_device_intr_done,
279 };
280
281 static inline uint32_t
282 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
283 {
284 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
285 }
286
287 static inline uint32_t
288 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset)
289 {
290 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
291 }
292
293 static inline uint32_t
294 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
295 {
296 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
297 }
298
299 static inline void
300 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
301 uint32_t value)
302 {
303 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
304 }
305
306 #if 0 /* unused */
307 static inline void
308 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
309 uint32_t value)
310 {
311 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
312 }
313 #endif /* unused */
314
315 static inline uint32_t
316 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
317 {
318 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
319 }
320
321 static inline uint32_t
322 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
323 {
324 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
325 }
326
327 static inline void
328 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
329 uint32_t value)
330 {
331 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
332 }
333
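/*
 * 64-bit operational register accessors.  When the controller supports
 * 64-bit addressing (HCC AC64), the access is done as one 8-byte or two
 * 4-byte bus_space operations; otherwise only the low 32 bits are touched.
 */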
334 static inline uint64_t
335 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
336 {
337 uint64_t value;
338
339 if (XHCI_HCC_AC64(sc->sc_hcc)) {
340 #ifdef XHCI_USE_BUS_SPACE_8
341 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
342 #else
343 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
344 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
345 offset + 4) << 32;
346 #endif
347 } else {
348 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
349 }
350
351 return value;
352 }
353
354 static inline void
355 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
356 uint64_t value)
357 {
358 if (XHCI_HCC_AC64(sc->sc_hcc)) {
359 #ifdef XHCI_USE_BUS_SPACE_8
360 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
361 #else
362 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
363 (value >> 0) & 0xffffffff);
364 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
365 (value >> 32) & 0xffffffff);
366 #endif
367 } else {
368 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
369 }
370 }
371
372 static inline void
373 xhci_op_barrier(const struct xhci_softc * const sc, bus_size_t offset,
374 bus_size_t len, int flags)
375 {
376 bus_space_barrier(sc->sc_iot, sc->sc_obh, offset, len, flags);
377 }
378
379 static inline uint32_t
380 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
381 {
382 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
383 }
384
385 static inline void
386 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
387 uint32_t value)
388 {
389 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
390 }
391
392 #if 0 /* unused */
393 static inline uint64_t
394 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
395 {
396 uint64_t value;
397
398 if (XHCI_HCC_AC64(sc->sc_hcc)) {
399 #ifdef XHCI_USE_BUS_SPACE_8
400 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
401 #else
402 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
403 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
404 offset + 4) << 32;
405 #endif
406 } else {
407 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
408 }
409
410 return value;
411 }
412 #endif /* unused */
413
414 static inline void
415 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
416 uint64_t value)
417 {
418 if (XHCI_HCC_AC64(sc->sc_hcc)) {
419 #ifdef XHCI_USE_BUS_SPACE_8
420 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
421 #else
422 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
423 (value >> 0) & 0xffffffff);
424 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
425 (value >> 32) & 0xffffffff);
426 #endif
427 } else {
428 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
429 }
430 }
431
432 #if 0 /* unused */
433 static inline uint32_t
434 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
435 {
436 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
437 }
438 #endif /* unused */
439
440 static inline void
441 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
442 uint32_t value)
443 {
444 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
445 }
446
447 /* --- */
448
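/*
 * Compute the xHCI endpoint context "EP Type" encoding for an endpoint
 * descriptor: the transfer type code in the low bits, with 0x4 added
 * for control and IN endpoints.
 */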
449 static inline uint8_t
450 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
451 {
452 u_int eptype = 0;
453
454 switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
455 case UE_CONTROL:
456 eptype = 0x0;
457 break;
458 case UE_ISOCHRONOUS:
459 eptype = 0x1;
460 break;
461 case UE_BULK:
462 eptype = 0x2;
463 break;
464 case UE_INTERRUPT:
465 eptype = 0x3;
466 break;
467 }
468
469 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
470 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
471 return eptype | 0x4;
472 else
473 return eptype;
474 }
475
476 static u_int
477 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
478 {
479 /* xHCI 1.0 section 4.5.1 */
480 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
481 u_int in = 0;
482
483 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
484 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
485 in = 1;
486
487 return epaddr * 2 + in;
488 }
489
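/*
 * Convert a device context index (DCI) to an input context index (ICI).
 * The input context carries the input control context at index 0, so
 * every entry is shifted up by one relative to the device context.
 */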
490 static inline u_int
491 xhci_dci_to_ici(const u_int i)
492 {
493 return i + 1;
494 }
495
496 static inline void *
497 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
498 const u_int dci)
499 {
500 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
501 }
502
503 #if 0 /* unused */
504 static inline bus_addr_t
505 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
506 const u_int dci)
507 {
508 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
509 }
510 #endif /* unused */
511
512 static inline void *
513 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
514 const u_int ici)
515 {
516 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
517 }
518
519 static inline bus_addr_t
520 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
521 const u_int ici)
522 {
523 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
524 }
525
526 static inline struct xhci_trb *
527 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
528 {
529 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
530 }
531
532 static inline bus_addr_t
533 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
534 {
535 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
536 }
537
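/*
 * xhci_xfer_put_trb records a TRB in the xfer's host-endian shadow copy;
 * xhci_trb_put stores a TRB in little-endian form for the hardware.
 */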
538 static inline void
539 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx,
540 uint64_t parameter, uint32_t status, uint32_t control)
541 {
542 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb);
543 xx->xx_trb[idx].trb_0 = parameter;
544 xx->xx_trb[idx].trb_2 = status;
545 xx->xx_trb[idx].trb_3 = control;
546 }
547
548 static inline void
549 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
550 uint32_t control)
551 {
552 trb->trb_0 = htole64(parameter);
553 trb->trb_2 = htole32(status);
554 trb->trb_3 = htole32(control);
555 }
556
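/*
 * Translate a TRB bus address (as reported in an event's trb_0) back to
 * an index on the given ring.  Returns nonzero if the address lies
 * outside the ring or is not TRB-aligned.
 */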
557 static int
558 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
559 {
560 /* base address of TRBs */
561 bus_addr_t trbp = xhci_ring_trbp(xr, 0);
562
563 /* trb_0 range sanity check */
564 if (trb_0 == 0 || trb_0 < trbp ||
565 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
566 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
567 return 1;
568 }
569 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
570 return 0;
571 }
572
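/*
 * Read the current endpoint state from the output (device) context of
 * the endpoint identified by dci.
 */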
573 static unsigned int
574 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
575 u_int dci)
576 {
577 uint32_t *cp;
578
579 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
580 cp = xhci_slot_get_dcv(sc, xs, dci);
581 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
582 }
583
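/*
 * Return the bus index (0 for the USB 3 bus, 1 for the USB 2 bus) that
 * a controller port (1..n) is routed to.
 */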
584 static inline unsigned int
585 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
586 {
587 const unsigned int port = ctlrport - 1;
588 const uint8_t bit = __BIT(port % NBBY);
589
590 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
591 }
592
593 /*
594 * Return the roothub port for a controller port. Both are 1..n.
595 */
596 static inline unsigned int
597 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
598 {
599
600 return sc->sc_ctlrportmap[ctrlport - 1];
601 }
602
603 /*
604 * Return the controller port for a bus roothub port. Both are 1..n.
605 */
606 static inline unsigned int
607 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
608 unsigned int rhport)
609 {
610
611 return sc->sc_rhportmap[bn][rhport - 1];
612 }
613
614 /* --- */
615
616 void
617 xhci_childdet(device_t self, device_t child)
618 {
619 struct xhci_softc * const sc = device_private(self);
620
621 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
622 if (child == sc->sc_child2)
623 sc->sc_child2 = NULL;
624 else if (child == sc->sc_child)
625 sc->sc_child = NULL;
626 }
627
628 int
629 xhci_detach(struct xhci_softc *sc, int flags)
630 {
631 int rv = 0;
632
633 if (sc->sc_child2 != NULL) {
634 rv = config_detach(sc->sc_child2, flags);
635 if (rv != 0)
636 return rv;
637 KASSERT(sc->sc_child2 == NULL);
638 }
639
640 if (sc->sc_child != NULL) {
641 rv = config_detach(sc->sc_child, flags);
642 if (rv != 0)
643 return rv;
644 KASSERT(sc->sc_child == NULL);
645 }
646
647 /* XXX unconfigure/free slots */
648
649 /* verify: */
650 xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
651 xhci_op_write_4(sc, XHCI_USBCMD, 0);
652 /* do we need to wait for stop? */
653
654 xhci_op_write_8(sc, XHCI_CRCR, 0);
655 xhci_ring_free(sc, &sc->sc_cr);
656 cv_destroy(&sc->sc_command_cv);
657 cv_destroy(&sc->sc_cmdbusy_cv);
658
659 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
660 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
661 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY);
662 xhci_ring_free(sc, &sc->sc_er);
663
664 usb_freemem(&sc->sc_bus, &sc->sc_eventst_dma);
665
666 xhci_op_write_8(sc, XHCI_DCBAAP, 0);
667 usb_freemem(&sc->sc_bus, &sc->sc_dcbaa_dma);
668
669 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);
670
671 kmem_free(sc->sc_ctlrportbus,
672 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
673 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));
674
675 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
676 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
677 }
678
679 mutex_destroy(&sc->sc_lock);
680 mutex_destroy(&sc->sc_intr_lock);
681
682 pool_cache_destroy(sc->sc_xferpool);
683
684 return rv;
685 }
686
687 int
688 xhci_activate(device_t self, enum devact act)
689 {
690 struct xhci_softc * const sc = device_private(self);
691
692 switch (act) {
693 case DVACT_DEACTIVATE:
694 sc->sc_dying = true;
695 return 0;
696 default:
697 return EOPNOTSUPP;
698 }
699 }
700
701 bool
702 xhci_suspend(device_t dv, const pmf_qual_t *qual)
703 {
704 return false;
705 }
706
707 bool
708 xhci_resume(device_t dv, const pmf_qual_t *qual)
709 {
710 return false;
711 }
712
713 bool
714 xhci_shutdown(device_t self, int flags)
715 {
716 return false;
717 }
718
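/*
 * Halt and reset the host controller: wait for CNR (Controller Not Ready)
 * to clear, halt the controller, issue HCRST, then wait for the reset and
 * CNR to complete.
 */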
719 static int
720 xhci_hc_reset(struct xhci_softc * const sc)
721 {
722 uint32_t usbcmd, usbsts;
723 int i;
724
725 /* Wait for the Controller Not Ready (CNR) flag to clear */
726 for (i = 0; i < XHCI_WAIT_CNR; i++) {
727 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
728 if ((usbsts & XHCI_STS_CNR) == 0)
729 break;
730 usb_delay_ms(&sc->sc_bus, 1);
731 }
732 if (i >= XHCI_WAIT_CNR) {
733 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
734 return EIO;
735 }
736
737 /* Halt controller */
738 usbcmd = 0;
739 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
740 usb_delay_ms(&sc->sc_bus, 1);
741
742 /* Reset controller */
743 usbcmd = XHCI_CMD_HCRST;
744 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
745 for (i = 0; i < XHCI_WAIT_HCRST; i++) {
746 /*
747 * Wait 1ms first. Existing Intel xHCI requires a 1ms delay to
748 * prevent system hang (Errata).
749 */
750 usb_delay_ms(&sc->sc_bus, 1);
751 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
752 if ((usbcmd & XHCI_CMD_HCRST) == 0)
753 break;
754 }
755 if (i >= XHCI_WAIT_HCRST) {
756 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
757 return EIO;
758 }
759
760 /* Wait for CNR to clear again after the reset */
761 for (i = 0; i < XHCI_WAIT_CNR; i++) {
762 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
763 if ((usbsts & XHCI_STS_CNR) == 0)
764 break;
765 usb_delay_ms(&sc->sc_bus, 1);
766 }
767 if (i >= XHCI_WAIT_CNR) {
768 aprint_error_dev(sc->sc_dev,
769 "controller not ready timeout after reset\n");
770 return EIO;
771 }
772
773 return 0;
774 }
775
776
777 /* 7.2 xHCI Support Protocol Capability */
778 static void
779 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
780 {
781 XHCIHIST_FUNC(); XHCIHIST_CALLED();
782
783 /* XXX Cache this lot */
784
785 const uint32_t w0 = xhci_read_4(sc, ecp);
786 const uint32_t w4 = xhci_read_4(sc, ecp + 4);
787 const uint32_t w8 = xhci_read_4(sc, ecp + 8);
788 const uint32_t wc = xhci_read_4(sc, ecp + 0xc);
789
790 aprint_debug_dev(sc->sc_dev,
791 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc);
792
793 if (w4 != XHCI_XECP_USBID)
794 return;
795
796 const int major = XHCI_XECP_SP_W0_MAJOR(w0);
797 const int minor = XHCI_XECP_SP_W0_MINOR(w0);
798 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
799 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);
800
801 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
802 switch (mm) {
803 case 0x0200:
804 case 0x0300:
805 case 0x0301:
806 case 0x0310:
807 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
808 major == 3 ? "ss" : "hs", cpo, cpo + cpc - 1);
809 break;
810 default:
811 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
812 major, minor);
813 return;
814 }
815
816 const size_t bus = (major == 3) ? 0 : 1;
817
818 /* Index arrays with 0..n-1 where ports are numbered 1..n */
819 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
820 if (sc->sc_ctlrportmap[cp] != 0) {
821 aprint_error_dev(sc->sc_dev, "controller port %zu "
822 "already assigned\n", cp);
823 continue;
824 }
825
826 sc->sc_ctlrportbus[cp / NBBY] |=
827 bus == 0 ? 0 : __BIT(cp % NBBY);
828
829 const size_t rhp = sc->sc_rhportcount[bus]++;
830
831 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
832 "bus %zu rhp %zu is %d", bus, rhp,
833 sc->sc_rhportmap[bus][rhp]);
834
835 sc->sc_rhportmap[bus][rhp] = cp + 1;
836 sc->sc_ctlrportmap[cp] = rhp + 1;
837 }
838 }
839
840 /* Process extended capabilities */
841 static void
842 xhci_ecp(struct xhci_softc *sc)
843 {
844 XHCIHIST_FUNC(); XHCIHIST_CALLED();
845
846 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4;
847 while (ecp != 0) {
848 uint32_t ecr = xhci_read_4(sc, ecp);
849 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
850 switch (XHCI_XECP_ID(ecr)) {
851 case XHCI_ID_PROTOCOLS: {
852 xhci_id_protocols(sc, ecp);
853 break;
854 }
855 case XHCI_ID_USB_LEGACY: {
856 uint8_t bios_sem;
857
858 /* Take host controller ownership from BIOS */
859 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
860 if (bios_sem) {
861 /* sets xHCI to be owned by OS */
862 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
863 aprint_debug_dev(sc->sc_dev,
864 "waiting for BIOS to give up control\n");
865 for (int i = 0; i < 5000; i++) {
866 bios_sem = xhci_read_1(sc, ecp +
867 XHCI_XECP_BIOS_SEM);
868 if (bios_sem == 0)
869 break;
870 DELAY(1000);
871 }
872 if (bios_sem) {
873 aprint_error_dev(sc->sc_dev,
874 "timed out waiting for BIOS\n");
875 }
876 }
877 break;
878 }
879 default:
880 break;
881 }
882 ecr = xhci_read_4(sc, ecp);
883 if (XHCI_XECP_NEXT(ecr) == 0) {
884 ecp = 0;
885 } else {
886 ecp += XHCI_XECP_NEXT(ecr) * 4;
887 }
888 }
889 }
890
891 #define XHCI_HCCPREV1_BITS \
892 "\177\020" /* New bitmask */ \
893 "f\020\020XECP\0" \
894 "f\014\4MAXPSA\0" \
895 "b\013CFC\0" \
896 "b\012SEC\0" \
897 "b\011SBD\0" \
898 "b\010FSE\0" \
899 "b\7NSS\0" \
900 "b\6LTC\0" \
901 "b\5LHRC\0" \
902 "b\4PIND\0" \
903 "b\3PPC\0" \
904 "b\2CZC\0" \
905 "b\1BNC\0" \
906 "b\0AC64\0" \
907 "\0"
908 #define XHCI_HCCV1_x_BITS \
909 "\177\020" /* New bitmask */ \
910 "f\020\020XECP\0" \
911 "f\014\4MAXPSA\0" \
912 "b\013CFC\0" \
913 "b\012SEC\0" \
914 "b\011SPC\0" \
915 "b\010PAE\0" \
916 "b\7NSS\0" \
917 "b\6LTC\0" \
918 "b\5LHRC\0" \
919 "b\4PIND\0" \
920 "b\3PPC\0" \
921 "b\2CSZ\0" \
922 "b\1BNC\0" \
923 "b\0AC64\0" \
924 "\0"
925
926 #define XHCI_HCC2_BITS \
927 "\177\020" /* New bitmask */ \
928 "b\7ETC_TSC\0" \
929 "b\6ETC\0" \
930 "b\5CIC\0" \
931 "b\4LEC\0" \
932 "b\3CTC\0" \
933 "b\2FSC\0" \
934 "b\1CMC\0" \
935 "b\0U3C\0" \
936 "\0"
937
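/*
 * Enable interrupter 0, program its moderation interval (Intel
 * controllers want a non-zero value), and start the controller with
 * interrupts enabled.
 */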
938 void
939 xhci_start(struct xhci_softc *sc)
940 {
941 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
942 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
943 /* Intel xhci needs interrupt rate moderated. */
944 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
945 else
946 xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
947 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
948 xhci_rt_read_4(sc, XHCI_IMOD(0)));
949
950 /* Go! */
951 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
952 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n",
953 xhci_op_read_4(sc, XHCI_USBCMD));
954 }
955
956 int
957 xhci_init(struct xhci_softc *sc)
958 {
959 bus_size_t bsz;
960 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff;
961 uint32_t pagesize, config;
962 int i = 0;
963 uint16_t hciversion;
964 uint8_t caplength;
965
966 XHCIHIST_FUNC(); XHCIHIST_CALLED();
967
968 /* Set up the bus struct for the usb 3 and usb 2 buses */
969 sc->sc_bus.ub_methods = &xhci_bus_methods;
970 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
971 sc->sc_bus.ub_usedma = true;
972 sc->sc_bus.ub_hcpriv = sc;
973
974 sc->sc_bus2.ub_methods = &xhci_bus_methods;
975 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
976 sc->sc_bus2.ub_revision = USBREV_2_0;
977 sc->sc_bus2.ub_usedma = true;
978 sc->sc_bus2.ub_hcpriv = sc;
979 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;
980
981 caplength = xhci_read_1(sc, XHCI_CAPLENGTH);
982 hciversion = xhci_read_2(sc, XHCI_HCIVERSION);
983
984 if (hciversion < XHCI_HCIVERSION_0_96 ||
985 hciversion >= 0x0200) {
986 aprint_normal_dev(sc->sc_dev,
987 "xHCI version %x.%x not known to be supported\n",
988 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
989 } else {
990 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
991 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
992 }
993
994 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
995 &sc->sc_cbh) != 0) {
996 aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
997 return ENOMEM;
998 }
999
1000 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
1001 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
1002 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
1003 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
1004 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
1005 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
1006 aprint_debug_dev(sc->sc_dev,
1007 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);
1008
1009 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
1010 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32;
1011
1012 char sbuf[128];
1013 if (hciversion < XHCI_HCIVERSION_1_0)
1014 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc);
1015 else
1016 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc);
1017 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
1018 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n",
1019 XHCI_HCC_XECP(sc->sc_hcc) * 4);
1020 if (hciversion >= XHCI_HCIVERSION_1_1) {
1021 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
1022 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2);
1023 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
1024 }
1025
1026 /* default all ports to bus 0, i.e. usb 3 */
1027 sc->sc_ctlrportbus = kmem_zalloc(
1028 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
1029 sc->sc_ctlrportmap = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1030
1031 /* per-bus roothub port to controller port map */
1032 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
1033 sc->sc_rhportmap[j] = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1034 }
1035
1036 /*
1037 * Process all Extended Capabilities
1038 */
1039 xhci_ecp(sc);
1040
1041 bsz = XHCI_PORTSC(sc->sc_maxports);
1042 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
1043 &sc->sc_obh) != 0) {
1044 aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
1045 return ENOMEM;
1046 }
1047
1048 dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
1049 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
1050 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
1051 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
1052 return ENOMEM;
1053 }
1054
1055 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
1056 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
1057 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
1058 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
1059 return ENOMEM;
1060 }
1061
1062 int rv;
1063 rv = xhci_hc_reset(sc);
1064 if (rv != 0) {
1065 return rv;
1066 }
1067
1068 if (sc->sc_vendor_init)
1069 sc->sc_vendor_init(sc);
1070
1071 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
1072 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
1073 pagesize = ffs(pagesize);
1074 if (pagesize == 0) {
1075 aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
1076 return EIO;
1077 }
1078 sc->sc_pgsz = 1 << (12 + (pagesize - 1));
1079 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
1080 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
1081 (uint32_t)sc->sc_maxslots);
1082 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);
1083
1084 usbd_status err;
1085
1086 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
1087 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
1088 if (sc->sc_maxspbuf != 0) {
1089 err = usb_allocmem(&sc->sc_bus,
1090 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
1091 USBMALLOC_COHERENT, &sc->sc_spbufarray_dma);
1092 if (err) {
1093 aprint_error_dev(sc->sc_dev,
1094 "spbufarray init fail, err %d\n", err);
1095 return ENOMEM;
1096 }
1097
1098 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
1099 sc->sc_maxspbuf, KM_SLEEP);
1100 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
1101 for (i = 0; i < sc->sc_maxspbuf; i++) {
1102 usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
1103 /* allocate a scratchpad buffer page */
1104 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz,
1105 sc->sc_pgsz, USBMALLOC_COHERENT, dma);
1106 if (err) {
1107 aprint_error_dev(sc->sc_dev,
1108 "spbufarray_dma init fail, err %d\n", err);
1109 rv = ENOMEM;
1110 goto bad1;
1111 }
1112 spbufarray[i] = htole64(DMAADDR(dma, 0));
1113 usb_syncmem(dma, 0, sc->sc_pgsz,
1114 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1115 }
1116
1117 usb_syncmem(&sc->sc_spbufarray_dma, 0,
1118 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
1119 }
1120
1121 config = xhci_op_read_4(sc, XHCI_CONFIG);
1122 config &= ~0xFF;
1123 config |= sc->sc_maxslots & 0xFF;
1124 xhci_op_write_4(sc, XHCI_CONFIG, config);
1125
1126 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
1127 XHCI_COMMAND_RING_SEGMENTS_ALIGN);
1128 if (err) {
1129 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n",
1130 err);
1131 rv = ENOMEM;
1132 goto bad1;
1133 }
1134
1135 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
1136 XHCI_EVENT_RING_SEGMENTS_ALIGN);
1137 if (err) {
1138 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
1139 err);
1140 rv = ENOMEM;
1141 goto bad2;
1142 }
1143
1144 usb_dma_t *dma;
1145 size_t size;
1146 size_t align;
1147
1148 dma = &sc->sc_eventst_dma;
1149 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
1150 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
1151 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
1152 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
1153 err = usb_allocmem(&sc->sc_bus, size, align, USBMALLOC_COHERENT, dma);
1154 if (err) {
1155 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
1156 err);
1157 rv = ENOMEM;
1158 goto bad3;
1159 }
1160
1161 memset(KERNADDR(dma, 0), 0, size);
1162 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1163 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
1164 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
1165 KERNADDR(&sc->sc_eventst_dma, 0),
1166 sc->sc_eventst_dma.udma_block->size);
1167
1168 dma = &sc->sc_dcbaa_dma;
1169 size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
1170 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
1171 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
1172 err = usb_allocmem(&sc->sc_bus, size, align, USBMALLOC_COHERENT, dma);
1173 if (err) {
1174 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
1175 rv = ENOMEM;
1176 goto bad4;
1177 }
1178 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
1179 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
1180 KERNADDR(&sc->sc_dcbaa_dma, 0),
1181 sc->sc_dcbaa_dma.udma_block->size);
1182
1183 memset(KERNADDR(dma, 0), 0, size);
1184 if (sc->sc_maxspbuf != 0) {
1185 /*
1186 * DCBA entry 0 holds the scratchpad buffer array pointer.
1187 */
1188 *(uint64_t *)KERNADDR(dma, 0) =
1189 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
1190 }
1191 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1192
1193 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
1194 KM_SLEEP);
1195 if (sc->sc_slots == NULL) {
1196 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
1197 rv = ENOMEM;
1198 goto bad;
1199 }
1200
1201 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
1202 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
1203 if (sc->sc_xferpool == NULL) {
1204 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
1205 err);
1206 rv = ENOMEM;
1207 goto bad;
1208 }
1209
1210 cv_init(&sc->sc_command_cv, "xhcicmd");
1211 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
1212 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
1213 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
1214
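	/*
	 * Fill in the single event ring segment table entry and point the
	 * controller's ERST, ERDP, DCBAA and command ring registers at the
	 * structures allocated above.
	 */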
1215 struct xhci_erste *erst;
1216 erst = KERNADDR(&sc->sc_eventst_dma, 0);
1217 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0));
1218 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb);
1219 erst[0].erste_3 = htole32(0);
1220 usb_syncmem(&sc->sc_eventst_dma, 0,
1221 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);
1222
1223 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
1224 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
1225 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) |
1226 XHCI_ERDP_BUSY);
1227
1228 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
1229 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1230 sc->sc_cr->xr_cs);
1231
1232 xhci_op_barrier(sc, 0, 4, BUS_SPACE_BARRIER_WRITE);
1233
1234 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
1235 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);
1236
1237 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
1238 xhci_start(sc);
1239
1240 return 0;
1241
1242 bad:
1243 if (sc->sc_xferpool) {
1244 pool_cache_destroy(sc->sc_xferpool);
1245 sc->sc_xferpool = NULL;
1246 }
1247
1248 if (sc->sc_slots) {
1249 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
1250 sc->sc_maxslots);
1251 sc->sc_slots = NULL;
1252 }
1253
1254 usb_freemem(&sc->sc_bus, &sc->sc_dcbaa_dma);
1255 bad4:
1256 usb_freemem(&sc->sc_bus, &sc->sc_eventst_dma);
1257 bad3:
1258 xhci_ring_free(sc, &sc->sc_er);
1259 bad2:
1260 xhci_ring_free(sc, &sc->sc_cr);
1261 i = sc->sc_maxspbuf;
1262 bad1:
1263 for (int j = 0; j < i; j++)
1264 usb_freemem(&sc->sc_bus, &sc->sc_spbuf_dma[j]);
1265 usb_freemem(&sc->sc_bus, &sc->sc_spbufarray_dma);
1266
1267 return rv;
1268 }
1269
1270 static inline bool
1271 xhci_polling_p(struct xhci_softc * const sc)
1272 {
1273 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
1274 }
1275
1276 int
1277 xhci_intr(void *v)
1278 {
1279 struct xhci_softc * const sc = v;
1280 int ret = 0;
1281
1282 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1283
1284 if (sc == NULL)
1285 return 0;
1286
1287 mutex_spin_enter(&sc->sc_intr_lock);
1288
1289 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1290 goto done;
1291
1292 /* If we get an interrupt while polling, then just ignore it. */
1293 if (xhci_polling_p(sc)) {
1294 #ifdef DIAGNOSTIC
1295 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1296 #endif
1297 goto done;
1298 }
1299
1300 ret = xhci_intr1(sc);
1301 if (ret) {
1302 KASSERT(sc->sc_child || sc->sc_child2);
1303
1304 /*
1305 * One of the child busses may already be detached. It doesn't
1306 * matter on which of the two the softintr is scheduled.
1307 */
1308 if (sc->sc_child)
1309 usb_schedsoftintr(&sc->sc_bus);
1310 else
1311 usb_schedsoftintr(&sc->sc_bus2);
1312 }
1313 done:
1314 mutex_spin_exit(&sc->sc_intr_lock);
1315 return ret;
1316 }
1317
1318 int
1319 xhci_intr1(struct xhci_softc * const sc)
1320 {
1321 uint32_t usbsts;
1322 uint32_t iman;
1323
1324 XHCIHIST_FUNC();
1325
1326 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1327 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
1328 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
1329 XHCI_STS_HCE)) == 0) {
1330 DPRINTFN(16, "ignored intr not for %jd",
1331 device_unit(sc->sc_dev), 0, 0, 0);
1332 return 0;
1333 }
1334
1335 /*
1336 * Clear EINT and other transient flags so that the next shared
1337 * interrupt is not misinterpreted. Also, to avoid a race, EINT must
1338 * be cleared before XHCI_IMAN_INTR_PEND is cleared.
1339 */
1340 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & XHCI_STS_RSVDP0);
1341
1342 #ifdef XHCI_DEBUG
1343 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1344 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1345 #endif
1346
1347 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1348 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1349 iman |= XHCI_IMAN_INTR_PEND;
1350 xhci_rt_write_4(sc, XHCI_IMAN(0), iman);
1351
1352 #ifdef XHCI_DEBUG
1353 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1354 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1355 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1356 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1357 #endif
1358
1359 return 1;
1360 }
1361
1362 /*
1363 * 3 port speed types used in USB stack
1364 *
1365 * usbdi speed
1366 * definition: USB_SPEED_* in usb.h
1367 * They are used in struct usbd_device in USB stack.
1368 * ioctl interface uses these values too.
1369 * port_status speed
1370 * definition: UPS_*_SPEED in usb.h
1371 * They are used in usb_port_status_t and valid only for USB 2.0.
1372 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus
1373 * of usb_port_status_ext_t indicates port speed.
1374 * Note that some 3.0 values overlap with 2.0 values.
1375 * (e.g. 0x200 means UPS_PORT_POWER_SS in SS and
1376 * means UPS_LOW_SPEED in HS.)
1377 * Port status returned from a hub also uses these values.
1378 * On NetBSD, UPS_OTHER_SPEED indicates that the port speed is
1379 * Super Speed or higher.
1380 * xspeed:
1381 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
1382 * They are used only in the slot context and the PORTSC registers of xHCI.
1383 * The difference between usbdi speed and xspeed is
1384 * that FS and LS values are swapped.
1385 */
1386
1387 /* convert usbdi speed to xspeed */
1388 static int
1389 xhci_speed2xspeed(int speed)
1390 {
1391 switch (speed) {
1392 case USB_SPEED_LOW: return 2;
1393 case USB_SPEED_FULL: return 1;
1394 default: return speed;
1395 }
1396 }
1397
1398 #if 0
1399 /* convert xspeed to usbdi speed */
1400 static int
1401 xhci_xspeed2speed(int xspeed)
1402 {
1403 switch (xspeed) {
1404 case 1: return USB_SPEED_FULL;
1405 case 2: return USB_SPEED_LOW;
1406 default: return xspeed;
1407 }
1408 }
1409 #endif
1410
1411 /* convert xspeed to port status speed */
1412 static int
1413 xhci_xspeed2psspeed(int xspeed)
1414 {
1415 switch (xspeed) {
1416 case 0: return 0;
1417 case 1: return UPS_FULL_SPEED;
1418 case 2: return UPS_LOW_SPEED;
1419 case 3: return UPS_HIGH_SPEED;
1420 default: return UPS_OTHER_SPEED;
1421 }
1422 }
1423
1424 /*
1425 * Construct input contexts and issue TRB to open pipe.
1426 */
1427 static usbd_status
1428 xhci_configure_endpoint(struct usbd_pipe *pipe)
1429 {
1430 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1431 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1432 #ifdef USB_DEBUG
1433 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1434 #endif
1435 struct xhci_soft_trb trb;
1436 usbd_status err;
1437
1438 XHCIHIST_FUNC();
1439 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
1440 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
1441 pipe->up_endpoint->ue_edesc->bmAttributes);
1442
1443 /* XXX ensure input context is available? */
1444
1445 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz);
1446
1447 /* set up context */
1448 xhci_setup_ctx(pipe);
1449
1450 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0),
1451 sc->sc_ctxsz * 1);
1452 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs,
1453 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1);
1454
1455 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1456 trb.trb_2 = 0;
1457 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1458 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1459
1460 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
1461
1462 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1463 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci),
1464 sc->sc_ctxsz * 1);
1465
1466 return err;
1467 }
1468
1469 #if 0
1470 static usbd_status
1471 xhci_unconfigure_endpoint(struct usbd_pipe *pipe)
1472 {
1473 #ifdef USB_DEBUG
1474 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1475 #endif
1476
1477 XHCIHIST_FUNC();
1478 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
1479
1480 return USBD_NORMAL_COMPLETION;
1481 }
1482 #endif
1483
1484 /* 4.6.8, 6.4.3.7 */
1485 static usbd_status
1486 xhci_reset_endpoint_locked(struct usbd_pipe *pipe)
1487 {
1488 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1489 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1490 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1491 struct xhci_soft_trb trb;
1492 usbd_status err;
1493
1494 XHCIHIST_FUNC();
1495 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1496
1497 KASSERT(mutex_owned(&sc->sc_lock));
1498
1499 trb.trb_0 = 0;
1500 trb.trb_2 = 0;
1501 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1502 XHCI_TRB_3_EP_SET(dci) |
1503 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP);
1504
1505 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1506
1507 return err;
1508 }
1509
1510 static usbd_status
1511 xhci_reset_endpoint(struct usbd_pipe *pipe)
1512 {
1513 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1514
1515 mutex_enter(&sc->sc_lock);
1516 usbd_status ret = xhci_reset_endpoint_locked(pipe);
1517 mutex_exit(&sc->sc_lock);
1518
1519 return ret;
1520 }
1521
1522 /*
1523 * 4.6.9, 6.4.3.8
1524 * Stop execution of TDs on xfer ring.
1525 * Should be called with sc_lock held.
1526 */
1527 static usbd_status
1528 xhci_stop_endpoint(struct usbd_pipe *pipe)
1529 {
1530 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1531 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1532 struct xhci_soft_trb trb;
1533 usbd_status err;
1534 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1535
1536 XHCIHIST_FUNC();
1537 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1538
1539 KASSERT(mutex_owned(&sc->sc_lock));
1540
1541 trb.trb_0 = 0;
1542 trb.trb_2 = 0;
1543 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1544 XHCI_TRB_3_EP_SET(dci) |
1545 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP);
1546
1547 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1548
1549 return err;
1550 }
1551
1552 /*
1553 * Set TR Dequeue Pointer.
1554 * xHCI 1.1 4.6.10 6.4.3.9
1555 * Purge all of the TRBs on the ring and reinitialize the ring.
1556 * Set the TR Dequeue Pointer to the start of the ring and the Cycle State to 1.
1557 * EPSTATE of endpoint must be ERROR or STOPPED, otherwise CONTEXT_STATE
1558 * error will be generated.
1559 */
1560 static usbd_status
1561 xhci_set_dequeue_locked(struct usbd_pipe *pipe)
1562 {
1563 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1564 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1565 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1566 struct xhci_ring * const xr = xs->xs_xr[dci];
1567 struct xhci_soft_trb trb;
1568 usbd_status err;
1569
1570 XHCIHIST_FUNC();
1571 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1572
1573 KASSERT(mutex_owned(&sc->sc_lock));
1574 KASSERT(xr != NULL);
1575
1576 xhci_host_dequeue(xr);
1577
1578 /* set DCS */
1579 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
1580 trb.trb_2 = 0;
1581 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1582 XHCI_TRB_3_EP_SET(dci) |
1583 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
1584
1585 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1586
1587 return err;
1588 }
1589
1590 static usbd_status
1591 xhci_set_dequeue(struct usbd_pipe *pipe)
1592 {
1593 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1594
1595 mutex_enter(&sc->sc_lock);
1596 usbd_status ret = xhci_set_dequeue_locked(pipe);
1597 mutex_exit(&sc->sc_lock);
1598
1599 return ret;
1600 }
1601
1602 /*
1603 * Open a new pipe: called from usbd_setup_pipe_flags.
1604 * Fills in the pipe's methods.
1605 * If the pipe is not for ep0, calls xhci_configure_endpoint.
1606 */
1607 static usbd_status
1608 xhci_open(struct usbd_pipe *pipe)
1609 {
1610 struct usbd_device * const dev = pipe->up_dev;
1611 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
1612 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
1613 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1614 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
1615 const u_int dci = xhci_ep_get_dci(ed);
1616 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
1617 usbd_status err;
1618
1619 XHCIHIST_FUNC();
1620 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr,
1621 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed);
1622 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx",
1623 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress,
1624 ed->bmAttributes);
1625 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize),
1626 ed->bInterval, 0, 0);
1627
1628 if (sc->sc_dying)
1629 return USBD_IOERROR;
1630
1631 /* Root Hub */
1632 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) {
1633 switch (ed->bEndpointAddress) {
1634 case USB_CONTROL_ENDPOINT:
1635 pipe->up_methods = &roothub_ctrl_methods;
1636 break;
1637 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
1638 pipe->up_methods = &xhci_root_intr_methods;
1639 break;
1640 default:
1641 pipe->up_methods = NULL;
1642 DPRINTFN(0, "bad bEndpointAddress 0x%02jx",
1643 ed->bEndpointAddress, 0, 0, 0);
1644 return USBD_INVAL;
1645 }
1646 return USBD_NORMAL_COMPLETION;
1647 }
1648
1649 switch (xfertype) {
1650 case UE_CONTROL:
1651 pipe->up_methods = &xhci_device_ctrl_methods;
1652 break;
1653 case UE_ISOCHRONOUS:
1654 pipe->up_methods = &xhci_device_isoc_methods;
1655 pipe->up_serialise = false;
1656 xpipe->xp_isoc_next = -1;
1657 break;
1658 case UE_BULK:
1659 pipe->up_methods = &xhci_device_bulk_methods;
1660 break;
1661 case UE_INTERRUPT:
1662 pipe->up_methods = &xhci_device_intr_methods;
1663 break;
1664 default:
1665 return USBD_IOERROR;
1666 break;
1667 }
1668
1669 KASSERT(xs != NULL);
1670 KASSERT(xs->xs_xr[dci] == NULL);
1671
1672 /* allocate transfer ring */
1673 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS,
1674 XHCI_TRB_ALIGN);
1675 if (err) {
1676 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0);
1677 return err;
1678 }
1679
1680 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT)
1681 return xhci_configure_endpoint(pipe);
1682
1683 return USBD_NORMAL_COMPLETION;
1684 }
1685
1686 /*
1687 * Close a pipe: called from usbd_kill_pipe via the close methods.
1688 * If the endpoint being closed is ep0, the whole slot is disabled.
1689 * Should be called with sc_lock held.
1690 */
1691 static void
1692 xhci_close_pipe(struct usbd_pipe *pipe)
1693 {
1694 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1695 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1696 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
1697 const u_int dci = xhci_ep_get_dci(ed);
1698 struct xhci_soft_trb trb;
1699 uint32_t *cp;
1700
1701 XHCIHIST_FUNC();
1702
1703 if (sc->sc_dying)
1704 return;
1705
1706 /* xs is uninitialized before xhci_init_slot */
1707 if (xs == NULL || xs->xs_idx == 0)
1708 return;
1709
1710 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
1711 (uintptr_t)pipe, xs->xs_idx, dci, 0);
1712
1713 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
1714 KASSERT(mutex_owned(&sc->sc_lock));
1715
1716 if (pipe->up_dev->ud_depth == 0)
1717 return;
1718
1719 if (dci == XHCI_DCI_EP_CONTROL) {
1720 DPRINTFN(4, "closing ep0", 0, 0, 0, 0);
1721 /* This frees all rings */
1722 xhci_disable_slot(sc, xs->xs_idx);
1723 return;
1724 }
1725
1726 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED)
1727 (void)xhci_stop_endpoint(pipe);
1728
1729 /*
1730 * set appropriate bit to be dropped.
1731 * don't set DC bit to 1, otherwise all endpoints
1732 * would be deconfigured.
1733 */
1734 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
1735 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));
1736 cp[1] = htole32(0);
1737
1738 /* XXX should be most significant one, not dci? */
1739 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
1740 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci));
1741
1742 /* configure ep context performs an implicit dequeue */
1743 xhci_host_dequeue(xs->xs_xr[dci]);
1744
1745 /* flush input contexts to memory before the controller reads them */
1746 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
1747
1748 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1749 trb.trb_2 = 0;
1750 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1751 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1752
1753 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1754 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1755
1756 xhci_ring_free(sc, &xs->xs_xr[dci]);
1757 }
1758
1759 /*
1760 * Abort transfer.
1761 * Should be called with sc_lock held.
1762 */
1763 static void
1764 xhci_abortx(struct usbd_xfer *xfer)
1765 {
1766 XHCIHIST_FUNC();
1767 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
1768 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
1769 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
1770
1771 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
1772 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
1773
1774 KASSERT(mutex_owned(&sc->sc_lock));
1775 ASSERT_SLEEPABLE();
1776
1777 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
1778 xfer->ux_status == USBD_TIMEOUT),
1779 "bad abort status: %d", xfer->ux_status);
1780
1781 /*
1782 * If we're dying, skip the hardware action and just notify the
1783 * software that we're done.
1784 */
1785 if (sc->sc_dying) {
1786 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer,
1787 xfer->ux_status, 0, 0);
1788 goto dying;
1789 }
1790
1791 /*
1792 * HC Step 1: Stop execution of TD on the ring.
1793 */
1794 switch (xhci_get_epstate(sc, xs, dci)) {
1795 case XHCI_EPSTATE_HALTED:
1796 (void)xhci_reset_endpoint_locked(xfer->ux_pipe);
1797 break;
1798 case XHCI_EPSTATE_STOPPED:
1799 break;
1800 default:
1801 (void)xhci_stop_endpoint(xfer->ux_pipe);
1802 break;
1803 }
1804 #ifdef DIAGNOSTIC
1805 uint32_t epst = xhci_get_epstate(sc, xs, dci);
1806 if (epst != XHCI_EPSTATE_STOPPED)
1807 DPRINTFN(4, "dci %ju not stopped %ju", dci, epst, 0, 0);
1808 #endif
1809
1810 /*
1811 * HC Step 2: Remove any vestiges of the xfer from the ring.
1812 */
1813 xhci_set_dequeue_locked(xfer->ux_pipe);
1814
1815 /*
1816 * Final Step: Notify completion to waiting xfers.
1817 */
1818 dying:
1819 usb_transfer_complete(xfer);
1820 DPRINTFN(14, "end", 0, 0, 0, 0);
1821
1822 KASSERT(mutex_owned(&sc->sc_lock));
1823 }
1824
1825 static void
1826 xhci_host_dequeue(struct xhci_ring * const xr)
1827 {
1828 /* When dequeueing the controller, update our struct copy too */
1829 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE);
1830 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE,
1831 BUS_DMASYNC_PREWRITE);
1832 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies));
1833
1834 xr->xr_ep = 0;
1835 xr->xr_cs = 1;
1836 }
1837
1838 /*
1839 * Recover STALLed endpoint.
1840 * xHCI 1.1 sect 4.10.2.1
1841 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove
1842 * all transfers on transfer ring.
1843 * These are done in thread context asynchronously.
1844 */
1845 static void
1846 xhci_clear_endpoint_stall_async_task(void *cookie)
1847 {
1848 struct usbd_xfer * const xfer = cookie;
1849 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
1850 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
1851 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
1852 struct xhci_ring * const tr = xs->xs_xr[dci];
1853
1854 XHCIHIST_FUNC();
1855 XHCIHIST_CALLARGS("xfer %#jx slot %ju dci %ju", (uintptr_t)xfer, xs->xs_idx,
1856 dci, 0);
1857
1858 /*
1859 * XXXMRG: Stall task can run after slot is disabled when yanked.
1860 * This hack notices that the xs has been memset() in
1861 * xhci_disable_slot() and returns. Both xhci_reset_endpoint()
1862 * and xhci_set_dequeue() rely upon a valid ring setup for correct
1863 * operation, and the latter will fault, as would
1864 * usb_transfer_complete() if it got that far.
1865 */
1866 if (xs->xs_idx == 0) {
1867 DPRINTFN(4, "ends xs_idx is 0", 0, 0, 0, 0);
1868 return;
1869 }
1870
1871 KASSERT(tr != NULL);
1872
1873 xhci_reset_endpoint(xfer->ux_pipe);
1874 xhci_set_dequeue(xfer->ux_pipe);
1875
1876 mutex_enter(&sc->sc_lock);
1877 tr->is_halted = false;
1878 usb_transfer_complete(xfer);
1879 mutex_exit(&sc->sc_lock);
1880 DPRINTFN(4, "ends", 0, 0, 0, 0);
1881 }
1882
1883 static usbd_status
1884 xhci_clear_endpoint_stall_async(struct usbd_xfer *xfer)
1885 {
1886 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
1887 struct xhci_pipe * const xp = (struct xhci_pipe *)xfer->ux_pipe;
1888
1889 XHCIHIST_FUNC();
1890 XHCIHIST_CALLARGS("xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
1891
1892 if (sc->sc_dying) {
1893 return USBD_IOERROR;
1894 }
1895
1896 usb_init_task(&xp->xp_async_task,
1897 xhci_clear_endpoint_stall_async_task, xfer, USB_TASKQ_MPSAFE);
1898 usb_add_task(xfer->ux_pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC);
1899 DPRINTFN(4, "ends", 0, 0, 0, 0);
1900
1901 return USBD_NORMAL_COMPLETION;
1902 }
1903
1904 /* Process roothub port status/change events and notify uhub_intr. */
1905 static void
1906 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport)
1907 {
1908 XHCIHIST_FUNC();
1909 XHCIHIST_CALLARGS("xhci%jd: port %ju status change",
1910 device_unit(sc->sc_dev), ctlrport, 0, 0);
1911
1912 if (ctlrport > sc->sc_maxports)
1913 return;
1914
1915 const size_t bn = xhci_ctlrport2bus(sc, ctlrport);
1916 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport);
1917 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn];
1918
1919 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change",
1920 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer);
1921
1922 if (xfer == NULL)
1923 return;
1924 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
1925
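	/* Report the change as a hub-style per-port bitmap: set the bit for this root hub port. */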
1926 uint8_t *p = xfer->ux_buf;
1927 memset(p, 0, xfer->ux_length);
1928 p[rhp / NBBY] |= 1 << (rhp % NBBY);
1929 xfer->ux_actlen = xfer->ux_length;
1930 xfer->ux_status = USBD_NORMAL_COMPLETION;
1931 usb_transfer_complete(xfer);
1932 }
1933
1934 /* Process Transfer Events */
1935 static void
1936 xhci_event_transfer(struct xhci_softc * const sc,
1937 const struct xhci_trb * const trb)
1938 {
1939 uint64_t trb_0;
1940 uint32_t trb_2, trb_3;
1941 uint8_t trbcode;
1942 u_int slot, dci;
1943 struct xhci_slot *xs;
1944 struct xhci_ring *xr;
1945 struct xhci_xfer *xx;
1946 struct usbd_xfer *xfer;
1947 usbd_status err;
1948
1949 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1950
1951 trb_0 = le64toh(trb->trb_0);
1952 trb_2 = le32toh(trb->trb_2);
1953 trb_3 = le32toh(trb->trb_3);
1954 trbcode = XHCI_TRB_2_ERROR_GET(trb_2);
1955 slot = XHCI_TRB_3_SLOT_GET(trb_3);
1956 dci = XHCI_TRB_3_EP_GET(trb_3);
1957 xs = &sc->sc_slots[slot];
1958 xr = xs->xs_xr[dci];
1959
1960 /* sanity check */
1961 KASSERT(xr != NULL);
1962 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots,
1963 "invalid xs_idx %u slot %u", xs->xs_idx, slot);
1964
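	/*
	 * With ED==0, trb_0 is the DMA address of the TRB that generated
	 * this event; map it back to a ring index to find the xfer cookie.
	 * With ED==1 (Event Data TRB), trb_0 carries the cookie directly.
	 */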
1965 int idx = 0;
1966 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
1967 if (xhci_trb_get_idx(xr, trb_0, &idx)) {
1968 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0);
1969 return;
1970 }
1971 xx = xr->xr_cookies[idx];
1972
1973 /* clear cookie of consumed TRB */
1974 xr->xr_cookies[idx] = NULL;
1975
1976 /*
1977 * xx is NULL if pipe is opened but xfer is not started.
1978 * It happens when stopping idle pipe.
1979 */
1980 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) {
1981 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju",
1982 idx, (uintptr_t)xx, trbcode, dci);
1983 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0,
1984 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)),
1985 0, 0);
1986 return;
1987 }
1988 } else {
1989 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. */
1990 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
1991 }
1992 /* XXX this may not happen */
1993 if (xx == NULL) {
1994 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
1995 return;
1996 }
1997 xfer = &xx->xx_xfer;
1998 /* XXX this may happen when detaching */
1999 if (xfer == NULL) {
2000 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2001 (uintptr_t)xx, trb_0, 0, 0);
2002 return;
2003 }
2004 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2005 /* XXX I dunno why this happens */
2006 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2007
2008 if (!xfer->ux_pipe->up_repeat &&
2009 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2010 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2011 0, 0, 0);
2012 return;
2013 }
2014
2015 const uint8_t xfertype =
2016 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2017
2018 /* 4.11.5.2 Event Data TRB */
2019 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2020 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2021 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2022 if ((trb_0 & 0x3) == 0x3) {
2023 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2024 }
2025 }
2026
2027 switch (trbcode) {
2028 case XHCI_TRB_ERROR_SHORT_PKT:
2029 case XHCI_TRB_ERROR_SUCCESS:
2030 /*
2031 * A ctrl transfer can generate two events if it has a Data
2032 * stage. A short data stage can be OK and should not
2033 * complete the transfer as the status stage needs to be
2034 * performed.
2035 *
2036 		 * Note: Data and Status stage events point at the same xfer.
2037 		 * ux_actlen and ux_dmabuf will be passed to
2038 		 * usb_transfer_complete after the Status stage event.
2039 		 *
2040 		 * Which stage generated the event can be distinguished:
2041 		 * + by checking the least 3 bits of trb_0 if ED==1
2042 		 *   (see xhci_device_ctrl_start).
2043 		 * + by checking the type of the original TRB if ED==0.
2044 		 *
2045 		 * In addition, intr, bulk, and isoc transfers currently
2046 		 * consist of a single TD, so the "skip" is not needed.
2047 		 * ctrl xfers use EVENT_DATA, and the others do not.
2048 		 * Thus the driver can switch flows by checking the ED bit.
2049 */
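		/*
		 * For isoc, accumulate the actual length of each frame and
		 * complete the xfer only after the last frame's event.
		 */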
2050 if (xfertype == UE_ISOCHRONOUS) {
2051 xfer->ux_frlengths[xx->xx_isoc_done] -=
2052 XHCI_TRB_2_REM_GET(trb_2);
2053 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2054 if (++xx->xx_isoc_done < xfer->ux_nframes)
2055 return;
2056 } else
2057 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2058 if (xfer->ux_actlen == 0)
2059 xfer->ux_actlen = xfer->ux_length -
2060 XHCI_TRB_2_REM_GET(trb_2);
2061 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2062 == XHCI_TRB_TYPE_DATA_STAGE) {
2063 return;
2064 }
2065 } else if ((trb_0 & 0x3) == 0x3) {
2066 return;
2067 }
2068 err = USBD_NORMAL_COMPLETION;
2069 break;
2070 case XHCI_TRB_ERROR_STOPPED:
2071 case XHCI_TRB_ERROR_LENGTH:
2072 case XHCI_TRB_ERROR_STOPPED_SHORT:
2073 err = USBD_IOERROR;
2074 break;
2075 case XHCI_TRB_ERROR_STALL:
2076 case XHCI_TRB_ERROR_BABBLE:
2077 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2078 xr->is_halted = true;
2079 /*
2080 * Try to claim this xfer for completion. If it has already
2081 * completed or aborted, drop it on the floor.
2082 */
2083 if (!usbd_xfer_trycomplete(xfer))
2084 return;
2085
2086 /*
2087 		 * Stalled endpoints can be recovered by issuing
2088 		 * a TYPE_RESET_EP command TRB to the xHC instead of
2089 		 * issuing a CLEAR_FEATURE UF_ENDPOINT_HALT request
2090 		 * to the endpoint. However, this function may be
2091 		 * called from softint context (e.g. from umass);
2092 		 * in that case the driver would hit the KASSERT in
2093 		 * cv_timedwait in xhci_do_command.
2094 		 * To avoid this, reset_endpoint and
2095 		 * usb_transfer_complete are run asynchronously in the
2096 		 * usb task thread (and then umass issues the clear
2097 		 * UF_ENDPOINT_HALT).
2098 */
2099
2100 /* Override the status. */
2101 xfer->ux_status = USBD_STALLED;
2102
2103 xhci_clear_endpoint_stall_async(xfer);
2104 return;
2105 default:
2106 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2107 err = USBD_IOERROR;
2108 break;
2109 }
2110
2111 /*
2112 * Try to claim this xfer for completion. If it has already
2113 * completed or aborted, drop it on the floor.
2114 */
2115 if (!usbd_xfer_trycomplete(xfer))
2116 return;
2117
2118 /* Set the status. */
2119 xfer->ux_status = err;
2120
2121 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
2122 (trb_0 & 0x3) == 0x0) {
2123 usb_transfer_complete(xfer);
2124 }
2125 }
2126
2127 /* Process Command complete events */
2128 static void
2129 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb)
2130 {
2131 uint64_t trb_0;
2132 uint32_t trb_2, trb_3;
2133
2134 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2135
2136 KASSERT(mutex_owned(&sc->sc_lock));
2137
2138 trb_0 = le64toh(trb->trb_0);
2139 trb_2 = le32toh(trb->trb_2);
2140 trb_3 = le32toh(trb->trb_3);
2141
2142 if (trb_0 == sc->sc_command_addr) {
2143 sc->sc_resultpending = false;
2144
2145 sc->sc_result_trb.trb_0 = trb_0;
2146 sc->sc_result_trb.trb_2 = trb_2;
2147 sc->sc_result_trb.trb_3 = trb_3;
2148 if (XHCI_TRB_2_ERROR_GET(trb_2) !=
2149 XHCI_TRB_ERROR_SUCCESS) {
2150 DPRINTFN(1, "command completion "
2151 "failure: 0x%016jx 0x%08jx 0x%08jx",
2152 trb_0, trb_2, trb_3, 0);
2153 }
2154 cv_signal(&sc->sc_command_cv);
2155 } else {
2156 DPRINTFN(1, "spurious event: %#jx 0x%016jx "
2157 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3);
2158 }
2159 }
2160
2161 /*
2162 * Process events.
2163 * called from xhci_softintr
2164 */
2165 static void
2166 xhci_handle_event(struct xhci_softc * const sc,
2167 const struct xhci_trb * const trb)
2168 {
2169 uint64_t trb_0;
2170 uint32_t trb_2, trb_3;
2171
2172 XHCIHIST_FUNC();
2173
2174 trb_0 = le64toh(trb->trb_0);
2175 trb_2 = le32toh(trb->trb_2);
2176 trb_3 = le32toh(trb->trb_3);
2177
2178 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx",
2179 (uintptr_t)trb, trb_0, trb_2, trb_3);
2180
2181 /*
2182 * 4.11.3.1, 6.4.2.1
2183 * TRB Pointer is invalid for these completion codes.
2184 */
2185 switch (XHCI_TRB_2_ERROR_GET(trb_2)) {
2186 case XHCI_TRB_ERROR_RING_UNDERRUN:
2187 case XHCI_TRB_ERROR_RING_OVERRUN:
2188 case XHCI_TRB_ERROR_VF_RING_FULL:
2189 return;
2190 default:
2191 if (trb_0 == 0) {
2192 return;
2193 }
2194 break;
2195 }
2196
2197 switch (XHCI_TRB_3_TYPE_GET(trb_3)) {
2198 case XHCI_TRB_EVENT_TRANSFER:
2199 xhci_event_transfer(sc, trb);
2200 break;
2201 case XHCI_TRB_EVENT_CMD_COMPLETE:
2202 xhci_event_cmd(sc, trb);
2203 break;
2204 case XHCI_TRB_EVENT_PORT_STS_CHANGE:
2205 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff));
2206 break;
2207 default:
2208 break;
2209 }
2210 }
2211
2212 static void
2213 xhci_softintr(void *v)
2214 {
2215 struct usbd_bus * const bus = v;
2216 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2217 struct xhci_ring * const er = sc->sc_er;
2218 struct xhci_trb *trb;
2219 int i, j, k;
2220
2221 XHCIHIST_FUNC();
2222
2223 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2224
2225 i = er->xr_ep;
2226 j = er->xr_cs;
2227
2228 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0);
2229
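	/*
	 * Consume events while the TRB cycle bit matches our consumer
	 * cycle state; toggle the state each time the ring wraps.
	 */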
2230 while (1) {
2231 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE,
2232 BUS_DMASYNC_POSTREAD);
2233 trb = &er->xr_trb[i];
2234 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0;
2235
2236 if (j != k)
2237 break;
2238
2239 xhci_handle_event(sc, trb);
2240
2241 i++;
2242 if (i == er->xr_ntrb) {
2243 i = 0;
2244 j ^= 1;
2245 }
2246 }
2247
2248 er->xr_ep = i;
2249 er->xr_cs = j;
2250
2251 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) |
2252 XHCI_ERDP_BUSY);
2253
2254 DPRINTFN(16, "ends", 0, 0, 0, 0);
2255
2256 return;
2257 }
2258
2259 static void
2260 xhci_poll(struct usbd_bus *bus)
2261 {
2262 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2263
2264 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2265
2266 mutex_enter(&sc->sc_intr_lock);
2267 int ret = xhci_intr1(sc);
2268 if (ret) {
2269 xhci_softintr(bus);
2270 }
2271 mutex_exit(&sc->sc_intr_lock);
2272
2273 return;
2274 }
2275
2276 static struct usbd_xfer *
2277 xhci_allocx(struct usbd_bus *bus, unsigned int nframes)
2278 {
2279 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2280 struct xhci_xfer *xx;
2281 u_int ntrbs;
2282
2283 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2284
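	/*
	 * Reserve room for at least 3 soft TRBs per xfer: a control transfer
	 * needs up to 3 (setup/data/status), an isoc xfer one per frame.
	 */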
2285 ntrbs = uimax(3, nframes);
2286 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs;
2287
2288 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
2289 if (xx != NULL) {
2290 memset(xx, 0, sizeof(*xx));
2291 if (ntrbs > 0) {
2292 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP);
2293 xx->xx_ntrb = ntrbs;
2294 }
2295 #ifdef DIAGNOSTIC
2296 xx->xx_xfer.ux_state = XFER_BUSY;
2297 #endif
2298 }
2299
2300 return &xx->xx_xfer;
2301 }
2302
2303 static void
2304 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
2305 {
2306 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2307 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
2308
2309 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2310
2311 #ifdef DIAGNOSTIC
2312 if (xfer->ux_state != XFER_BUSY &&
2313 xfer->ux_status != USBD_NOT_STARTED) {
2314 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx",
2315 (uintptr_t)xfer, xfer->ux_state, 0, 0);
2316 }
2317 xfer->ux_state = XFER_FREE;
2318 #endif
2319 if (xx->xx_ntrb > 0) {
2320 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb));
2321 xx->xx_trb = NULL;
2322 xx->xx_ntrb = 0;
2323 }
2324 pool_cache_put(sc->sc_xferpool, xx);
2325 }
2326
2327 static bool
2328 xhci_dying(struct usbd_bus *bus)
2329 {
2330 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2331
2332 return sc->sc_dying;
2333 }
2334
2335 static void
2336 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2337 {
2338 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2339
2340 *lock = &sc->sc_lock;
2341 }
2342
2343 extern uint32_t usb_cookie_no;
2344
2345 /*
2346 * xHCI 4.3
2347 * Called when uhub_explore finds a new device (via usbd_new_device).
2348 * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2349 * This function does:
2350  *  Allocate and construct the dev structure and its default endpoint (ep0).
2351 * Allocate and open pipe of ep0.
2352 * Enable slot and initialize slot context.
2353 * Set Address.
2354 * Read initial device descriptor.
2355 * Determine initial MaxPacketSize (mps) by speed.
2356 * Read full device descriptor.
2357 * Register this device.
2358  * Finally the device state transitions to ADDRESSED.
2359 */
2360 static usbd_status
2361 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2362 int speed, int port, struct usbd_port *up)
2363 {
2364 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2365 struct usbd_device *dev;
2366 usbd_status err;
2367 usb_device_descriptor_t *dd;
2368 struct xhci_slot *xs;
2369 uint32_t *cp;
2370
2371 XHCIHIST_FUNC();
2372 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2373 port, depth, speed, (uintptr_t)up);
2374
2375 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2376 dev->ud_bus = bus;
2377 dev->ud_quirks = &usbd_no_quirk;
2378 dev->ud_addr = 0;
2379 dev->ud_ddesc.bMaxPacketSize = 0;
2380 dev->ud_depth = depth;
2381 dev->ud_powersrc = up;
2382 dev->ud_myhub = up->up_parent;
2383 dev->ud_speed = speed;
2384 dev->ud_langid = USBD_NOLANG;
2385 dev->ud_cookie.cookie = ++usb_cookie_no;
2386
2387 /* Set up default endpoint handle. */
2388 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2389 	/* doesn't matter, just don't leave it uninitialized */
2390 dev->ud_ep0.ue_toggle = 0;
2391
2392 /* Set up default endpoint descriptor. */
2393 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2394 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2395 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2396 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2397 dev->ud_ep0desc.bInterval = 0;
2398
2399 /* 4.3, 4.8.2.1 */
2400 switch (speed) {
2401 case USB_SPEED_SUPER:
2402 case USB_SPEED_SUPER_PLUS:
2403 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2404 break;
2405 case USB_SPEED_FULL:
2406 /* XXX using 64 as initial mps of ep0 in FS */
2407 case USB_SPEED_HIGH:
2408 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2409 break;
2410 case USB_SPEED_LOW:
2411 default:
2412 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2413 break;
2414 }
2415
2416 up->up_dev = dev;
2417
2418 dd = &dev->ud_ddesc;
2419
2420 if (depth == 0 && port == 0) {
2421 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2422 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2423
2424 /* Establish the default pipe. */
2425 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2426 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2427 if (err) {
2428 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0);
2429 goto bad;
2430 }
2431 err = usbd_get_initial_ddesc(dev, dd);
2432 if (err) {
2433 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2434 goto bad;
2435 }
2436 } else {
2437 uint8_t slot = 0;
2438
2439 /* 4.3.2 */
2440 err = xhci_enable_slot(sc, &slot);
2441 if (err) {
2442 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0);
2443 goto bad;
2444 }
2445
2446 xs = &sc->sc_slots[slot];
2447 dev->ud_hcpriv = xs;
2448
2449 /* 4.3.3 initialize slot structure */
2450 err = xhci_init_slot(dev, slot);
2451 if (err) {
2452 DPRINTFN(1, "init slot %ju", err, 0, 0, 0);
2453 dev->ud_hcpriv = NULL;
2454 /*
2455 * We have to disable_slot here because
2456 * xs->xs_idx == 0 when xhci_init_slot fails,
2457 			 * in which case usbd_remove_device won't work.
2458 */
2459 mutex_enter(&sc->sc_lock);
2460 xhci_disable_slot(sc, slot);
2461 mutex_exit(&sc->sc_lock);
2462 goto bad;
2463 }
2464
2465 /*
2466 * We have to establish the default pipe _after_ slot
2467 * structure has been prepared.
2468 */
2469 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2470 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2471 if (err) {
2472 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0,
2473 0);
2474 goto bad;
2475 }
2476
2477 /* 4.3.4 Address Assignment */
2478 err = xhci_set_address(dev, slot, false);
2479 if (err) {
2480 			DPRINTFN(1, "failed to set address: %ju", err, 0, 0, 0);
2481 goto bad;
2482 }
2483
2484 /* Allow device time to set new address */
2485 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE);
2486
2487 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2488 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT);
2489 HEXDUMP("slot context", cp, sc->sc_ctxsz);
2490 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3]));
2491 DPRINTFN(4, "device address %ju", addr, 0, 0, 0);
2492 /*
2493 * XXX ensure we know when the hardware does something
2494 * we can't yet cope with
2495 */
2496 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr);
2497 dev->ud_addr = addr;
2498
2499 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL,
2500 "addr %d already allocated", dev->ud_addr);
2501 /*
2502 * The root hub is given its own slot
2503 */
2504 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev;
2505
2506 err = usbd_get_initial_ddesc(dev, dd);
2507 if (err) {
2508 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2509 goto bad;
2510 }
2511
2512 /* 4.8.2.1 */
2513 if (USB_IS_SS(speed)) {
2514 if (dd->bMaxPacketSize != 9) {
2515 printf("%s: invalid mps 2^%u for SS ep0,"
2516 " using 512\n",
2517 device_xname(sc->sc_dev),
2518 dd->bMaxPacketSize);
2519 dd->bMaxPacketSize = 9;
2520 }
2521 USETW(dev->ud_ep0desc.wMaxPacketSize,
2522 (1 << dd->bMaxPacketSize));
2523 } else
2524 USETW(dev->ud_ep0desc.wMaxPacketSize,
2525 dd->bMaxPacketSize);
2526 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0);
2527 err = xhci_update_ep0_mps(sc, xs,
2528 UGETW(dev->ud_ep0desc.wMaxPacketSize));
2529 if (err) {
2530 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0);
2531 goto bad;
2532 }
2533 }
2534
2535 err = usbd_reload_device_desc(dev);
2536 if (err) {
2537 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
2538 goto bad;
2539 }
2540
2541 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,",
2542 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0);
2543 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,",
2544 dd->bDeviceClass, dd->bDeviceSubClass,
2545 dd->bDeviceProtocol, 0);
2546 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd",
2547 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations,
2548 dev->ud_speed);
2549
2550 usbd_get_device_strings(dev);
2551
2552 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev);
2553
2554 if (depth == 0 && port == 0) {
2555 usbd_attach_roothub(parent, dev);
2556 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0);
2557 return USBD_NORMAL_COMPLETION;
2558 }
2559
2560 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr);
2561 bad:
2562 if (err != USBD_NORMAL_COMPLETION) {
2563 usbd_remove_device(dev, up);
2564 }
2565
2566 return err;
2567 }
2568
2569 static usbd_status
2570 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp,
2571 size_t ntrb, size_t align)
2572 {
2573 usbd_status err;
2574 size_t size = ntrb * XHCI_TRB_SIZE;
2575 struct xhci_ring *xr;
2576
2577 XHCIHIST_FUNC();
2578 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx",
2579 (uintptr_t)*xrp, ntrb, align, 0);
2580
2581 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP);
2582 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0);
2583
2584 err = usb_allocmem(&sc->sc_bus, size, align, USBMALLOC_COHERENT,
2585 &xr->xr_dma);
2586 if (err) {
2587 kmem_free(xr, sizeof(struct xhci_ring));
2588 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0);
2589 return err;
2590 }
2591 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
2592 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP);
2593 xr->xr_trb = xhci_ring_trbv(xr, 0);
2594 xr->xr_ntrb = ntrb;
2595 xr->is_halted = false;
2596 xhci_host_dequeue(xr);
2597 *xrp = xr;
2598
2599 return USBD_NORMAL_COMPLETION;
2600 }
2601
2602 static void
2603 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr)
2604 {
2605 if (*xr == NULL)
2606 return;
2607
2608 usb_freemem(&sc->sc_bus, &(*xr)->xr_dma);
2609 mutex_destroy(&(*xr)->xr_lock);
2610 kmem_free((*xr)->xr_cookies,
2611 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb);
2612 kmem_free(*xr, sizeof(struct xhci_ring));
2613 *xr = NULL;
2614 }
2615
2616 static void
2617 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr,
2618 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs)
2619 {
2620 size_t i;
2621 u_int ri;
2622 u_int cs;
2623 uint64_t parameter;
2624 uint32_t status;
2625 uint32_t control;
2626
2627 XHCIHIST_FUNC();
2628 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju",
2629 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0);
2630
2631 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u",
2632 ntrbs, xr->xr_ntrb);
2633 for (i = 0; i < ntrbs; i++) {
2634 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr,
2635 (uintptr_t)trbs, i, 0);
2636 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx",
2637 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0);
2638 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) !=
2639 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3);
2640 }
2641
2642 ri = xr->xr_ep;
2643 cs = xr->xr_cs;
2644
2645 /*
2646 * Although the xhci hardware can do scatter/gather dma from
2647 * arbitrary sized buffers, there is a non-obvious restriction
2648 * that a LINK trb is only allowed at the end of a burst of
2649 * transfers - which might be 16kB.
2650 	 * Arbitrarily aligned LINK trbs definitely fail on Ivy Bridge.
2651 * The simple solution is not to allow a LINK trb in the middle
2652 * of anything - as here.
2653 * XXX: (dsl) There are xhci controllers out there (eg some made by
2654 * ASMedia) that seem to lock up if they process a LINK trb but
2655 * cannot process the linked-to trb yet.
2656 * The code should write the 'cycle' bit on the link trb AFTER
2657 * adding the other trb.
2658 */
2659 u_int firstep = xr->xr_ep;
2660 u_int firstcs = xr->xr_cs;
2661
2662 for (i = 0; i < ntrbs; ) {
2663 u_int oldri = ri;
2664 u_int oldcs = cs;
2665
2666 if (ri >= (xr->xr_ntrb - 1)) {
2667 /* Put Link TD at the end of ring */
2668 parameter = xhci_ring_trbp(xr, 0);
2669 status = 0;
2670 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) |
2671 XHCI_TRB_3_TC_BIT;
2672 xr->xr_cookies[ri] = NULL;
2673 xr->xr_ep = 0;
2674 xr->xr_cs ^= 1;
2675 ri = xr->xr_ep;
2676 cs = xr->xr_cs;
2677 } else {
2678 parameter = trbs[i].trb_0;
2679 status = trbs[i].trb_2;
2680 control = trbs[i].trb_3;
2681
2682 xr->xr_cookies[ri] = cookie;
2683 ri++;
2684 i++;
2685 }
2686 /*
2687 		 * If this is the first TRB, mark it invalid to prevent the
2688 		 * xHC from running it immediately.
2689 */
2690 if (oldri == firstep) {
2691 if (oldcs) {
2692 control &= ~XHCI_TRB_3_CYCLE_BIT;
2693 } else {
2694 control |= XHCI_TRB_3_CYCLE_BIT;
2695 }
2696 } else {
2697 if (oldcs) {
2698 control |= XHCI_TRB_3_CYCLE_BIT;
2699 } else {
2700 control &= ~XHCI_TRB_3_CYCLE_BIT;
2701 }
2702 }
2703 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control);
2704 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri,
2705 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
2706 }
2707
2708 /* Now invert cycle bit of first TRB */
2709 if (firstcs) {
2710 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT);
2711 } else {
2712 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT);
2713 }
2714 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep,
2715 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
2716
2717 xr->xr_ep = ri;
2718 xr->xr_cs = cs;
2719
2720 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep,
2721 xr->xr_cs, 0);
2722 }
2723
2724 static inline void
2725 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr,
2726 struct xhci_xfer *xx, u_int ntrb)
2727 {
2728 KASSERT(ntrb <= xx->xx_ntrb);
2729 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb);
2730 }
2731
2732 /*
2733  * Stop execution of commands, purge all commands on the command ring, and
2734  * rewind the dequeue pointer.
2735 */
2736 static void
2737 xhci_abort_command(struct xhci_softc *sc)
2738 {
2739 struct xhci_ring * const cr = sc->sc_cr;
2740 uint64_t crcr;
2741 int i;
2742
2743 XHCIHIST_FUNC();
2744 XHCIHIST_CALLARGS("command %#jx timeout, aborting",
2745 sc->sc_command_addr, 0, 0, 0);
2746
2747 mutex_enter(&cr->xr_lock);
2748
2749 /* 4.6.1.2 Aborting a Command */
2750 crcr = xhci_op_read_8(sc, XHCI_CRCR);
2751 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA);
2752
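	/* Wait up to ~500ms for the Command Ring Running (CRR) bit to clear. */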
2753 for (i = 0; i < 500; i++) {
2754 crcr = xhci_op_read_8(sc, XHCI_CRCR);
2755 if ((crcr & XHCI_CRCR_LO_CRR) == 0)
2756 break;
2757 usb_delay_ms(&sc->sc_bus, 1);
2758 }
2759 if ((crcr & XHCI_CRCR_LO_CRR) != 0) {
2760 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0);
2761 /* reset HC here? */
2762 }
2763
2764 /* reset command ring dequeue pointer */
2765 cr->xr_ep = 0;
2766 cr->xr_cs = 1;
2767 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs);
2768
2769 mutex_exit(&cr->xr_lock);
2770 }
2771
2772 /*
2773  * Put a command on the command ring, ring the doorbell, and wait for
2774  * completion with cv_timedwait.  Completion is notified by cv_signal from
2775  * xhci_event_cmd() (called from xhci_softintr), or the wait times out.
2776 * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(),
2777 * then do_command examines it.
2778 */
2779 static usbd_status
2780 xhci_do_command_locked(struct xhci_softc * const sc,
2781 struct xhci_soft_trb * const trb, int timeout)
2782 {
2783 struct xhci_ring * const cr = sc->sc_cr;
2784 usbd_status err;
2785
2786 XHCIHIST_FUNC();
2787 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx",
2788 trb->trb_0, trb->trb_2, trb->trb_3, 0);
2789
2790 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
2791 KASSERT(mutex_owned(&sc->sc_lock));
2792
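	/* Only one command may be outstanding at a time; wait for our turn. */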
2793 while (sc->sc_command_addr != 0)
2794 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
2795
2796 /*
2797 	 * If the enqueue pointer points at the last slot of the ring, that slot
2798 	 * holds the Link TRB and the command TRB will be stored in the 0th TRB.
2799 */
2800 if (cr->xr_ep == cr->xr_ntrb - 1)
2801 sc->sc_command_addr = xhci_ring_trbp(cr, 0);
2802 else
2803 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep);
2804
2805 sc->sc_resultpending = true;
2806
2807 mutex_enter(&cr->xr_lock);
2808 xhci_ring_put(sc, cr, NULL, trb, 1);
2809 mutex_exit(&cr->xr_lock);
2810
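	/* Ring doorbell 0 (the command doorbell) to start command execution. */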
2811 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0);
2812
2813 while (sc->sc_resultpending) {
2814 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock,
2815 MAX(1, mstohz(timeout))) == EWOULDBLOCK) {
2816 xhci_abort_command(sc);
2817 err = USBD_TIMEOUT;
2818 goto timedout;
2819 }
2820 }
2821
2822 trb->trb_0 = sc->sc_result_trb.trb_0;
2823 trb->trb_2 = sc->sc_result_trb.trb_2;
2824 trb->trb_3 = sc->sc_result_trb.trb_3;
2825
2826 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx",
2827 trb->trb_0, trb->trb_2, trb->trb_3, 0);
2828
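	/*
	 * Per xHCI 1.1 sect 6.4.5, completion codes 192..223 are
	 * vendor-defined errors and 224..255 are vendor-defined info codes;
	 * treat the former as I/O errors and the latter as success.
	 */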
2829 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) {
2830 case XHCI_TRB_ERROR_SUCCESS:
2831 err = USBD_NORMAL_COMPLETION;
2832 break;
2833 default:
2834 case 192 ... 223:
2835 DPRINTFN(5, "error %#jx",
2836 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0);
2837 err = USBD_IOERROR;
2838 break;
2839 case 224 ... 255:
2840 err = USBD_NORMAL_COMPLETION;
2841 break;
2842 }
2843
2844 timedout:
2845 sc->sc_resultpending = false;
2846 sc->sc_command_addr = 0;
2847 cv_broadcast(&sc->sc_cmdbusy_cv);
2848
2849 return err;
2850 }
2851
2852 static usbd_status
2853 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb,
2854 int timeout)
2855 {
2856
2857 mutex_enter(&sc->sc_lock);
2858 usbd_status ret = xhci_do_command_locked(sc, trb, timeout);
2859 mutex_exit(&sc->sc_lock);
2860
2861 return ret;
2862 }
2863
2864 static usbd_status
2865 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp)
2866 {
2867 struct xhci_soft_trb trb;
2868 usbd_status err;
2869
2870 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2871
2872 trb.trb_0 = 0;
2873 trb.trb_2 = 0;
2874 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
2875
2876 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
2877 if (err != USBD_NORMAL_COMPLETION) {
2878 return err;
2879 }
2880
2881 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3);
2882
2883 return err;
2884 }
2885
2886 /*
2887 * xHCI 4.6.4
2888  * Disable the slot, then deallocate its rings and device/input context DMA buffers.
2889 * All endpoints in the slot should be stopped.
2890 * Should be called with sc_lock held.
2891 */
2892 static usbd_status
2893 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot)
2894 {
2895 struct xhci_soft_trb trb;
2896 struct xhci_slot *xs;
2897 usbd_status err;
2898
2899 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2900
2901 if (sc->sc_dying)
2902 return USBD_IOERROR;
2903
2904 trb.trb_0 = 0;
2905 trb.trb_2 = 0;
2906 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) |
2907 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT);
2908
2909 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
2910
2911 if (!err) {
2912 xs = &sc->sc_slots[slot];
2913 if (xs->xs_idx != 0) {
2914 xhci_free_slot(sc, xs);
2915 xhci_set_dcba(sc, 0, slot);
2916 memset(xs, 0, sizeof(*xs));
2917 }
2918 }
2919
2920 return err;
2921 }
2922
2923 /*
2924 * Set address of device and transition slot state from ENABLED to ADDRESSED
2925  * if the Block Set Address Request (BSR) flag is false.
2926 * If BSR==true, transition slot state from ENABLED to DEFAULT.
2927 * see xHCI 1.1 4.5.3, 3.3.4
2928 * Should be called without sc_lock held.
2929 */
2930 static usbd_status
2931 xhci_address_device(struct xhci_softc * const sc,
2932 uint64_t icp, uint8_t slot_id, bool bsr)
2933 {
2934 struct xhci_soft_trb trb;
2935 usbd_status err;
2936
2937 XHCIHIST_FUNC();
2938 if (bsr) {
2939 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr",
2940 icp, slot_id, 0, 0);
2941 } else {
2942 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr",
2943 icp, slot_id, 0, 0);
2944 }
2945
2946 trb.trb_0 = icp;
2947 trb.trb_2 = 0;
2948 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) |
2949 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) |
2950 (bsr ? XHCI_TRB_3_BSR_BIT : 0);
2951
2952 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
2953
2954 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS)
2955 err = USBD_NO_ADDR;
2956
2957 return err;
2958 }
2959
2960 static usbd_status
2961 xhci_update_ep0_mps(struct xhci_softc * const sc,
2962 struct xhci_slot * const xs, u_int mps)
2963 {
2964 struct xhci_soft_trb trb;
2965 usbd_status err;
2966 uint32_t * cp;
2967
2968 XHCIHIST_FUNC();
2969 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0);
2970
2971 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
2972 cp[0] = htole32(0);
2973 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL));
2974
2975 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL));
2976 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps));
2977
2978 /* sync input contexts before they are read from memory */
2979 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
2980 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
2981 sc->sc_ctxsz * 4);
2982
2983 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
2984 trb.trb_2 = 0;
2985 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2986 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX);
2987
2988 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
2989 return err;
2990 }
2991
2992 static void
2993 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si)
2994 {
2995 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0);
2996
2997 XHCIHIST_FUNC();
2998 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd",
2999 (uintptr_t)&dcbaa[si], dcba, si, 0);
3000
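	/* Point the slot's DCBAA entry at the device context (dcba == 0 clears it). */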
3001 dcbaa[si] = htole64(dcba);
3002 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t),
3003 BUS_DMASYNC_PREWRITE);
3004 }
3005
3006 /*
3007  * Allocate the device and input context DMA buffers for a slot
3008  * (the per-endpoint TRB rings are allocated when each pipe is opened).
3009 */
3010 static usbd_status
3011 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3012 {
3013 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3014 struct xhci_slot *xs;
3015 usbd_status err;
3016
3017 XHCIHIST_FUNC();
3018 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3019
3020 xs = &sc->sc_slots[slot];
3021
3022 /* allocate contexts */
3023 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, sc->sc_pgsz,
3024 USBMALLOC_COHERENT, &xs->xs_dc_dma);
3025 if (err) {
3026 DPRINTFN(1, "failed to allocmem output device context %jd",
3027 err, 0, 0, 0);
3028 return err;
3029 }
3030 memset(KERNADDR(&xs->xs_dc_dma, 0), 0, sc->sc_pgsz);
3031
3032 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, sc->sc_pgsz,
3033 USBMALLOC_COHERENT, &xs->xs_ic_dma);
3034 if (err) {
3035 DPRINTFN(1, "failed to allocmem input device context %jd",
3036 err, 0, 0, 0);
3037 goto bad1;
3038 }
3039 memset(KERNADDR(&xs->xs_ic_dma, 0), 0, sc->sc_pgsz);
3040
3041 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr));
3042 xs->xs_idx = slot;
3043
3044 return USBD_NORMAL_COMPLETION;
3045
3046 bad1:
3047 usb_freemem(&sc->sc_bus, &xs->xs_dc_dma);
3048 xs->xs_idx = 0;
3049 return err;
3050 }
3051
3052 static void
3053 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs)
3054 {
3055 u_int dci;
3056
3057 XHCIHIST_FUNC();
3058 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
3059
3060 /* deallocate all allocated rings in the slot */
3061 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
3062 if (xs->xs_xr[dci] != NULL)
3063 xhci_ring_free(sc, &xs->xs_xr[dci]);
3064 }
3065 usb_freemem(&sc->sc_bus, &xs->xs_ic_dma);
3066 usb_freemem(&sc->sc_bus, &xs->xs_dc_dma);
3067 xs->xs_idx = 0;
3068 }
3069
3070 /*
3071 * Setup slot context, set Device Context Base Address, and issue
3072 * Set Address Device command.
3073 */
3074 static usbd_status
3075 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3076 {
3077 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3078 struct xhci_slot *xs;
3079 usbd_status err;
3080
3081 XHCIHIST_FUNC();
3082 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3083
3084 xs = &sc->sc_slots[slot];
3085
3086 xhci_setup_ctx(dev->ud_pipe0);
3087
3088 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3089 sc->sc_ctxsz * 3);
3090
3091 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3092
3093 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3094
3095 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3096 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3097 sc->sc_ctxsz * 2);
3098
3099 return err;
3100 }
3101
3102 /*
3103 * 4.8.2, 6.2.3.2
3104 * construct slot/endpoint context parameters and do syncmem
3105 */
3106 static void
3107 xhci_setup_ctx(struct usbd_pipe *pipe)
3108 {
3109 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3110 struct usbd_device *dev = pipe->up_dev;
3111 struct xhci_slot * const xs = dev->ud_hcpriv;
3112 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3113 const u_int dci = xhci_ep_get_dci(ed);
3114 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3115 uint32_t *cp;
3116 uint16_t mps = UGETW(ed->wMaxPacketSize);
3117 uint8_t speed = dev->ud_speed;
3118 uint8_t ival = ed->bInterval;
3119
3120 XHCIHIST_FUNC();
3121 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3122 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3123
3124 /* set up initial input control context */
3125 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3126 cp[0] = htole32(0);
3127 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3128 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT));
3129 cp[7] = htole32(0);
3130
3131 /* set up input slot context */
3132 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
3133 cp[0] =
3134 XHCI_SCTX_0_CTX_NUM_SET(dci) |
3135 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed));
3136 cp[1] = 0;
3137 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0);
3138 cp[3] = 0;
3139 xhci_setup_route(pipe, cp);
3140 xhci_setup_tthub(pipe, cp);
3141
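	/* The slot context was assembled in host order above; store it little-endian. */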
3142 cp[0] = htole32(cp[0]);
3143 cp[1] = htole32(cp[1]);
3144 cp[2] = htole32(cp[2]);
3145 cp[3] = htole32(cp[3]);
3146
3147 /* set up input endpoint context */
3148 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci));
3149 cp[0] =
3150 XHCI_EPCTX_0_EPSTATE_SET(0) |
3151 XHCI_EPCTX_0_MULT_SET(0) |
3152 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) |
3153 XHCI_EPCTX_0_LSA_SET(0) |
3154 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0);
3155 cp[1] =
3156 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) |
3157 XHCI_EPCTX_1_HID_SET(0) |
3158 XHCI_EPCTX_1_MAXB_SET(0);
3159
3160 if (xfertype != UE_ISOCHRONOUS)
3161 cp[1] |= XHCI_EPCTX_1_CERR_SET(3);
3162
3163 if (xfertype == UE_CONTROL)
3164 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); /* 6.2.3 */
3165 else if (USB_IS_SS(speed))
3166 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(mps);
3167 else
3168 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(UE_GET_SIZE(mps));
3169
3170 xhci_setup_maxburst(pipe, cp);
3171
3172 switch (xfertype) {
3173 case UE_CONTROL:
3174 break;
3175 case UE_BULK:
3176 /* XXX Set MaxPStreams, HID, and LSA if streams enabled */
3177 break;
3178 case UE_INTERRUPT:
3179 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3180 ival = pipe->up_interval;
3181
3182 ival = xhci_bival2ival(ival, speed);
3183 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3184 break;
3185 case UE_ISOCHRONOUS:
3186 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3187 ival = pipe->up_interval;
3188
3189 /* xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 */
3190 if (speed == USB_SPEED_FULL)
3191 ival += 3; /* 1ms -> 125us */
3192 ival--;
3193 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3194 break;
3195 default:
3196 break;
3197 }
3198 DPRINTFN(4, "setting ival %ju MaxBurst %#jx",
3199 XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_1_MAXB_GET(cp[1]), 0, 0);
3200
3201 /* rewind TR dequeue pointer in xHC */
3202 /* can't use xhci_ep_get_dci() yet? */
3203 *(uint64_t *)(&cp[2]) = htole64(
3204 xhci_ring_trbp(xs->xs_xr[dci], 0) |
3205 XHCI_EPCTX_2_DCS_SET(1));
3206
3207 cp[0] = htole32(cp[0]);
3208 cp[1] = htole32(cp[1]);
3209 cp[4] = htole32(cp[4]);
3210
3211 /* rewind TR dequeue pointer in driver */
3212 struct xhci_ring *xr = xs->xs_xr[dci];
3213 mutex_enter(&xr->xr_lock);
3214 xhci_host_dequeue(xr);
3215 mutex_exit(&xr->xr_lock);
3216
3217 /* sync input contexts before they are read from memory */
3218 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3219 }
3220
3221 /*
3222  * Set up the route string and root hub port of the given device for the slot context
3223 */
3224 static void
3225 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3226 {
3227 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3228 struct usbd_device *dev = pipe->up_dev;
3229 struct usbd_port *up = dev->ud_powersrc;
3230 struct usbd_device *hub;
3231 struct usbd_device *adev;
3232 uint8_t rhport = 0;
3233 uint32_t route = 0;
3234
3235 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3236
3237 	/* Locate the root hub port and determine the route string */
3238 /* 4.3.3 route string does not include roothub port */
3239 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3240 uint32_t dep;
3241
3242 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd",
3243 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3244 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3245 -1);
3246
3247 if (hub->ud_powersrc == NULL)
3248 break;
3249 dep = hub->ud_depth;
3250 if (dep == 0)
3251 break;
3252 rhport = hub->ud_powersrc->up_portno;
3253 if (dep > USB_HUB_MAX_DEPTH)
3254 continue;
3255
3256 route |=
3257 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3258 << ((dep - 1) * 4);
3259 }
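	/* Drop the low nibble: the root hub port is not part of the route string. */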
3260 route = route >> 4;
3261 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
3262
3263 /* Locate port on upstream high speed hub */
3264 for (adev = dev, hub = up->up_parent;
3265 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3266 adev = hub, hub = hub->ud_myhub)
3267 ;
3268 if (hub) {
3269 int p;
3270 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3271 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) {
3272 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1];
3273 goto found;
3274 }
3275 }
3276 panic("%s: cannot find HS port", __func__);
3277 found:
3278 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3279 } else {
3280 dev->ud_myhsport = NULL;
3281 }
3282
3283 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3284
3285 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3286 ctlrport, route, (uintptr_t)hub);
3287
3288 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3289 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3290 }
3291
3292 /*
3293  * Set up whether the device is a hub, whether it uses MTT, and the
3294  * TT information if it uses MTT.
3295 */
3296 static void
3297 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3298 {
3299 struct usbd_device *dev = pipe->up_dev;
3300 struct usbd_port *myhsport = dev->ud_myhsport;
3301 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3302 uint32_t speed = dev->ud_speed;
3303 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3304 uint8_t tthubslot, ttportnum;
3305 bool ishub;
3306 bool usemtt;
3307
3308 XHCIHIST_FUNC();
3309
3310 /*
3311 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3312 * tthubslot:
3313 * This is the slot ID of parent HS hub
3314 * if LS/FS device is connected && connected through HS hub.
3315 * This is 0 if device is not LS/FS device ||
3316 * parent hub is not HS hub ||
3317 * attached to root hub.
3318 * ttportnum:
3319 * This is the downstream facing port of parent HS hub
3320 * if LS/FS device is connected.
3321 * This is 0 if device is not LS/FS device ||
3322 * parent hub is not HS hub ||
3323 * attached to root hub.
3324 */
3325 if (myhsport &&
3326 myhsport->up_parent->ud_addr != rhaddr &&
3327 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3328 ttportnum = myhsport->up_portno;
3329 tthubslot = myhsport->up_parent->ud_addr;
3330 } else {
3331 ttportnum = 0;
3332 tthubslot = 0;
3333 }
3334 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd",
3335 (uintptr_t)myhsport, ttportnum, tthubslot, 0);
3336
3337 /* ishub is valid after reading UDESC_DEVICE */
3338 ishub = (dd->bDeviceClass == UDCLASS_HUB);
3339
3340 /* dev->ud_hub is valid after reading UDESC_HUB */
3341 if (ishub && dev->ud_hub) {
3342 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc;
3343 uint8_t ttt =
3344 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK);
3345
3346 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts);
3347 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt);
3348 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0);
3349 }
3350
3351 #define IS_MTTHUB(dd) \
3352 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT)
3353
3354 /*
3355 * MTT flag is set if
3356 * 1. this is HS hub && MTTs are supported and enabled; or
3357 * 2. this is LS or FS device && there is a parent HS hub where MTTs
3358 * are supported and enabled.
3359 *
3360 * XXX enabled is not tested yet
3361 */
3362 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd))
3363 usemtt = true;
3364 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) &&
3365 myhsport &&
3366 myhsport->up_parent->ud_addr != rhaddr &&
3367 IS_MTTHUB(&myhsport->up_parent->ud_ddesc))
3368 usemtt = true;
3369 else
3370 usemtt = false;
3371 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd",
3372 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt);
3373
3374 #undef IS_MTTHUB
3375
3376 cp[0] |=
3377 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) |
3378 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0);
3379 cp[2] |=
3380 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) |
3381 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum);
3382 }
3383
3384 /* set up params for periodic endpoint */
3385 static void
3386 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp)
3387 {
3388 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
3389 struct usbd_device *dev = pipe->up_dev;
3390 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3391 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3392 usbd_desc_iter_t iter;
3393 const usb_cdc_descriptor_t *cdcd;
3394 uint32_t maxb = 0;
3395 uint16_t mps = UGETW(ed->wMaxPacketSize);
3396 uint8_t speed = dev->ud_speed;
3397 uint8_t mult = 0;
3398 uint8_t ep;
3399
3400 /* config desc is NULL when opening ep0 */
3401 if (dev == NULL || dev->ud_cdesc == NULL)
3402 goto no_cdcd;
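	/*
	 * usb_cdc_descriptor_t is used below only as a generic
	 * bLength/bDescriptorType header while walking the descriptors.
	 */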
3403 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev,
3404 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY);
3405 if (cdcd == NULL)
3406 goto no_cdcd;
3407 usb_desc_iter_init(dev, &iter);
3408 iter.cur = (const void *)cdcd;
3409
3410 /* find endpoint_ss_comp desc for ep of this pipe */
3411 for (ep = 0;;) {
3412 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter);
3413 if (cdcd == NULL)
3414 break;
3415 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) {
3416 ep = ((const usb_endpoint_descriptor_t *)cdcd)->
3417 bEndpointAddress;
3418 if (UE_GET_ADDR(ep) ==
3419 UE_GET_ADDR(ed->bEndpointAddress)) {
3420 cdcd = (const usb_cdc_descriptor_t *)
3421 usb_desc_iter_next(&iter);
3422 break;
3423 }
3424 ep = 0;
3425 }
3426 }
3427 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
3428 const usb_endpoint_ss_comp_descriptor_t * esscd =
3429 (const usb_endpoint_ss_comp_descriptor_t *)cdcd;
3430 maxb = esscd->bMaxBurst;
3431 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes);
3432 }
3433
3434 no_cdcd:
3435 /* 6.2.3.4, 4.8.2.4 */
3436 if (USB_IS_SS(speed)) {
3437 /* USB 3.1 9.6.6 */
3438 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps);
3439 /* USB 3.1 9.6.7 */
3440 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3441 #ifdef notyet
3442 if (xfertype == UE_ISOCHRONOUS) {
3443 }
3444 if (XHCI_HCC2_LEC(sc->sc_hcc2) != 0) {
3445 /* use ESIT */
3446 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x);
3447 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(x);
3448
3449 /* XXX if LEC = 1, set ESIT instead */
3450 cp[0] |= XHCI_EPCTX_0_MULT_SET(0);
3451 } else {
3452 /* use ival */
3453 }
3454 #endif
3455 } else {
3456 /* USB 2.0 9.6.6 */
3457 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(UE_GET_SIZE(mps));
3458
3459 /* 6.2.3.4 */
3460 if (speed == USB_SPEED_HIGH &&
3461 (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)) {
3462 maxb = UE_GET_TRANS(mps);
3463 } else {
3464 /* LS/FS or HS CTRL or HS BULK */
3465 maxb = 0;
3466 }
3467 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3468 }
3469 xpipe->xp_maxb = maxb + 1;
3470 xpipe->xp_mult = mult + 1;
3471 }
3472
3473 /*
3474 * Convert endpoint bInterval value to endpoint context interval value
3475 * for Interrupt pipe.
3476 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
3477 */
3478 static uint32_t
3479 xhci_bival2ival(uint32_t ival, uint32_t speed)
3480 {
3481 if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
3482 int i;
3483
3484 /*
3485 * round ival down to "the nearest base 2 multiple of
3486 * bInterval * 8".
3487 * bInterval is at most 255 as its type is uByte.
3488 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
3489 */
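		/* e.g. bInterval = 32 (ms): 32 * 8 = 256 >= 1 << 8, so ival becomes 8. */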
3490 for (i = 10; i > 0; i--) {
3491 if ((ival * 8) >= (1 << i))
3492 break;
3493 }
3494 ival = i;
3495 } else {
3496 /* Interval = bInterval-1 for SS/HS */
3497 ival--;
3498 }
3499
3500 return ival;
3501 }
3502
3503 /* ----- */
3504
3505 static void
3506 xhci_noop(struct usbd_pipe *pipe)
3507 {
3508 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3509 }
3510
3511 /*
3512 * Process root hub request.
3513 */
3514 static int
3515 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
3516 void *buf, int buflen)
3517 {
3518 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
3519 usb_port_status_t ps;
3520 int l, totlen = 0;
3521 uint16_t len, value, index;
3522 int port, i;
3523 uint32_t v;
3524
3525 XHCIHIST_FUNC();
3526
3527 if (sc->sc_dying)
3528 return -1;
3529
3530 size_t bn = bus == &sc->sc_bus ? 0 : 1;
3531
3532 len = UGETW(req->wLength);
3533 value = UGETW(req->wValue);
3534 index = UGETW(req->wIndex);
3535
3536 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
3537 req->bmRequestType | (req->bRequest << 8), value, index, len);
3538
3539 #define C(x,y) ((x) | ((y) << 8))
3540 switch (C(req->bRequest, req->bmRequestType)) {
3541 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3542 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
3543 if (len == 0)
3544 break;
3545 switch (value) {
3546 #define sd ((usb_string_descriptor_t *)buf)
3547 case C(2, UDESC_STRING):
3548 /* Product */
3549 totlen = usb_makestrdesc(sd, len, "xHCI root hub");
3550 break;
3551 #undef sd
3552 default:
3553 /* default from usbroothub */
3554 return buflen;
3555 }
3556 break;
3557
3558 /* Hub requests */
3559 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
3560 break;
3561 /* Clear Port Feature request */
3562 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
3563 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
3564
3565 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
3566 index, value, bn, cp);
3567 if (index < 1 || index > sc->sc_rhportcount[bn]) {
3568 return -1;
3569 }
3570 port = XHCI_PORTSC(cp);
3571 v = xhci_op_read_4(sc, port);
3572 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
3573 v &= ~XHCI_PS_CLEAR;
3574 switch (value) {
3575 case UHF_PORT_ENABLE:
3576 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
3577 break;
3578 case UHF_PORT_SUSPEND:
3579 return -1;
3580 case UHF_PORT_POWER:
3581 break;
3582 case UHF_PORT_TEST:
3583 case UHF_PORT_INDICATOR:
3584 return -1;
3585 case UHF_C_PORT_CONNECTION:
3586 xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
3587 break;
3588 case UHF_C_PORT_ENABLE:
3589 case UHF_C_PORT_SUSPEND:
3590 case UHF_C_PORT_OVER_CURRENT:
3591 return -1;
3592 case UHF_C_BH_PORT_RESET:
3593 xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
3594 break;
3595 case UHF_C_PORT_RESET:
3596 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
3597 break;
3598 case UHF_C_PORT_LINK_STATE:
3599 xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
3600 break;
3601 case UHF_C_PORT_CONFIG_ERROR:
3602 xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
3603 break;
3604 default:
3605 return -1;
3606 }
3607 break;
3608 }
3609 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
3610 if (len == 0)
3611 break;
3612 if ((value & 0xff) != 0) {
3613 return -1;
3614 }
3615 usb_hub_descriptor_t hubd;
3616
3617 totlen = uimin(buflen, sizeof(hubd));
3618 memcpy(&hubd, buf, totlen);
3619 hubd.bNbrPorts = sc->sc_rhportcount[bn];
3620 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
3621 hubd.bPwrOn2PwrGood = 200;
3622 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
3623 /* XXX can't find out? */
3624 hubd.DeviceRemovable[i++] = 0;
3625 }
3626 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
3627 totlen = uimin(totlen, hubd.bDescLength);
3628 memcpy(buf, &hubd, totlen);
3629 break;
3630 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
3631 if (len != 4) {
3632 return -1;
3633 }
3634 memset(buf, 0, len); /* ? XXX */
3635 totlen = len;
3636 break;
3637 /* Get Port Status request */
3638 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
3639 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
3640
3641 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
3642 bn, index, cp, 0);
3643 if (index < 1 || index > sc->sc_rhportcount[bn]) {
3644 DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
3645 "portcount=%jd",
3646 index, bn, sc->sc_rhportcount[bn], 0);
3647 return -1;
3648 }
3649 if (len != 4) {
3650 DPRINTFN(5, "bad get port status: len %jd != 4",
3651 len, 0, 0, 0);
3652 return -1;
3653 }
3654 v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
3655 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
3656 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
3657 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
3658 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
3659 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
3660 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
3661 if (v & XHCI_PS_PR) i |= UPS_RESET;
3662 if (v & XHCI_PS_PP) {
3663 if (i & UPS_OTHER_SPEED)
3664 i |= UPS_PORT_POWER_SS;
3665 else
3666 i |= UPS_PORT_POWER;
3667 }
3668 if (i & UPS_OTHER_SPEED)
3669 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
3670 if (sc->sc_vendor_port_status)
3671 i = sc->sc_vendor_port_status(sc, v, i);
3672 USETW(ps.wPortStatus, i);
3673 i = 0;
3674 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
3675 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
3676 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
3677 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
3678 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
3679 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
3680 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
3681 USETW(ps.wPortChange, i);
3682 totlen = uimin(len, sizeof(ps));
3683 memcpy(buf, &ps, totlen);
3684 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
3685 " totlen %jd",
3686 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
3687 break;
3688 }
3689 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
3690 return -1;
3691 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
3692 break;
3693 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
3694 break;
3695 /* Set Port Feature request */
3696 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
3697 int optval = (index >> 8) & 0xff;
3698 index &= 0xff;
3699 if (index < 1 || index > sc->sc_rhportcount[bn]) {
3700 return -1;
3701 }
3702
3703 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
3704
3705 port = XHCI_PORTSC(cp);
3706 v = xhci_op_read_4(sc, port);
3707 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
3708 v &= ~XHCI_PS_CLEAR;
3709 switch (value) {
3710 case UHF_PORT_ENABLE:
3711 xhci_op_write_4(sc, port, v | XHCI_PS_PED);
3712 break;
3713 case UHF_PORT_SUSPEND:
3714 /* XXX suspend */
3715 break;
3716 case UHF_PORT_RESET:
3717 v &= ~(XHCI_PS_PED | XHCI_PS_PR);
3718 xhci_op_write_4(sc, port, v | XHCI_PS_PR);
3719 /* Wait for reset to complete. */
3720 usb_delay_ms(&sc->sc_bus, USB_PORT_ROOT_RESET_DELAY);
3721 if (sc->sc_dying) {
3722 return -1;
3723 }
3724 v = xhci_op_read_4(sc, port);
3725 if (v & XHCI_PS_PR) {
3726 xhci_op_write_4(sc, port, v & ~XHCI_PS_PR);
3727 usb_delay_ms(&sc->sc_bus, 10);
3728 /* XXX */
3729 }
3730 break;
3731 case UHF_PORT_POWER:
3732 /* XXX power control */
3733 break;
3734 /* XXX more */
3735 case UHF_C_PORT_RESET:
3736 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
3737 break;
3738 case UHF_PORT_U1_TIMEOUT:
3739 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
3740 return -1;
3741 }
3742 port = XHCI_PORTPMSC(cp);
3743 v = xhci_op_read_4(sc, port);
3744 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
3745 index, cp, v, 0);
3746 v &= ~XHCI_PM3_U1TO_SET(0xff);
3747 v |= XHCI_PM3_U1TO_SET(optval);
3748 xhci_op_write_4(sc, port, v);
3749 break;
3750 case UHF_PORT_U2_TIMEOUT:
3751 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
3752 return -1;
3753 }
3754 port = XHCI_PORTPMSC(cp);
3755 v = xhci_op_read_4(sc, port);
3756 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
3757 index, cp, v, 0);
3758 v &= ~XHCI_PM3_U2TO_SET(0xff);
3759 v |= XHCI_PM3_U2TO_SET(optval);
3760 xhci_op_write_4(sc, port, v);
3761 break;
3762 default:
3763 return -1;
3764 }
3765 }
3766 break;
3767 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
3768 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
3769 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
3770 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
3771 break;
3772 default:
3773 /* default from usbroothub */
3774 return buflen;
3775 }
3776
3777 return totlen;
3778 }
3779
3780 /* root hub interrupt */
3781
3782 static usbd_status
3783 xhci_root_intr_transfer(struct usbd_xfer *xfer)
3784 {
3785 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
3786 usbd_status err;
3787
3788 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3789
3790 /* Insert last in queue. */
3791 mutex_enter(&sc->sc_lock);
3792 err = usb_insert_transfer(xfer);
3793 mutex_exit(&sc->sc_lock);
3794 if (err)
3795 return err;
3796
3797 /* Pipe isn't running, start first */
3798 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
3799 }
3800
3801 /* Wait for roothub port status/change */
3802 static usbd_status
3803 xhci_root_intr_start(struct usbd_xfer *xfer)
3804 {
3805 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
3806 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
3807 const bool polling = xhci_polling_p(sc);
3808
3809 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3810
3811 if (sc->sc_dying)
3812 return USBD_IOERROR;
3813
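	/*
	 * Stash the xfer; it is completed from the event ring handler
	 * when the controller posts a Port Status Change Event for a
	 * port on this bus.
	 */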
3814 if (!polling)
3815 mutex_enter(&sc->sc_lock);
3816 KASSERT(sc->sc_intrxfer[bn] == NULL);
3817 sc->sc_intrxfer[bn] = xfer;
3818 xfer->ux_status = USBD_IN_PROGRESS;
3819 if (!polling)
3820 mutex_exit(&sc->sc_lock);
3821
3822 return USBD_IN_PROGRESS;
3823 }
3824
3825 static void
3826 xhci_root_intr_abort(struct usbd_xfer *xfer)
3827 {
3828 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
3829 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
3830
3831 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3832
3833 KASSERT(mutex_owned(&sc->sc_lock));
3834 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
3835
3836 /* If xfer has already completed, nothing to do here. */
3837 if (sc->sc_intrxfer[bn] == NULL)
3838 return;
3839
3840 /*
3841 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
3842 * Cancel it.
3843 */
3844 KASSERT(sc->sc_intrxfer[bn] == xfer);
3845 xfer->ux_status = USBD_CANCELLED;
3846 usb_transfer_complete(xfer);
3847 }
3848
3849 static void
3850 xhci_root_intr_close(struct usbd_pipe *pipe)
3851 {
3852 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
3853 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
3854 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
3855
3856 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3857
3858 KASSERT(mutex_owned(&sc->sc_lock));
3859
3860 /*
3861 * Caller must guarantee the xfer has completed first, by
3862 * closing the pipe only after normal completion or an abort.
3863 */
3864 KASSERT(sc->sc_intrxfer[bn] == NULL);
3865 }
3866
3867 static void
3868 xhci_root_intr_done(struct usbd_xfer *xfer)
3869 {
3870 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
3871 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
3872
3873 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3874
3875 KASSERT(mutex_owned(&sc->sc_lock));
3876
3877 /* Claim the xfer so it doesn't get completed again. */
3878 KASSERT(sc->sc_intrxfer[bn] == xfer);
3879 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
3880 sc->sc_intrxfer[bn] = NULL;
3881 }
3882
3883 /* -------------- */
3884 /* device control */
3885
3886 static usbd_status
3887 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
3888 {
3889 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
3890 usbd_status err;
3891
3892 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3893
3894 /* Insert last in queue. */
3895 mutex_enter(&sc->sc_lock);
3896 err = usb_insert_transfer(xfer);
3897 mutex_exit(&sc->sc_lock);
3898 if (err)
3899 return err;
3900
3901 /* Pipe isn't running, start first */
3902 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
3903 }
3904
3905 static usbd_status
3906 xhci_device_ctrl_start(struct usbd_xfer *xfer)
3907 {
3908 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
3909 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
3910 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
3911 struct xhci_ring * const tr = xs->xs_xr[dci];
3912 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
3913 usb_device_request_t * const req = &xfer->ux_request;
3914 const bool isread = usbd_xfer_isread(xfer);
3915 const uint32_t len = UGETW(req->wLength);
3916 usb_dma_t * const dma = &xfer->ux_dmabuf;
3917 uint64_t parameter;
3918 uint32_t status;
3919 uint32_t control;
3920 u_int i;
3921 const bool polling = xhci_polling_p(sc);
3922
3923 XHCIHIST_FUNC();
3924 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
3925 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
3926 UGETW(req->wIndex), UGETW(req->wLength));
3927
3928 /* we rely on the bottom bits for extra info */
3929 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %zx",
3930 (uintptr_t) xfer);
3931
3932 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);
3933
3934 i = 0;
3935
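	/*
	 * A control transfer is queued as a Setup Stage TRB, an optional
	 * Data Stage TRB when wLength is non-zero, and a Status Stage TRB.
	 * The Setup TRB carries the 8-byte request inline (IDT), so no
	 * separate DMA buffer is needed for it.
	 */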
3936 /* setup phase */
3937 	parameter = le64dec(req); /* LE-decode; xhci_trb_put() re-encodes LE, keeping USB order */
3938 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
3939 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
3940 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
3941 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
3942 XHCI_TRB_3_IDT_BIT;
3943 xhci_xfer_put_trb(xx, i++, parameter, status, control);
3944
3945 if (len != 0) {
3946 /* data phase */
3947 parameter = DMAADDR(dma, 0);
3948 KASSERTMSG(len <= 0x10000, "len %d", len);
3949 status = XHCI_TRB_2_IRQ_SET(0) |
3950 XHCI_TRB_2_TDSZ_SET(0) |
3951 XHCI_TRB_2_BYTES_SET(len);
3952 control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
3953 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
3954 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
3955 XHCI_TRB_3_IOC_BIT;
3956 xhci_xfer_put_trb(xx, i++, parameter, status, control);
3957
3958 usb_syncmem(dma, 0, len,
3959 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
3960 }
3961
3962 parameter = 0;
3963 status = XHCI_TRB_2_IRQ_SET(0);
3964 	/* status direction is opposite the data stage; IN if no data stage */
3965 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
3966 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
3967 XHCI_TRB_3_IOC_BIT;
3968 xhci_xfer_put_trb(xx, i++, parameter, status, control);
3969
3970 if (!polling)
3971 mutex_enter(&tr->xr_lock);
3972 xhci_ring_put_xfer(sc, tr, xx, i);
3973 if (!polling)
3974 mutex_exit(&tr->xr_lock);
3975
3976 if (!polling)
3977 mutex_enter(&sc->sc_lock);
3978 xfer->ux_status = USBD_IN_PROGRESS;
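	/*
	 * Ring the slot's doorbell with the endpoint's DCI so the
	 * controller starts fetching the TRBs just queued on the ring.
	 */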
3979 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
3980 usbd_xfer_schedule_timeout(xfer);
3981 if (!polling)
3982 mutex_exit(&sc->sc_lock);
3983
3984 return USBD_IN_PROGRESS;
3985 }
3986
3987 static void
3988 xhci_device_ctrl_done(struct usbd_xfer *xfer)
3989 {
3990 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3991 usb_device_request_t *req = &xfer->ux_request;
3992 int len = UGETW(req->wLength);
3993 int rd = req->bmRequestType & UT_READ;
3994
3995 if (len)
3996 usb_syncmem(&xfer->ux_dmabuf, 0, len,
3997 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3998 }
3999
4000 static void
4001 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
4002 {
4003 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4004
4005 usbd_xfer_abort(xfer);
4006 }
4007
4008 static void
4009 xhci_device_ctrl_close(struct usbd_pipe *pipe)
4010 {
4011 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4012
4013 xhci_close_pipe(pipe);
4014 }
4015
4016 /* ------------------ */
4017 /* device isochronous */
4018
4019 static usbd_status
4020 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
4021 {
4022 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4023 usbd_status err;
4024
4025 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4026
4027 /* Insert last in queue. */
4028 mutex_enter(&sc->sc_lock);
4029 err = usb_insert_transfer(xfer);
4030 mutex_exit(&sc->sc_lock);
4031 if (err)
4032 return err;
4033
4034 return xhci_device_isoc_enter(xfer);
4035 }
4036
4037 static usbd_status
4038 xhci_device_isoc_enter(struct usbd_xfer *xfer)
4039 {
4040 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4041 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4042 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4043 struct xhci_ring * const tr = xs->xs_xr[dci];
4044 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4045 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
4046 uint32_t len = xfer->ux_length;
4047 usb_dma_t * const dma = &xfer->ux_dmabuf;
4048 uint64_t parameter;
4049 uint32_t status;
4050 uint32_t control;
4051 uint32_t mfindex;
4052 uint32_t offs;
4053 int i, ival;
4054 const bool polling = xhci_polling_p(sc);
4055 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
4056 const uint16_t mps = UE_GET_SIZE(MPS);
4057 const uint8_t maxb = xpipe->xp_maxb;
4058 u_int tdpc, tbc, tlbpc;
4059
4060 XHCIHIST_FUNC();
4061 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4062 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4063
4064 if (sc->sc_dying)
4065 return USBD_IOERROR;
4066
4067 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
4068 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4069
4070 const bool isread = usbd_xfer_isread(xfer);
4071 if (xfer->ux_length)
4072 usb_syncmem(dma, 0, xfer->ux_length,
4073 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4074
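	/*
	 * For isochronous endpoints bInterval is an exponent: a value of
	 * 1..16 means a service interval of 2^(bInterval-1) (micro)frames,
	 * e.g. bInterval 4 is every 8 (micro)frames.
	 */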
4075 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
4076 if (ival >= 1 && ival <= 16)
4077 ival = 1 << (ival - 1);
4078 else
4079 ival = 1; /* fake something up */
4080
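	/*
	 * First isoch TD on this pipe: pick a starting frame from the
	 * current microframe index, leaving slack for the controller's
	 * Isochronous Scheduling Threshold, rounded up to the interval.
	 */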
4081 if (xpipe->xp_isoc_next == -1) {
4082 mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
4083 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
4084 mfindex = XHCI_MFINDEX_GET(mfindex + 1);
4085 mfindex /= USB_UFRAMES_PER_FRAME;
4086 mfindex += 7; /* 7 frames is max possible IST */
4087 xpipe->xp_isoc_next = roundup2(mfindex, ival);
4088 }
4089
4090 offs = 0;
4091 for (i = 0; i < xfer->ux_nframes; i++) {
4092 len = xfer->ux_frlengths[i];
4093
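		/*
		 * Burst accounting per xHCI 4.11.2.3: TDPC is the number of
		 * max-packet-size packets in this frame, TBC the number of
		 * bursts minus one, and TLBPC the packet count of the last
		 * burst minus one; e.g. len 3000, mps 1024, maxb 2 gives
		 * tdpc 3, tbc 1, tlbpc 0.
		 */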
4094 tdpc = howmany(len, mps);
4095 tbc = howmany(tdpc, maxb) - 1;
4096 tlbpc = tdpc % maxb;
4097 tlbpc = tlbpc ? tlbpc - 1 : maxb - 1;
4098
4099 KASSERTMSG(len <= 0x10000, "len %d", len);
4100 parameter = DMAADDR(dma, offs);
4101 status = XHCI_TRB_2_IRQ_SET(0) |
4102 XHCI_TRB_2_TDSZ_SET(0) |
4103 XHCI_TRB_2_BYTES_SET(len);
4104 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
4105 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4106 XHCI_TRB_3_TBC_SET(tbc) |
4107 XHCI_TRB_3_TLBPC_SET(tlbpc) |
4108 XHCI_TRB_3_IOC_BIT;
4109 if (XHCI_HCC_CFC(sc->sc_hcc)) {
4110 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4111 #if 0
4112 } else if (xpipe->xp_isoc_next == -1) {
4113 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4114 #endif
4115 } else {
4116 control |= XHCI_TRB_3_ISO_SIA_BIT;
4117 }
4118 #if 0
4119 if (i != xfer->ux_nframes - 1)
4120 control |= XHCI_TRB_3_BEI_BIT;
4121 #endif
4122 xhci_xfer_put_trb(xx, i, parameter, status, control);
4123
4124 xpipe->xp_isoc_next += ival;
4125 offs += len;
4126 }
4127
4128 xx->xx_isoc_done = 0;
4129
4130 if (!polling)
4131 mutex_enter(&tr->xr_lock);
4132 xhci_ring_put_xfer(sc, tr, xx, i);
4133 if (!polling)
4134 mutex_exit(&tr->xr_lock);
4135
4136 if (!polling)
4137 mutex_enter(&sc->sc_lock);
4138 xfer->ux_status = USBD_IN_PROGRESS;
4139 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4140 usbd_xfer_schedule_timeout(xfer);
4141 if (!polling)
4142 mutex_exit(&sc->sc_lock);
4143
4144 return USBD_IN_PROGRESS;
4145 }
4146
4147 static void
4148 xhci_device_isoc_abort(struct usbd_xfer *xfer)
4149 {
4150 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4151
4152 usbd_xfer_abort(xfer);
4153 }
4154
4155 static void
4156 xhci_device_isoc_close(struct usbd_pipe *pipe)
4157 {
4158 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4159
4160 xhci_close_pipe(pipe);
4161 }
4162
4163 static void
4164 xhci_device_isoc_done(struct usbd_xfer *xfer)
4165 {
4166 #ifdef USB_DEBUG
4167 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4168 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4169 #endif
4170 const bool isread = usbd_xfer_isread(xfer);
4171
4172 XHCIHIST_FUNC();
4173 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4174 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4175
4176 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4177 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4178 }
4179
4180 /* ----------- */
4181 /* device bulk */
4182
4183 static usbd_status
4184 xhci_device_bulk_transfer(struct usbd_xfer *xfer)
4185 {
4186 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4187 usbd_status err;
4188
4189 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4190
4191 /* Insert last in queue. */
4192 mutex_enter(&sc->sc_lock);
4193 err = usb_insert_transfer(xfer);
4194 mutex_exit(&sc->sc_lock);
4195 if (err)
4196 return err;
4197
4198 /*
4199 	 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
4200 * so start it first.
4201 */
4202 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4203 }
4204
4205 static usbd_status
4206 xhci_device_bulk_start(struct usbd_xfer *xfer)
4207 {
4208 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4209 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4210 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4211 struct xhci_ring * const tr = xs->xs_xr[dci];
4212 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4213 const uint32_t len = xfer->ux_length;
4214 usb_dma_t * const dma = &xfer->ux_dmabuf;
4215 uint64_t parameter;
4216 uint32_t status;
4217 uint32_t control;
4218 u_int i = 0;
4219 const bool polling = xhci_polling_p(sc);
4220
4221 XHCIHIST_FUNC();
4222 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4223 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4224
4225 if (sc->sc_dying)
4226 return USBD_IOERROR;
4227
4228 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4229
4230 parameter = DMAADDR(dma, 0);
4231 const bool isread = usbd_xfer_isread(xfer);
4232 if (len)
4233 usb_syncmem(dma, 0, len,
4234 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4235
4236 /*
4237 * XXX: (dsl) The physical buffer must not cross a 64k boundary.
4238 * If the user supplied buffer crosses such a boundary then 2
4239 * (or more) TRB should be used.
4240 * If multiple TRB are used the td_size field must be set correctly.
4241 	 * For v1.0 controllers (like Ivy Bridge) this is the number of USB data
4242 	 * blocks needed to complete the transfer.
4243 	 * Setting it to 1 in the last TRB causes an extra zero-length
4244 	 * data block to be sent.
4245 	 * The earlier documentation differs; I don't know how it behaves.
4246 */
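	/*
	 * For now one Normal TRB covers the whole buffer: ISP requests an
	 * event on a short packet for reads, IOC an event on completion.
	 */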
4247 KASSERTMSG(len <= 0x10000, "len %d", len);
4248 status = XHCI_TRB_2_IRQ_SET(0) |
4249 XHCI_TRB_2_TDSZ_SET(0) |
4250 XHCI_TRB_2_BYTES_SET(len);
4251 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4252 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4253 XHCI_TRB_3_IOC_BIT;
4254 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4255
4256 if (!polling)
4257 mutex_enter(&tr->xr_lock);
4258 xhci_ring_put_xfer(sc, tr, xx, i);
4259 if (!polling)
4260 mutex_exit(&tr->xr_lock);
4261
4262 if (!polling)
4263 mutex_enter(&sc->sc_lock);
4264 xfer->ux_status = USBD_IN_PROGRESS;
4265 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4266 usbd_xfer_schedule_timeout(xfer);
4267 if (!polling)
4268 mutex_exit(&sc->sc_lock);
4269
4270 return USBD_IN_PROGRESS;
4271 }
4272
4273 static void
4274 xhci_device_bulk_done(struct usbd_xfer *xfer)
4275 {
4276 #ifdef USB_DEBUG
4277 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4278 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4279 #endif
4280 const bool isread = usbd_xfer_isread(xfer);
4281
4282 XHCIHIST_FUNC();
4283 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4284 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4285
4286 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4287 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4288 }
4289
4290 static void
4291 xhci_device_bulk_abort(struct usbd_xfer *xfer)
4292 {
4293 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4294
4295 usbd_xfer_abort(xfer);
4296 }
4297
4298 static void
4299 xhci_device_bulk_close(struct usbd_pipe *pipe)
4300 {
4301 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4302
4303 xhci_close_pipe(pipe);
4304 }
4305
4306 /* ---------------- */
4307 /* device interrupt */
4308
4309 static usbd_status
4310 xhci_device_intr_transfer(struct usbd_xfer *xfer)
4311 {
4312 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4313 usbd_status err;
4314
4315 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4316
4317 /* Insert last in queue. */
4318 mutex_enter(&sc->sc_lock);
4319 err = usb_insert_transfer(xfer);
4320 mutex_exit(&sc->sc_lock);
4321 if (err)
4322 return err;
4323
4324 /*
4325 	 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
4326 * so start it first.
4327 */
4328 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4329 }
4330
4331 static usbd_status
4332 xhci_device_intr_start(struct usbd_xfer *xfer)
4333 {
4334 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4335 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4336 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4337 struct xhci_ring * const tr = xs->xs_xr[dci];
4338 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4339 const uint32_t len = xfer->ux_length;
4340 const bool polling = xhci_polling_p(sc);
4341 usb_dma_t * const dma = &xfer->ux_dmabuf;
4342 uint64_t parameter;
4343 uint32_t status;
4344 uint32_t control;
4345 u_int i = 0;
4346
4347 XHCIHIST_FUNC();
4348 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4349 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4350
4351 if (sc->sc_dying)
4352 return USBD_IOERROR;
4353
4354 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4355
4356 const bool isread = usbd_xfer_isread(xfer);
4357 if (len)
4358 usb_syncmem(dma, 0, len,
4359 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4360
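	/*
	 * Interrupt endpoints use a single Normal TRB as well; the polling
	 * interval is programmed in the endpoint context, not in the TRB.
	 */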
4361 parameter = DMAADDR(dma, 0);
4362 KASSERTMSG(len <= 0x10000, "len %d", len);
4363 status = XHCI_TRB_2_IRQ_SET(0) |
4364 XHCI_TRB_2_TDSZ_SET(0) |
4365 XHCI_TRB_2_BYTES_SET(len);
4366 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4367 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
4368 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4369
4370 if (!polling)
4371 mutex_enter(&tr->xr_lock);
4372 xhci_ring_put_xfer(sc, tr, xx, i);
4373 if (!polling)
4374 mutex_exit(&tr->xr_lock);
4375
4376 if (!polling)
4377 mutex_enter(&sc->sc_lock);
4378 xfer->ux_status = USBD_IN_PROGRESS;
4379 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4380 usbd_xfer_schedule_timeout(xfer);
4381 if (!polling)
4382 mutex_exit(&sc->sc_lock);
4383
4384 return USBD_IN_PROGRESS;
4385 }
4386
4387 static void
4388 xhci_device_intr_done(struct usbd_xfer *xfer)
4389 {
4390 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4391 #ifdef USB_DEBUG
4392 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4393 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4394 #endif
4395 const bool isread = usbd_xfer_isread(xfer);
4396
4397 XHCIHIST_FUNC();
4398 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4399 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4400
4401 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4402
4403 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4404 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4405 }
4406
4407 static void
4408 xhci_device_intr_abort(struct usbd_xfer *xfer)
4409 {
4410 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4411
4412 XHCIHIST_FUNC();
4413 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);
4414
4415 KASSERT(mutex_owned(&sc->sc_lock));
4416 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
4417 usbd_xfer_abort(xfer);
4418 }
4419
4420 static void
4421 xhci_device_intr_close(struct usbd_pipe *pipe)
4422 {
4423 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
4424
4425 XHCIHIST_FUNC();
4426 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);
4427
4428 xhci_close_pipe(pipe);
4429 }
4430