     1	/* $NetBSD: xhci.c,v 1.164 2022/04/06 22:01:45 mlelstv Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * USB rev 2.0 and rev 3.1 specification
31 * http://www.usb.org/developers/docs/
32 * xHCI rev 1.1 specification
33 * http://www.intel.com/technology/usb/spec.htm
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.164 2022/04/06 22:01:45 mlelstv Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_usb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/kmem.h>
47 #include <sys/device.h>
48 #include <sys/select.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/sysctl.h>
56
57 #include <machine/endian.h>
58
59 #include <dev/usb/usb.h>
60 #include <dev/usb/usbdi.h>
61 #include <dev/usb/usbdivar.h>
62 #include <dev/usb/usbdi_util.h>
63 #include <dev/usb/usbhist.h>
64 #include <dev/usb/usb_mem.h>
65 #include <dev/usb/usb_quirks.h>
66
67 #include <dev/usb/xhcireg.h>
68 #include <dev/usb/xhcivar.h>
69 #include <dev/usb/usbroothub.h>
70
71
72 #ifdef USB_DEBUG
73 #ifndef XHCI_DEBUG
74 #define xhcidebug 0
75 #else /* !XHCI_DEBUG */
76 #define HEXDUMP(a, b, c) \
77 do { \
78 if (xhcidebug > 0) \
79 hexdump(printf, a, b, c); \
80 } while (/*CONSTCOND*/0)
81 static int xhcidebug = 0;
82
83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
84 {
85 int err;
86 const struct sysctlnode *rnode;
87 const struct sysctlnode *cnode;
88
89 err = sysctl_createv(clog, 0, NULL, &rnode,
90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
91 SYSCTL_DESCR("xhci global controls"),
92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
93
94 if (err)
95 goto fail;
96
97 /* control debugging printfs */
98 err = sysctl_createv(clog, 0, &rnode, &cnode,
99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
100 "debug", SYSCTL_DESCR("Enable debugging output"),
101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
102 if (err)
103 goto fail;
104
105 return;
106 fail:
107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
108 }
109
110 #endif /* !XHCI_DEBUG */
111 #endif /* USB_DEBUG */
112
113 #ifndef HEXDUMP
114 #define HEXDUMP(a, b, c)
115 #endif
116
117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
119 #define XHCIHIST_FUNC() USBHIST_FUNC()
120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug)
121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)
123
124 #define XHCI_DCI_SLOT 0
125 #define XHCI_DCI_EP_CONTROL 1
126
127 #define XHCI_ICI_INPUT_CONTROL 0
128
129 struct xhci_pipe {
130 struct usbd_pipe xp_pipe;
131 struct usb_task xp_async_task;
132 int16_t xp_isoc_next; /* next frame */
133 uint8_t xp_maxb; /* max burst */
134 uint8_t xp_mult;
135 };
136
137 #define XHCI_COMMAND_RING_TRBS 256
138 #define XHCI_EVENT_RING_TRBS 256
139 #define XHCI_EVENT_RING_SEGMENTS 1
140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT
141
142 static usbd_status xhci_open(struct usbd_pipe *);
143 static void xhci_close_pipe(struct usbd_pipe *);
144 static int xhci_intr1(struct xhci_softc * const);
145 static void xhci_softintr(void *);
146 static void xhci_poll(struct usbd_bus *);
147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
149 static void xhci_abortx(struct usbd_xfer *);
150 static bool xhci_dying(struct usbd_bus *);
151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
153 struct usbd_port *);
154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
155 void *, int);
156
157 static void xhci_pipe_restart(struct usbd_pipe *);
158 static void xhci_pipe_restart_async_task(void *);
159 static void xhci_pipe_restart_async(struct usbd_pipe *);
160
161 static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
162 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
163 static void xhci_reset_endpoint(struct usbd_pipe *);
164 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *,
165 struct xhci_slot *, u_int, uint32_t);
166 static usbd_status xhci_stop_endpoint(struct usbd_pipe *);
167
168 static void xhci_host_dequeue(struct xhci_ring * const);
169 static void xhci_set_dequeue(struct usbd_pipe *);
170
171 static usbd_status xhci_do_command(struct xhci_softc * const,
172 struct xhci_soft_trb * const, int);
173 static usbd_status xhci_do_command_locked(struct xhci_softc * const,
174 struct xhci_soft_trb * const, int);
175 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
176 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *);
177 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
178 static usbd_status xhci_enable_slot(struct xhci_softc * const,
179 uint8_t * const);
180 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
181 static usbd_status xhci_address_device(struct xhci_softc * const,
182 uint64_t, uint8_t, bool);
183 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
184 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
185 struct xhci_slot * const, u_int);
186 static usbd_status xhci_ring_init(struct xhci_softc * const,
187 struct xhci_ring **, size_t, size_t);
188 static void xhci_ring_free(struct xhci_softc * const,
189 struct xhci_ring ** const);
190
191 static void xhci_setup_ctx(struct usbd_pipe *);
192 static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
193 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
194 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
195 static uint32_t xhci_bival2ival(uint32_t, uint32_t);
196
197 static void xhci_noop(struct usbd_pipe *);
198
199 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
200 static usbd_status xhci_root_intr_start(struct usbd_xfer *);
201 static void xhci_root_intr_abort(struct usbd_xfer *);
202 static void xhci_root_intr_close(struct usbd_pipe *);
203 static void xhci_root_intr_done(struct usbd_xfer *);
204
205 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
206 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
207 static void xhci_device_ctrl_abort(struct usbd_xfer *);
208 static void xhci_device_ctrl_close(struct usbd_pipe *);
209 static void xhci_device_ctrl_done(struct usbd_xfer *);
210
211 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
212 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *);
213 static void xhci_device_isoc_abort(struct usbd_xfer *);
214 static void xhci_device_isoc_close(struct usbd_pipe *);
215 static void xhci_device_isoc_done(struct usbd_xfer *);
216
217 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
218 static usbd_status xhci_device_intr_start(struct usbd_xfer *);
219 static void xhci_device_intr_abort(struct usbd_xfer *);
220 static void xhci_device_intr_close(struct usbd_pipe *);
221 static void xhci_device_intr_done(struct usbd_xfer *);
222
223 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
224 static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
225 static void xhci_device_bulk_abort(struct usbd_xfer *);
226 static void xhci_device_bulk_close(struct usbd_pipe *);
227 static void xhci_device_bulk_done(struct usbd_xfer *);
228
229 static const struct usbd_bus_methods xhci_bus_methods = {
230 .ubm_open = xhci_open,
231 .ubm_softint = xhci_softintr,
232 .ubm_dopoll = xhci_poll,
233 .ubm_allocx = xhci_allocx,
234 .ubm_freex = xhci_freex,
235 .ubm_abortx = xhci_abortx,
236 .ubm_dying = xhci_dying,
237 .ubm_getlock = xhci_get_lock,
238 .ubm_newdev = xhci_new_device,
239 .ubm_rhctrl = xhci_roothub_ctrl,
240 };
241
242 static const struct usbd_pipe_methods xhci_root_intr_methods = {
243 .upm_transfer = xhci_root_intr_transfer,
244 .upm_start = xhci_root_intr_start,
245 .upm_abort = xhci_root_intr_abort,
246 .upm_close = xhci_root_intr_close,
247 .upm_cleartoggle = xhci_noop,
248 .upm_done = xhci_root_intr_done,
249 };
250
251
252 static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
253 .upm_transfer = xhci_device_ctrl_transfer,
254 .upm_start = xhci_device_ctrl_start,
255 .upm_abort = xhci_device_ctrl_abort,
256 .upm_close = xhci_device_ctrl_close,
257 .upm_cleartoggle = xhci_noop,
258 .upm_done = xhci_device_ctrl_done,
259 };
260
261 static const struct usbd_pipe_methods xhci_device_isoc_methods = {
262 .upm_transfer = xhci_device_isoc_transfer,
263 .upm_abort = xhci_device_isoc_abort,
264 .upm_close = xhci_device_isoc_close,
265 .upm_cleartoggle = xhci_noop,
266 .upm_done = xhci_device_isoc_done,
267 };
268
269 static const struct usbd_pipe_methods xhci_device_bulk_methods = {
270 .upm_transfer = xhci_device_bulk_transfer,
271 .upm_start = xhci_device_bulk_start,
272 .upm_abort = xhci_device_bulk_abort,
273 .upm_close = xhci_device_bulk_close,
274 .upm_cleartoggle = xhci_noop,
275 .upm_done = xhci_device_bulk_done,
276 };
277
278 static const struct usbd_pipe_methods xhci_device_intr_methods = {
279 .upm_transfer = xhci_device_intr_transfer,
280 .upm_start = xhci_device_intr_start,
281 .upm_abort = xhci_device_intr_abort,
282 .upm_close = xhci_device_intr_close,
283 .upm_cleartoggle = xhci_noop,
284 .upm_done = xhci_device_intr_done,
285 };
286
287 static inline uint32_t
288 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
289 {
290 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
291 }
292
293 static inline uint32_t
294 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset)
295 {
296 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
297 }
298
299 static inline uint32_t
300 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
301 {
302 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
303 }
304
305 static inline void
306 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
307 uint32_t value)
308 {
309 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
310 }
311
312 #if 0 /* unused */
313 static inline void
314 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
315 uint32_t value)
316 {
317 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
318 }
319 #endif /* unused */
320
321 static inline void
322 xhci_barrier(const struct xhci_softc * const sc, int flags)
323 {
324 bus_space_barrier(sc->sc_iot, sc->sc_ioh, 0, sc->sc_ios, flags);
325 }
326
327 static inline uint32_t
328 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
329 {
330 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
331 }
332
333 static inline uint32_t
334 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
335 {
336 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
337 }
338
339 static inline void
340 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
341 uint32_t value)
342 {
343 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
344 }
345
346 static inline uint64_t
347 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
348 {
349 uint64_t value;
350
351 #ifdef XHCI_USE_BUS_SPACE_8
352 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
353 #else
354 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
355 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
356 offset + 4) << 32;
357 #endif
358
359 return value;
360 }
361
362 static inline void
363 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
364 uint64_t value)
365 {
366 #ifdef XHCI_USE_BUS_SPACE_8
367 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
368 #else
369 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
370 (value >> 0) & 0xffffffff);
371 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
372 (value >> 32) & 0xffffffff);
373 #endif
374 }
375
376 static inline uint32_t
377 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
378 {
379 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
380 }
381
382 static inline void
383 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
384 uint32_t value)
385 {
386 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
387 }
388
389 static inline uint64_t
390 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
391 {
392 uint64_t value;
393
394 #ifdef XHCI_USE_BUS_SPACE_8
395 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
396 #else
397 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
398 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
399 offset + 4) << 32;
400 #endif
401
402 return value;
403 }
404
405 static inline void
406 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
407 uint64_t value)
408 {
409 #ifdef XHCI_USE_BUS_SPACE_8
410 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
411 #else
412 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
413 (value >> 0) & 0xffffffff);
414 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
415 (value >> 32) & 0xffffffff);
416 #endif
417 }
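/*
 * Note: when XHCI_USE_BUS_SPACE_8 is not defined, the 64-bit operational
 * and runtime registers above are accessed as two 32-bit bus_space
 * operations, low dword first; the xHCI specification is understood to
 * permit 32-bit accesses to its 64-bit registers.
 */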
418
419 #if 0 /* unused */
420 static inline uint32_t
421 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
422 {
423 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
424 }
425 #endif /* unused */
426
427 static inline void
428 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
429 uint32_t value)
430 {
431 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
432 }
433
434 /* --- */
435
436 static inline uint8_t
437 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
438 {
439 u_int eptype = 0;
440
441 switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
442 case UE_CONTROL:
443 eptype = 0x0;
444 break;
445 case UE_ISOCHRONOUS:
446 eptype = 0x1;
447 break;
448 case UE_BULK:
449 eptype = 0x2;
450 break;
451 case UE_INTERRUPT:
452 eptype = 0x3;
453 break;
454 }
455
456 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
457 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
458 return eptype | 0x4;
459 else
460 return eptype;
461 }
462
463 static u_int
464 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
465 {
466 /* xHCI 1.0 section 4.5.1 */
467 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
468 u_int in = 0;
469
470 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
471 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
472 in = 1;
473
474 return epaddr * 2 + in;
475 }
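/*
 * Worked examples of the DCI mapping above (illustrative only):
 *   default control endpoint (address 0) -> 0 * 2 + 1 = DCI 1
 *   bulk IN endpoint 0x81                -> 1 * 2 + 1 = DCI 3
 *   bulk OUT endpoint 0x02               -> 2 * 2 + 0 = DCI 4
 * DCI 1 matches XHCI_DCI_EP_CONTROL above.
 */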
476
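/*
 * Convert a device context index into an input context index: input
 * context entry 0 is the Input Control Context (XHCI_ICI_INPUT_CONTROL),
 * so every device context is shifted up by one.
 */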
477 static inline u_int
478 xhci_dci_to_ici(const u_int i)
479 {
480 return i + 1;
481 }
482
483 static inline void *
484 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
485 const u_int dci)
486 {
487 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
488 }
489
490 #if 0 /* unused */
491 static inline bus_addr_t
492 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
493 const u_int dci)
494 {
495 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
496 }
497 #endif /* unused */
498
499 static inline void *
500 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
501 const u_int ici)
502 {
503 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
504 }
505
506 static inline bus_addr_t
507 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
508 const u_int ici)
509 {
510 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
511 }
512
513 static inline struct xhci_trb *
514 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
515 {
516 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
517 }
518
519 static inline bus_addr_t
520 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
521 {
522 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
523 }
524
525 static inline void
526 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx,
527 uint64_t parameter, uint32_t status, uint32_t control)
528 {
529 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb);
530 xx->xx_trb[idx].trb_0 = parameter;
531 xx->xx_trb[idx].trb_2 = status;
532 xx->xx_trb[idx].trb_3 = control;
533 }
534
535 static inline void
536 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
537 uint32_t control)
538 {
539 trb->trb_0 = htole64(parameter);
540 trb->trb_2 = htole32(status);
541 trb->trb_3 = htole32(control);
542 }
543
544 static int
545 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
546 {
547 /* base address of TRBs */
548 bus_addr_t trbp = xhci_ring_trbp(xr, 0);
549
550 /* trb_0 range sanity check */
551 if (trb_0 == 0 || trb_0 < trbp ||
552 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
553 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
554 return 1;
555 }
556 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
557 return 0;
558 }
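/*
 * Illustrative example (hypothetical addresses): with the ring based at
 * trbp == 0x1000 and sizeof(struct xhci_trb) == 16, an event trb_0 of
 * 0x1030 yields index (0x1030 - 0x1000) / 16 == 3; addresses outside
 * the ring, or not TRB-aligned, are rejected above.
 */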
559
560 static unsigned int
561 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
562 u_int dci)
563 {
564 uint32_t *cp;
565
566 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
567 cp = xhci_slot_get_dcv(sc, xs, dci);
568 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
569 }
570
571 static inline unsigned int
572 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
573 {
574 const unsigned int port = ctlrport - 1;
575 const uint8_t bit = __BIT(port % NBBY);
576
577 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
578 }
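/*
 * Example (illustrative): with NBBY == 8, controller port 10 maps to
 * sc_ctlrportbus[1] bit 1; a set bit selects bus 1 (USB 2), a clear bit
 * selects bus 0 (USB 3), matching the assignment in xhci_id_protocols().
 */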
579
580 /*
581 * Return the roothub port for a controller port. Both are 1..n.
582 */
583 static inline unsigned int
584 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
585 {
586
587 return sc->sc_ctlrportmap[ctrlport - 1];
588 }
589
590 /*
591 * Return the controller port for a bus roothub port. Both are 1..n.
592 */
593 static inline unsigned int
594 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
595 unsigned int rhport)
596 {
597
598 return sc->sc_rhportmap[bn][rhport - 1];
599 }
600
601 /* --- */
602
603 void
604 xhci_childdet(device_t self, device_t child)
605 {
606 struct xhci_softc * const sc = device_private(self);
607
608 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
609 if (child == sc->sc_child2)
610 sc->sc_child2 = NULL;
611 else if (child == sc->sc_child)
612 sc->sc_child = NULL;
613 }
614
615 int
616 xhci_detach(struct xhci_softc *sc, int flags)
617 {
618 int rv = 0;
619
620 if (sc->sc_child2 != NULL) {
621 rv = config_detach(sc->sc_child2, flags);
622 if (rv != 0)
623 return rv;
624 KASSERT(sc->sc_child2 == NULL);
625 }
626
627 if (sc->sc_child != NULL) {
628 rv = config_detach(sc->sc_child, flags);
629 if (rv != 0)
630 return rv;
631 KASSERT(sc->sc_child == NULL);
632 }
633
634 /* XXX unconfigure/free slots */
635
636 /* verify: */
637 xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
638 xhci_op_write_4(sc, XHCI_USBCMD, 0);
639 /* do we need to wait for stop? */
640
641 xhci_op_write_8(sc, XHCI_CRCR, 0);
642 xhci_ring_free(sc, &sc->sc_cr);
643 cv_destroy(&sc->sc_command_cv);
644 cv_destroy(&sc->sc_cmdbusy_cv);
645
646 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
647 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
648 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY);
649 xhci_ring_free(sc, &sc->sc_er);
650
651 usb_freemem(&sc->sc_eventst_dma);
652
653 xhci_op_write_8(sc, XHCI_DCBAAP, 0);
654 usb_freemem(&sc->sc_dcbaa_dma);
655
656 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);
657
658 kmem_free(sc->sc_ctlrportbus,
659 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
660 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));
661
662 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
663 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
664 }
665
666 mutex_destroy(&sc->sc_lock);
667 mutex_destroy(&sc->sc_intr_lock);
668
669 pool_cache_destroy(sc->sc_xferpool);
670
671 return rv;
672 }
673
674 int
675 xhci_activate(device_t self, enum devact act)
676 {
677 struct xhci_softc * const sc = device_private(self);
678
679 switch (act) {
680 case DVACT_DEACTIVATE:
681 sc->sc_dying = true;
682 return 0;
683 default:
684 return EOPNOTSUPP;
685 }
686 }
687
688 bool
689 xhci_suspend(device_t self, const pmf_qual_t *qual)
690 {
691 struct xhci_softc * const sc = device_private(self);
692 size_t i, j, bn, dci;
693 int port;
694 uint32_t v;
695 usbd_status err;
696 bool ok = false;
697
698 XHCIHIST_FUNC(); XHCIHIST_CALLED();
699
700 /*
701 * Block issuance of new commands, and wait for all pending
702 * commands to complete.
703 */
704 mutex_enter(&sc->sc_lock);
705 KASSERT(sc->sc_suspender == NULL);
706 sc->sc_suspender = curlwp;
707 while (sc->sc_command_addr != 0)
708 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
709 mutex_exit(&sc->sc_lock);
710
711 /*
712 * Block roothub xfers which might touch portsc registers until
713 * we're done suspending.
714 */
715 mutex_enter(&sc->sc_rhlock);
716
717 /*
718 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
719 * xHCI Power Management, p. 342
720 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342
721 */
722
723 /*
724 * `1. Stop all USB activity by issuing Stop Endpoint Commands
725 * for Busy endpoints in the Running state. If the Force
726 * Save Context Capability (FSC = ``0'') is not supported,
727 * then Stop Endpoint Commands shall be issued for all idle
728 * endpoints in the Running state as well. The Stop
729 * Endpoint Command causes the xHC to update the respective
730 * Endpoint or Stream Contexts in system memory, e.g. the
731 * TR Dequeue Pointer, DCS, etc. fields. Refer to
732 * Implementation Note "0".'
733 */
734 for (i = 0; i < sc->sc_maxslots; i++) {
735 struct xhci_slot *xs = &sc->sc_slots[i];
736
737 /* Skip if the slot is not in use. */
738 if (xs->xs_idx == 0)
739 continue;
740
741 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
742 /* Skip if the endpoint is not Running. */
743 /* XXX What about Busy? */
744 if (xhci_get_epstate(sc, xs, dci) !=
745 XHCI_EPSTATE_RUNNING)
746 continue;
747
748 /* Stop endpoint. */
749 mutex_enter(&sc->sc_lock);
750 err = xhci_stop_endpoint_cmd(sc, xs, dci,
751 XHCI_TRB_3_SUSP_EP_BIT);
752 mutex_exit(&sc->sc_lock);
753 if (err) {
754 device_printf(self, "failed to stop endpoint"
755 " slot %zu dci %zu err %d\n",
756 i, dci, err);
757 goto out;
758 }
759 }
760 }
761
762 /*
763 * Next, suspend all the ports:
764 *
765 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15:
766 * Suspend-Resume, pp. 276-283
767 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276
768 */
769 for (bn = 0; bn < 2; bn++) {
770 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
771 /* 4.15.1: Port Suspend. */
772 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
773
774 /*
775 * `System software places individual ports
776 * into suspend mode by writing a ``3'' into
777 * the appropriate PORTSC register Port Link
778 * State (PLS) field (refer to Section 5.4.8).
779 * Software should only set the PLS field to
780 * ``3'' when the port is in the Enabled
781 * state.'
782 *
783 * `Software should not attempt to suspend a
784 * port unless the port reports that it is in
785 * the enabled (PED = ``1''; PLS < ``3'')
786 * state (refer to Section 5.4.8 for more
787 * information about PED and PLS).'
788 */
789 v = xhci_op_read_4(sc, port);
790 if (((v & XHCI_PS_PED) == 0) ||
791 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3)
792 continue;
793 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
794 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3);
795 xhci_op_write_4(sc, port, v);
796
797 /*
798 * `When the PLS field is written with U3
799 * (``3''), the status of the PLS bit will not
800 * change to the target U state U3 until the
801 * suspend signaling has completed to the
802 * attached device (which may be as long as
803 * 10ms.).'
804 *
805 * `Software is required to wait for U3
806 * transitions to complete before it puts the
807 * xHC into a low power state, and before
808 * resuming the port.'
809 *
810 * XXX Take advantage of the technique to
811 * reduce polling on host controllers that
812 * support the U3C capability.
813 */
814 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) {
815 v = xhci_op_read_4(sc, port);
816 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3)
817 break;
818 usb_delay_ms(&sc->sc_bus, 1);
819 }
820 if (j == XHCI_WAIT_PLS_U3) {
821 device_printf(self,
822 "suspend timeout on bus %zu port %zu\n",
823 bn, i);
824 goto out;
825 }
826 }
827 }
828
829 /*
830 * `2. Ensure that the Command Ring is in the Stopped state
831 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is
832 * empty), and all Command Completion Events associated
833 * with them have been received.'
834 *
835 * XXX
836 */
837
838 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */
839 xhci_op_write_4(sc, XHCI_USBCMD,
840 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS);
841
842 /*
843 * `4. Read the Operational Runtime, and VTIO registers in the
844 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ,
845 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their
846 * state.'
847 *
848 * (We don't use VTIO here (XXX for now?).)
849 */
850 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
851 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL);
852 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP);
853 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG);
854 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0));
855 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0));
856 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0));
857 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0));
858 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0));
859
860 /*
861 * `5. Set the Controller Save State (CSS) flag in the USBCMD
862 * register (5.4.1)...'
863 */
864 xhci_op_write_4(sc, XHCI_USBCMD,
865 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS);
866
867 /*
868 * `...and wait for the Save State Status (SSS) flag in the
869 * USBSTS register (5.4.2) to transition to ``0''.'
870 */
871 for (i = 0; i < XHCI_WAIT_SSS; i++) {
872 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0)
873 break;
874 usb_delay_ms(&sc->sc_bus, 1);
875 }
876 if (i >= XHCI_WAIT_SSS) {
877 device_printf(self, "suspend timeout, USBSTS.SSS\n");
878 /*
879 * Just optimistically go on and check SRE anyway --
880 * what's the worst that could happen?
881 */
882 }
883
884 /*
885 * `Note: After a Save or Restore operation completes, the
886 * Save/Restore Error (SRE) flag in the USBSTS register should
887 * be checked to ensure that the operation completed
888 * successfully.'
889 */
890 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
891 device_printf(self, "suspend error, USBSTS.SRE\n");
892 goto out;
893 }
894
895 /* Success! */
896 ok = true;
897
898 out: mutex_exit(&sc->sc_rhlock);
899 return ok;
900 }
901
902 bool
903 xhci_resume(device_t self, const pmf_qual_t *qual)
904 {
905 struct xhci_softc * const sc = device_private(self);
906 size_t i, j, bn, dci;
907 int port;
908 uint32_t v;
909 bool ok = false;
910
911 XHCIHIST_FUNC(); XHCIHIST_CALLED();
912
913 KASSERT(sc->sc_suspender);
914
915 /*
916 * Block roothub xfers which might touch portsc registers until
917 * we're done resuming.
918 */
919 mutex_enter(&sc->sc_rhlock);
920
921 /*
922 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
923 * xHCI Power Management, p. 343
924 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343
925 */
926
927 /*
928 * `4. Restore the Operational Runtime, and VTIO registers with
929 * their previously saved state in the following order:
930 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN,
931 * IMOD, and VTIO.'
932 *
933 * (We don't use VTIO here (for now?).)
934 */
935 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd);
936 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl);
937 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap);
938 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config);
939 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0);
940 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0);
941 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0);
942 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0);
943 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0);
944
945 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */
946
947 /*
948 * `5. Set the Controller Restore State (CRS) flag in the
949 * USBCMD register (5.4.1) to ``1''...'
950 */
951 xhci_op_write_4(sc, XHCI_USBCMD,
952 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS);
953
954 /*
955 * `...and wait for the Restore State Status (RSS) in the
956 * USBSTS register (5.4.2) to transition to ``0''.'
957 */
958 for (i = 0; i < XHCI_WAIT_RSS; i++) {
959 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0)
960 break;
961 usb_delay_ms(&sc->sc_bus, 1);
962 }
963 if (i >= XHCI_WAIT_RSS) {
964 device_printf(self, "resume timeout, USBSTS.RSS\n");
965 goto out;
966 }
967
968 /*
969 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits
970 * are consistent with the RCS values to be written to the
971 * CRCR.'
972 *
973 * XXX Hope just zeroing it is good enough!
974 */
975 xhci_host_dequeue(sc->sc_cr);
976
977 /*
978 * `7. Write the CRCR with the address and RCS value of the
979 * reinitialized Command Ring. Note that this write will
980 * cause the Command Ring to restart at the address
981 * specified by the CRCR.'
982 */
983 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
984 sc->sc_cr->xr_cs);
985
986 /*
987 * `8. Enable the controller by setting Run/Stop (R/S) =
988 * ``1''.'
989 */
990 xhci_op_write_4(sc, XHCI_USBCMD,
991 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS);
992
993 /*
994 * `9. Software shall walk the USB topology and initialize each
995 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and
996 * external hub ports attached to USB devices.'
997 *
998 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2
999 * `Port Resume', 4.15.2.2 `Host Initiated'.
1000 *
1001 * XXX We should maybe batch up initiating the state
1002 * transitions, and then wait for them to complete all at once.
1003 */
1004 for (bn = 0; bn < 2; bn++) {
1005 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
1006 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
1007
1008 /* `When a port is in the U3 state: ...' */
1009 v = xhci_op_read_4(sc, port);
1010 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3)
1011 continue;
1012
1013 /*
1014 * `For a USB2 protocol port, software shall
1015 * write a ``15'' (Resume) to the PLS field to
1016 * initiate resume signaling. The port shall
1017 * transition to the Resume substate and the
1018 * xHC shall transmit the resume signaling
1019 * within 1ms (T_URSM). Software shall ensure
1020 * that resume is signaled for at least 20ms
1021 * (T_DRSMDN). Software shall start timing
1022 * T_DRSMDN from the write of ``15'' (Resume)
1023 * to PLS.'
1024 */
1025 if (bn == 1) {
1026 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0);
1027 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1028 v |= XHCI_PS_LWS;
1029 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME);
1030 xhci_op_write_4(sc, port, v);
1031 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
1032 } else {
1033 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0);
1034 }
1035
1036 /*
1037 * `For a USB3 protocol port [and a USB2
1038 * protocol port after transitioning to
1039 * Resume], software shall write a ``0'' (U0)
1040 * to the PLS field...'
1041 */
1042 v = xhci_op_read_4(sc, port);
1043 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1044 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0);
1045 xhci_op_write_4(sc, port, v);
1046
1047 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) {
1048 v = xhci_op_read_4(sc, port);
1049 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0)
1050 break;
1051 usb_delay_ms(&sc->sc_bus, 1);
1052 }
1053 if (j == XHCI_WAIT_PLS_U0) {
1054 device_printf(self,
1055 "resume timeout on bus %zu port %zu\n",
1056 bn, i);
1057 goto out;
1058 }
1059 }
1060 }
1061
1062 /*
1063 * `10. Restart each of the previously Running endpoints by
1064 * ringing their doorbells.'
1065 */
1066 for (i = 0; i < sc->sc_maxslots; i++) {
1067 struct xhci_slot *xs = &sc->sc_slots[i];
1068
1069 /* Skip if the slot is not in use. */
1070 if (xs->xs_idx == 0)
1071 continue;
1072
1073 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
1074 /* Skip if the endpoint is not Running. */
1075 if (xhci_get_epstate(sc, xs, dci) !=
1076 XHCI_EPSTATE_RUNNING)
1077 continue;
1078
1079 /* Ring the doorbell. */
1080 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
1081 }
1082 }
1083
1084 /*
1085 * `Note: After a Save or Restore operation completes, the
1086 * Save/Restore Error (SRE) flag in the USBSTS register should
1087 * be checked to ensure that the operation completed
1088 * successfully.'
1089 */
1090 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
1091 device_printf(self, "resume error, USBSTS.SRE\n");
1092 goto out;
1093 }
1094
1095 /* Resume command issuance. */
1096 mutex_enter(&sc->sc_lock);
1097 KASSERT(sc->sc_suspender);
1098 sc->sc_suspender = NULL;
1099 cv_broadcast(&sc->sc_cmdbusy_cv);
1100 mutex_exit(&sc->sc_lock);
1101
1102 /* Success! */
1103 ok = true;
1104
1105 out: mutex_exit(&sc->sc_rhlock);
1106 return ok;
1107 }
1108
1109 bool
1110 xhci_shutdown(device_t self, int flags)
1111 {
1112 return false;
1113 }
1114
1115 static int
1116 xhci_hc_reset(struct xhci_softc * const sc)
1117 {
1118 uint32_t usbcmd, usbsts;
1119 int i;
1120
1121 /* Check controller not ready */
1122 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1123 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1124 if ((usbsts & XHCI_STS_CNR) == 0)
1125 break;
1126 usb_delay_ms(&sc->sc_bus, 1);
1127 }
1128 if (i >= XHCI_WAIT_CNR) {
1129 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
1130 return EIO;
1131 }
1132
1133 /* Halt controller */
1134 usbcmd = 0;
1135 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1136 usb_delay_ms(&sc->sc_bus, 1);
1137
1138 /* Reset controller */
1139 usbcmd = XHCI_CMD_HCRST;
1140 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1141 for (i = 0; i < XHCI_WAIT_HCRST; i++) {
1142 /*
  1143		 * Wait 1ms first.  Some existing Intel xHCI controllers require a
  1144		 * 1ms delay to prevent a system hang (errata).
1145 */
1146 usb_delay_ms(&sc->sc_bus, 1);
1147 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
1148 if ((usbcmd & XHCI_CMD_HCRST) == 0)
1149 break;
1150 }
1151 if (i >= XHCI_WAIT_HCRST) {
1152 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
1153 return EIO;
1154 }
1155
1156 /* Check controller not ready */
1157 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1158 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1159 if ((usbsts & XHCI_STS_CNR) == 0)
1160 break;
1161 usb_delay_ms(&sc->sc_bus, 1);
1162 }
1163 if (i >= XHCI_WAIT_CNR) {
1164 aprint_error_dev(sc->sc_dev,
1165 "controller not ready timeout after reset\n");
1166 return EIO;
1167 }
1168
1169 return 0;
1170 }
1171
1172 /* 7.2 xHCI Support Protocol Capability */
1173 static void
1174 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
1175 {
1176 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1177
1178 /* XXX Cache this lot */
1179
1180 const uint32_t w0 = xhci_read_4(sc, ecp);
1181 const uint32_t w4 = xhci_read_4(sc, ecp + 4);
1182 const uint32_t w8 = xhci_read_4(sc, ecp + 8);
1183 const uint32_t wc = xhci_read_4(sc, ecp + 0xc);
1184
1185 aprint_debug_dev(sc->sc_dev,
1186 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc);
1187
1188 if (w4 != XHCI_XECP_USBID)
1189 return;
1190
1191 const int major = XHCI_XECP_SP_W0_MAJOR(w0);
1192 const int minor = XHCI_XECP_SP_W0_MINOR(w0);
1193 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
1194 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);
1195
1196 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
1197 switch (mm) {
1198 case 0x0200:
1199 case 0x0300:
1200 case 0x0301:
1201 case 0x0310:
1202 case 0x0320:
1203 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
1204 major == 3 ? "ss" : "hs", cpo, cpo + cpc -1);
1205 break;
1206 default:
1207 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
1208 major, minor);
1209 return;
1210 }
1211
1212 const size_t bus = (major == 3) ? 0 : 1;
1213
1214 /* Index arrays with 0..n-1 where ports are numbered 1..n */
1215 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
1216 if (sc->sc_ctlrportmap[cp] != 0) {
1217 aprint_error_dev(sc->sc_dev, "controller port %zu "
  1218			    "already assigned\n", cp);
1219 continue;
1220 }
1221
1222 sc->sc_ctlrportbus[cp / NBBY] |=
1223 bus == 0 ? 0 : __BIT(cp % NBBY);
1224
1225 const size_t rhp = sc->sc_rhportcount[bus]++;
1226
1227 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
1228 "bus %zu rhp %zu is %d", bus, rhp,
1229 sc->sc_rhportmap[bus][rhp]);
1230
1231 sc->sc_rhportmap[bus][rhp] = cp + 1;
1232 sc->sc_ctlrportmap[cp] = rhp + 1;
1233 }
1234 }
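/*
 * Illustrative example of the mapping built above: a controller that
 * advertises a USB 3 protocol capability for ports 1-2 and a USB 2
 * capability for ports 3-6 ends up with
 *   sc_rhportmap[0] = { 1, 2 }           (USB 3 roothub)
 *   sc_rhportmap[1] = { 3, 4, 5, 6 }     (USB 2 roothub)
 *   sc_ctlrportmap  = { 1, 2, 1, 2, 3, 4 }
 */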
1235
1236 /* Process extended capabilities */
1237 static void
1238 xhci_ecp(struct xhci_softc *sc)
1239 {
1240 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1241
1242 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4;
1243 while (ecp != 0) {
1244 uint32_t ecr = xhci_read_4(sc, ecp);
1245 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
1246 switch (XHCI_XECP_ID(ecr)) {
1247 case XHCI_ID_PROTOCOLS: {
1248 xhci_id_protocols(sc, ecp);
1249 break;
1250 }
1251 case XHCI_ID_USB_LEGACY: {
1252 uint8_t bios_sem;
1253
1254 /* Take host controller ownership from BIOS */
1255 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
1256 if (bios_sem) {
1257 /* sets xHCI to be owned by OS */
1258 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
1259 aprint_debug_dev(sc->sc_dev,
1260 "waiting for BIOS to give up control\n");
1261 for (int i = 0; i < 5000; i++) {
1262 bios_sem = xhci_read_1(sc, ecp +
1263 XHCI_XECP_BIOS_SEM);
1264 if (bios_sem == 0)
1265 break;
1266 DELAY(1000);
1267 }
1268 if (bios_sem) {
1269 aprint_error_dev(sc->sc_dev,
1270 "timed out waiting for BIOS\n");
1271 }
1272 }
1273 break;
1274 }
1275 default:
1276 break;
1277 }
1278 ecr = xhci_read_4(sc, ecp);
1279 if (XHCI_XECP_NEXT(ecr) == 0) {
1280 ecp = 0;
1281 } else {
1282 ecp += XHCI_XECP_NEXT(ecr) * 4;
1283 }
1284 }
1285 }
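/*
 * Example of the walk above (illustrative values): an XECP field of 0x220
 * in HCCPARAMS places the first extended capability at byte offset
 * 0x220 * 4 = 0x880; each Next Capability Pointer is likewise counted in
 * 32-bit words, so a Next value of 4 advances 16 bytes.
 */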
1286
1287 #define XHCI_HCCPREV1_BITS \
1288 "\177\020" /* New bitmask */ \
1289 "f\020\020XECP\0" \
1290 "f\014\4MAXPSA\0" \
1291 "b\013CFC\0" \
1292 "b\012SEC\0" \
1293 "b\011SBD\0" \
1294 "b\010FSE\0" \
1295 "b\7NSS\0" \
1296 "b\6LTC\0" \
1297 "b\5LHRC\0" \
1298 "b\4PIND\0" \
1299 "b\3PPC\0" \
1300 "b\2CZC\0" \
1301 "b\1BNC\0" \
1302 "b\0AC64\0" \
1303 "\0"
1304 #define XHCI_HCCV1_x_BITS \
1305 "\177\020" /* New bitmask */ \
1306 "f\020\020XECP\0" \
1307 "f\014\4MAXPSA\0" \
1308 "b\013CFC\0" \
1309 "b\012SEC\0" \
1310 "b\011SPC\0" \
1311 "b\010PAE\0" \
1312 "b\7NSS\0" \
1313 "b\6LTC\0" \
1314 "b\5LHRC\0" \
1315 "b\4PIND\0" \
1316 "b\3PPC\0" \
1317 "b\2CSZ\0" \
1318 "b\1BNC\0" \
1319 "b\0AC64\0" \
1320 "\0"
1321
1322 #define XHCI_HCC2_BITS \
1323 "\177\020" /* New bitmask */ \
1324 "b\7ETC_TSC\0" \
1325 "b\6ETC\0" \
1326 "b\5CIC\0" \
1327 "b\4LEC\0" \
1328 "b\3CTC\0" \
1329 "b\2FSC\0" \
1330 "b\1CMC\0" \
1331 "b\0U3C\0" \
1332 "\0"
1333
1334 void
1335 xhci_start(struct xhci_softc *sc)
1336 {
1337 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
1338 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
1339 /* Intel xhci needs interrupt rate moderated. */
1340 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
1341 else
1342 xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
1343 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
1344 xhci_rt_read_4(sc, XHCI_IMOD(0)));
1345
1346 /* Go! */
1347 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
1348 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n",
1349 xhci_op_read_4(sc, XHCI_USBCMD));
1350 }
1351
1352 int
1353 xhci_init(struct xhci_softc *sc)
1354 {
1355 bus_size_t bsz;
1356 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff;
1357 uint32_t pagesize, config;
1358 int i = 0;
1359 uint16_t hciversion;
1360 uint8_t caplength;
1361
1362 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1363
1364 /* Set up the bus struct for the usb 3 and usb 2 buses */
1365 sc->sc_bus.ub_methods = &xhci_bus_methods;
1366 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
1367 sc->sc_bus.ub_usedma = true;
1368 sc->sc_bus.ub_hcpriv = sc;
1369
1370 sc->sc_bus2.ub_methods = &xhci_bus_methods;
1371 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
1372 sc->sc_bus2.ub_revision = USBREV_2_0;
1373 sc->sc_bus2.ub_usedma = true;
1374 sc->sc_bus2.ub_hcpriv = sc;
1375 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;
1376
1377 caplength = xhci_read_1(sc, XHCI_CAPLENGTH);
1378 hciversion = xhci_read_2(sc, XHCI_HCIVERSION);
1379
1380 if (hciversion < XHCI_HCIVERSION_0_96 ||
1381 hciversion >= 0x0200) {
1382 aprint_normal_dev(sc->sc_dev,
1383 "xHCI version %x.%x not known to be supported\n",
1384 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1385 } else {
1386 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
1387 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1388 }
1389
1390 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
1391 &sc->sc_cbh) != 0) {
1392 aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
1393 return ENOMEM;
1394 }
1395
1396 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
1397 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
1398 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
1399 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
1400 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
1401 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
1402 aprint_debug_dev(sc->sc_dev,
1403 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);
1404
1405 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
1406 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32;
1407
1408 char sbuf[128];
1409 if (hciversion < XHCI_HCIVERSION_1_0)
1410 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc);
1411 else
1412 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc);
1413 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
1414 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n",
1415 XHCI_HCC_XECP(sc->sc_hcc) * 4);
1416 if (hciversion >= XHCI_HCIVERSION_1_1) {
1417 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
1418 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2);
1419 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
1420 }
1421
1422 /* default all ports to bus 0, i.e. usb 3 */
1423 sc->sc_ctlrportbus = kmem_zalloc(
1424 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
1425 sc->sc_ctlrportmap = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1426
1427 /* controller port to bus roothub port map */
1428 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
1429 sc->sc_rhportmap[j] = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1430 }
1431
1432 /*
1433 * Process all Extended Capabilities
1434 */
1435 xhci_ecp(sc);
1436
1437 bsz = XHCI_PORTSC(sc->sc_maxports);
1438 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
1439 &sc->sc_obh) != 0) {
1440 aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
1441 return ENOMEM;
1442 }
1443
1444 dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
1445 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
1446 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
1447 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
1448 return ENOMEM;
1449 }
1450
1451 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
1452 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
1453 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
1454 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
1455 return ENOMEM;
1456 }
1457
1458 int rv;
1459 rv = xhci_hc_reset(sc);
1460 if (rv != 0) {
1461 return rv;
1462 }
1463
1464 if (sc->sc_vendor_init)
1465 sc->sc_vendor_init(sc);
1466
1467 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
1468 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
1469 pagesize = ffs(pagesize);
1470 if (pagesize == 0) {
1471 aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
1472 return EIO;
1473 }
1474 sc->sc_pgsz = 1 << (12 + (pagesize - 1));
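	/*
	 * Example: a PAGESIZE register value of 0x1 (only bit 0 set) gives
	 * ffs() == 1, so sc_pgsz = 1 << 12 = 4096 bytes, the common case.
	 */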
1475 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
1476 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
1477 (uint32_t)sc->sc_maxslots);
1478 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);
1479
1480 int err;
1481 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
1482 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
1483 if (sc->sc_maxspbuf != 0) {
1484 err = usb_allocmem(sc->sc_bus.ub_dmatag,
1485 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
1486 USBMALLOC_COHERENT | USBMALLOC_ZERO,
1487 &sc->sc_spbufarray_dma);
1488 if (err) {
1489 aprint_error_dev(sc->sc_dev,
1490 "spbufarray init fail, err %d\n", err);
1491 return ENOMEM;
1492 }
1493
1494 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
1495 sc->sc_maxspbuf, KM_SLEEP);
1496 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
1497 for (i = 0; i < sc->sc_maxspbuf; i++) {
1498 usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
1499 /* allocate contexts */
1500 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz,
1501 sc->sc_pgsz, USBMALLOC_COHERENT | USBMALLOC_ZERO,
1502 dma);
1503 if (err) {
1504 aprint_error_dev(sc->sc_dev,
1505 "spbufarray_dma init fail, err %d\n", err);
1506 rv = ENOMEM;
1507 goto bad1;
1508 }
1509 spbufarray[i] = htole64(DMAADDR(dma, 0));
1510 usb_syncmem(dma, 0, sc->sc_pgsz,
1511 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1512 }
1513
1514 usb_syncmem(&sc->sc_spbufarray_dma, 0,
1515 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
1516 }
1517
1518 config = xhci_op_read_4(sc, XHCI_CONFIG);
1519 config &= ~0xFF;
1520 config |= sc->sc_maxslots & 0xFF;
1521 xhci_op_write_4(sc, XHCI_CONFIG, config);
1522
1523 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
1524 XHCI_COMMAND_RING_SEGMENTS_ALIGN);
1525 if (err) {
1526 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n",
1527 err);
1528 rv = ENOMEM;
1529 goto bad1;
1530 }
1531
1532 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
1533 XHCI_EVENT_RING_SEGMENTS_ALIGN);
1534 if (err) {
1535 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
1536 err);
1537 rv = ENOMEM;
1538 goto bad2;
1539 }
1540
1541 usb_dma_t *dma;
1542 size_t size;
1543 size_t align;
1544
1545 dma = &sc->sc_eventst_dma;
1546 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
1547 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
1548 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
1549 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
1550 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1551 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1552 if (err) {
1553 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
1554 err);
1555 rv = ENOMEM;
1556 goto bad3;
1557 }
1558
1559 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
1560 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
1561 KERNADDR(&sc->sc_eventst_dma, 0),
1562 sc->sc_eventst_dma.udma_block->size);
1563
1564 dma = &sc->sc_dcbaa_dma;
1565 size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
1566 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
1567 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
1568 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1569 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1570 if (err) {
1571 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
1572 rv = ENOMEM;
1573 goto bad4;
1574 }
1575 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
1576 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
1577 KERNADDR(&sc->sc_dcbaa_dma, 0),
1578 sc->sc_dcbaa_dma.udma_block->size);
1579
1580 if (sc->sc_maxspbuf != 0) {
1581 /*
  1582		 * DCBAA entry 0 holds the scratchpad buffer array pointer.
1583 */
1584 *(uint64_t *)KERNADDR(dma, 0) =
1585 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
1586 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1587 }
1588
1589 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
1590 KM_SLEEP);
1591 if (sc->sc_slots == NULL) {
1592 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
1593 rv = ENOMEM;
1594 goto bad;
1595 }
1596
1597 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
1598 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
1599 if (sc->sc_xferpool == NULL) {
1600 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
1601 err);
1602 rv = ENOMEM;
1603 goto bad;
1604 }
1605
1606 cv_init(&sc->sc_command_cv, "xhcicmd");
1607 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
1608 mutex_init(&sc->sc_rhlock, MUTEX_DEFAULT, IPL_NONE);
1609 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
1610 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
1611
1612 struct xhci_erste *erst;
1613 erst = KERNADDR(&sc->sc_eventst_dma, 0);
1614 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0));
1615 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb);
1616 erst[0].erste_3 = htole32(0);
1617 usb_syncmem(&sc->sc_eventst_dma, 0,
1618 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);
1619
1620 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
1621 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
1622 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) |
1623 XHCI_ERDP_BUSY);
1624
1625 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
1626 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1627 sc->sc_cr->xr_cs);
1628
1629 xhci_barrier(sc, BUS_SPACE_BARRIER_WRITE);
1630
1631 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
1632 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);
1633
1634 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
1635 xhci_start(sc);
1636
1637 return 0;
1638
1639 bad:
1640 if (sc->sc_xferpool) {
1641 pool_cache_destroy(sc->sc_xferpool);
1642 sc->sc_xferpool = NULL;
1643 }
1644
1645 if (sc->sc_slots) {
1646 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
1647 sc->sc_maxslots);
1648 sc->sc_slots = NULL;
1649 }
1650
1651 usb_freemem(&sc->sc_dcbaa_dma);
1652 bad4:
1653 usb_freemem(&sc->sc_eventst_dma);
1654 bad3:
1655 xhci_ring_free(sc, &sc->sc_er);
1656 bad2:
1657 xhci_ring_free(sc, &sc->sc_cr);
1658 i = sc->sc_maxspbuf;
1659 bad1:
1660 for (int j = 0; j < i; j++)
1661 usb_freemem(&sc->sc_spbuf_dma[j]);
1662 usb_freemem(&sc->sc_spbufarray_dma);
1663
1664 return rv;
1665 }
1666
1667 static inline bool
1668 xhci_polling_p(struct xhci_softc * const sc)
1669 {
1670 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
1671 }
1672
1673 int
1674 xhci_intr(void *v)
1675 {
1676 struct xhci_softc * const sc = v;
1677 int ret = 0;
1678
1679 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1680
1681 if (sc == NULL)
1682 return 0;
1683
1684 mutex_spin_enter(&sc->sc_intr_lock);
1685
1686 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1687 goto done;
1688
1689 /* If we get an interrupt while polling, then just ignore it. */
1690 if (xhci_polling_p(sc)) {
1691 #ifdef DIAGNOSTIC
1692 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1693 #endif
1694 goto done;
1695 }
1696
1697 ret = xhci_intr1(sc);
1698 if (ret) {
1699 KASSERT(sc->sc_child || sc->sc_child2);
1700
1701 /*
1702 * One of child busses could be already detached. It doesn't
1703 * matter on which of the two the softintr is scheduled.
1704 */
1705 if (sc->sc_child)
1706 usb_schedsoftintr(&sc->sc_bus);
1707 else
1708 usb_schedsoftintr(&sc->sc_bus2);
1709 }
1710 done:
1711 mutex_spin_exit(&sc->sc_intr_lock);
1712 return ret;
1713 }
1714
1715 int
1716 xhci_intr1(struct xhci_softc * const sc)
1717 {
1718 uint32_t usbsts;
1719 uint32_t iman;
1720
1721 XHCIHIST_FUNC();
1722
1723 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1724 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
1725 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
1726 XHCI_STS_HCE)) == 0) {
1727 DPRINTFN(16, "ignored intr not for %jd",
1728 device_unit(sc->sc_dev), 0, 0, 0);
1729 return 0;
1730 }
1731
1732 /*
  1733	 * Clear EINT and other transient flags, to not misinterpret the
  1734	 * next shared interrupt.  Also, to avoid a race, EINT must be cleared
1735 * before XHCI_IMAN_INTR_PEND is cleared.
1736 */
1737 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & XHCI_STS_RSVDP0);
1738
1739 #ifdef XHCI_DEBUG
1740 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1741 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1742 #endif
1743
1744 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1745 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1746 iman |= XHCI_IMAN_INTR_PEND;
1747 xhci_rt_write_4(sc, XHCI_IMAN(0), iman);
1748
1749 #ifdef XHCI_DEBUG
1750 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1751 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1752 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1753 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1754 #endif
1755
1756 return 1;
1757 }
1758
1759 /*
1760 * 3 port speed types used in USB stack
1761 *
1762 * usbdi speed
1763 * definition: USB_SPEED_* in usb.h
1764 * They are used in struct usbd_device in USB stack.
1765 * ioctl interface uses these values too.
1766 * port_status speed
1767 * definition: UPS_*_SPEED in usb.h
1768 * They are used in usb_port_status_t and valid only for USB 2.0.
1769 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus
1770 * of usb_port_status_ext_t indicates port speed.
1771 * Note that some 3.0 values overlap with 2.0 values.
  1772	 *     (e.g. 0x200 means UPS_PORT_POWER_SS in SS and
  1773	 *      means UPS_LOW_SPEED in HS.)
1774 * port status returned from hub also uses these values.
1775 * On NetBSD UPS_OTHER_SPEED indicates port speed is super speed
1776 * or more.
1777 * xspeed:
1778 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
  1779	 *   They are used only in the slot context and PORTSC registers of xhci.
1780 * The difference between usbdi speed and xspeed is
1781 * that FS and LS values are swapped.
1782 */
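/*
 * Summary of the three representations, assuming the default PSI
 * assignments of xHCI 7.2.1 (illustrative, not exhaustive):
 *
 *		usbdi			xspeed (PSI)	port_status
 *	low	USB_SPEED_LOW		2		UPS_LOW_SPEED
 *	full	USB_SPEED_FULL		1		UPS_FULL_SPEED
 *	high	USB_SPEED_HIGH		3		UPS_HIGH_SPEED
 *	super+	USB_SPEED_SUPER		4		UPS_OTHER_SPEED (see above)
 */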
1783
1784 /* convert usbdi speed to xspeed */
1785 static int
1786 xhci_speed2xspeed(int speed)
1787 {
1788 switch (speed) {
1789 case USB_SPEED_LOW: return 2;
1790 case USB_SPEED_FULL: return 1;
1791 default: return speed;
1792 }
1793 }
1794
1795 #if 0
1796 /* convert xspeed to usbdi speed */
1797 static int
1798 xhci_xspeed2speed(int xspeed)
1799 {
1800 switch (xspeed) {
1801 case 1: return USB_SPEED_FULL;
1802 case 2: return USB_SPEED_LOW;
1803 default: return xspeed;
1804 }
1805 }
1806 #endif
1807
1808 /* convert xspeed to port status speed */
1809 static int
1810 xhci_xspeed2psspeed(int xspeed)
1811 {
1812 switch (xspeed) {
1813 case 0: return 0;
1814 case 1: return UPS_FULL_SPEED;
1815 case 2: return UPS_LOW_SPEED;
1816 case 3: return UPS_HIGH_SPEED;
1817 default: return UPS_OTHER_SPEED;
1818 }
1819 }
1820
1821 /*
1822 * Construct input contexts and issue TRB to open pipe.
1823 */
1824 static usbd_status
1825 xhci_configure_endpoint(struct usbd_pipe *pipe)
1826 {
1827 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1828 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1829 #ifdef USB_DEBUG
1830 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1831 #endif
1832 struct xhci_soft_trb trb;
1833 usbd_status err;
1834
1835 XHCIHIST_FUNC();
1836 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
1837 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
1838 pipe->up_endpoint->ue_edesc->bmAttributes);
1839
1840 /* XXX ensure input context is available? */
1841
1842 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz);
1843
1844 /* set up context */
1845 xhci_setup_ctx(pipe);
1846
1847 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0),
1848 sc->sc_ctxsz * 1);
1849 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs,
1850 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1);
1851
1852 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1853 trb.trb_2 = 0;
1854 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1855 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1856
1857 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
1858
1859 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1860 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci),
1861 sc->sc_ctxsz * 1);
1862
1863 return err;
1864 }
1865
1866 #if 0
1867 static usbd_status
1868 xhci_unconfigure_endpoint(struct usbd_pipe *pipe)
1869 {
1870 #ifdef USB_DEBUG
1871 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1872 #endif
1873
1874 XHCIHIST_FUNC();
1875 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
1876
1877 return USBD_NORMAL_COMPLETION;
1878 }
1879 #endif
1880
1881 /* 4.6.8, 6.4.3.7 */
1882 static void
1883 xhci_reset_endpoint(struct usbd_pipe *pipe)
1884 {
1885 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1886 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1887 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1888 struct xhci_soft_trb trb;
1889
1890 XHCIHIST_FUNC();
1891 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1892
1893 KASSERT(mutex_owned(&sc->sc_lock));
1894
1895 trb.trb_0 = 0;
1896 trb.trb_2 = 0;
1897 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1898 XHCI_TRB_3_EP_SET(dci) |
1899 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP);
1900
1901 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
1902 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
1903 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
1904 }
1905 }
1906
1907 /*
1908  * 4.6.9, 6.4.3.8
1909  * Stop execution of TDs on the transfer ring.
1910  * Must be called with sc_lock held.
1911  */
1912 static usbd_status
1913 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci,
1914 uint32_t trb3flags)
1915 {
1916 struct xhci_soft_trb trb;
1917 usbd_status err;
1918
1919 XHCIHIST_FUNC();
1920 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1921
1922 KASSERT(mutex_owned(&sc->sc_lock));
1923
1924 trb.trb_0 = 0;
1925 trb.trb_2 = 0;
1926 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1927 XHCI_TRB_3_EP_SET(dci) |
1928 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) |
1929 trb3flags;
1930
1931 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1932
1933 return err;
1934 }
1935
1936 static usbd_status
1937 xhci_stop_endpoint(struct usbd_pipe *pipe)
1938 {
1939 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1940 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1941 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1942
1943 XHCIHIST_FUNC();
1944 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1945
1946 KASSERT(mutex_owned(&sc->sc_lock));
1947
1948 return xhci_stop_endpoint_cmd(sc, xs, dci, 0);
1949 }
1950
1951 /*
1952  * Set TR Dequeue Pointer.
1953  * xHCI 1.1 4.6.10 6.4.3.9
1954  * Purge all of the TRBs on the ring and reinitialize the ring.
1955  * Set the TR Dequeue Pointer to the start of the ring and the Cycle State to 1.
1956  * The EPSTATE of the endpoint must be ERROR or STOPPED, otherwise a
1957  * CONTEXT_STATE error will be generated.
1958  */
1959 static void
1960 xhci_set_dequeue(struct usbd_pipe *pipe)
1961 {
1962 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1963 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1964 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1965 struct xhci_ring * const xr = xs->xs_xr[dci];
1966 struct xhci_soft_trb trb;
1967
1968 XHCIHIST_FUNC();
1969 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1970
1971 KASSERT(mutex_owned(&sc->sc_lock));
1972 KASSERT(xr != NULL);
1973
1974 xhci_host_dequeue(xr);
1975
1976 /* set DCS */
1977 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
1978 trb.trb_2 = 0;
1979 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1980 XHCI_TRB_3_EP_SET(dci) |
1981 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
1982
1983 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
1984 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
1985 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
1986 }
1987 }
1988
1989 /*
1990  * Open a new pipe: called from usbd_setup_pipe_flags.
1991  * Fills in the pipe methods and allocates the transfer ring.
1992  * If the pipe is not for ep0, also calls xhci_configure_endpoint.
1993  */
1994 static usbd_status
1995 xhci_open(struct usbd_pipe *pipe)
1996 {
1997 struct usbd_device * const dev = pipe->up_dev;
1998 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
1999 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
2000 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2001 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2002 const u_int dci = xhci_ep_get_dci(ed);
2003 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
2004 usbd_status err;
2005
2006 XHCIHIST_FUNC();
2007 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr,
2008 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed);
2009 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx",
2010 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress,
2011 ed->bmAttributes);
2012 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize),
2013 ed->bInterval, 0, 0);
2014
2015 if (sc->sc_dying)
2016 return USBD_IOERROR;
2017
2018 /* Root Hub */
2019 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) {
2020 switch (ed->bEndpointAddress) {
2021 case USB_CONTROL_ENDPOINT:
2022 pipe->up_methods = &roothub_ctrl_methods;
2023 break;
2024 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
2025 pipe->up_methods = &xhci_root_intr_methods;
2026 break;
2027 default:
2028 pipe->up_methods = NULL;
2029 DPRINTFN(0, "bad bEndpointAddress 0x%02jx",
2030 ed->bEndpointAddress, 0, 0, 0);
2031 return USBD_INVAL;
2032 }
2033 return USBD_NORMAL_COMPLETION;
2034 }
2035
2036 usb_init_task(&xpipe->xp_async_task, xhci_pipe_restart_async_task,
2037 pipe, USB_TASKQ_MPSAFE);
2038
2039 switch (xfertype) {
2040 case UE_CONTROL:
2041 pipe->up_methods = &xhci_device_ctrl_methods;
2042 break;
2043 case UE_ISOCHRONOUS:
2044 pipe->up_methods = &xhci_device_isoc_methods;
2045 pipe->up_serialise = false;
2046 xpipe->xp_isoc_next = -1;
2047 break;
2048 case UE_BULK:
2049 pipe->up_methods = &xhci_device_bulk_methods;
2050 break;
2051 case UE_INTERRUPT:
2052 pipe->up_methods = &xhci_device_intr_methods;
2053 break;
2054 default:
2055 return USBD_IOERROR;
2056 break;
2057 }
2058
2059 KASSERT(xs != NULL);
2060 KASSERT(xs->xs_xr[dci] == NULL);
2061
2062 /* allocate transfer ring */
2063 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS,
2064 XHCI_TRB_ALIGN);
2065 if (err) {
2066 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0);
2067 return err;
2068 }
2069
2070 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT)
2071 return xhci_configure_endpoint(pipe);
2072
2073 return USBD_NORMAL_COMPLETION;
2074 }
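/*
 * Note that ep0 is not configured in xhci_open(): only its transfer ring is
 * allocated there.  The ep0 endpoint context is filled in and handed to the
 * controller by the Address Device command issued from xhci_set_address().
 */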
2075
2076 /*
2077  * Close a pipe: called from usbd_kill_pipe via the close methods.
2078  * If the endpoint being closed is ep0, the whole slot is disabled.
2079  * Must be called with sc_lock held.
2080  */
2081 static void
2082 xhci_close_pipe(struct usbd_pipe *pipe)
2083 {
2084 struct xhci_pipe * const xp =
2085 container_of(pipe, struct xhci_pipe, xp_pipe);
2086 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2087 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2088 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2089 const u_int dci = xhci_ep_get_dci(ed);
2090 struct xhci_soft_trb trb;
2091 uint32_t *cp;
2092
2093 XHCIHIST_FUNC();
2094
2095 usb_rem_task_wait(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC,
2096 &sc->sc_lock);
2097
2098 if (sc->sc_dying)
2099 return;
2100
2101 /* xs is uninitialized before xhci_init_slot */
2102 if (xs == NULL || xs->xs_idx == 0)
2103 return;
2104
2105 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2106 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2107
2108 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
2109 KASSERT(mutex_owned(&sc->sc_lock));
2110
2111 if (pipe->up_dev->ud_depth == 0)
2112 return;
2113
2114 if (dci == XHCI_DCI_EP_CONTROL) {
2115 DPRINTFN(4, "closing ep0", 0, 0, 0, 0);
2116 /* This frees all rings */
2117 xhci_disable_slot(sc, xs->xs_idx);
2118 return;
2119 }
2120
2121 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED)
2122 (void)xhci_stop_endpoint(pipe);
2123
2124 	/*
2125 	 * Set the drop flag for this endpoint in the input control context.
2126 	 * Don't set the DC (deconfigure) bit in the command to 1, otherwise
2127 	 * all endpoints would be deconfigured.
2128 	 */
2129 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
2130 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));
2131 cp[1] = htole32(0);
2132
2133 /* XXX should be most significant one, not dci? */
2134 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
2135 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci));
2136
2137 /* configure ep context performs an implicit dequeue */
2138 xhci_host_dequeue(xs->xs_xr[dci]);
2139
2140 /* sync input contexts before they are read from memory */
2141 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
2142
2143 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
2144 trb.trb_2 = 0;
2145 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2146 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
2147
2148 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
2149 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2150
2151 xhci_ring_free(sc, &xs->xs_xr[dci]);
2152 xs->xs_xr[dci] = NULL;
2153 }
2154
2155 /*
2156 * Abort transfer. Must be called with sc_lock held. Releases and
2157 * reacquires sc_lock to sleep until hardware acknowledges abort.
2158 */
2159 static void
2160 xhci_abortx(struct usbd_xfer *xfer)
2161 {
2162 XHCIHIST_FUNC();
2163 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2164
2165 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
2166 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
2167
2168 KASSERT(mutex_owned(&sc->sc_lock));
2169 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2170 xfer->ux_status == USBD_TIMEOUT),
2171 "bad abort status: %d", xfer->ux_status);
2172
2173 xhci_pipe_restart(xfer->ux_pipe);
2174
2175 DPRINTFN(14, "end", 0, 0, 0, 0);
2176 }
2177
2178 static void
2179 xhci_host_dequeue(struct xhci_ring * const xr)
2180 {
2181 /* When dequeueing the controller, update our struct copy too */
2182 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE);
2183 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE,
2184 BUS_DMASYNC_PREWRITE);
2185 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies));
2186
2187 xr->xr_ep = 0;
2188 xr->xr_cs = 1;
2189 }
2190
2191 /*
2192  * Recover a STALLed endpoint, or stop the endpoint to abort a pipe.
2193  * xHCI 1.1 sect 4.10.2.1
2194  * Issue RESET_EP to recover from the halt condition and SET_TR_DEQUEUE to
2195  * remove all transfers from the transfer ring.
2196  */
2197 static void
2198 xhci_pipe_restart(struct usbd_pipe *pipe)
2199 {
2200 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2201 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2202 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2203
2204 XHCIHIST_FUNC();
2205 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2206 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2207
2208 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2209
2210 /*
2211 * - If the endpoint is halted, indicating a stall, reset it.
2212 * - If the endpoint is stopped, we're already good.
2213 * - Otherwise, someone wanted to abort the pipe, so stop the
2214 * endpoint.
2215 *
2216 * In any case, clear the ring.
2217 */
2218 switch (xhci_get_epstate(sc, xs, dci)) {
2219 case XHCI_EPSTATE_HALTED:
2220 xhci_reset_endpoint(pipe);
2221 break;
2222 case XHCI_EPSTATE_STOPPED:
2223 break;
2224 default:
2225 xhci_stop_endpoint(pipe);
2226 break;
2227 }
2228
2229 switch (xhci_get_epstate(sc, xs, dci)) {
2230 case XHCI_EPSTATE_STOPPED:
2231 break;
2232 case XHCI_EPSTATE_ERROR:
2233 device_printf(sc->sc_dev, "endpoint 0x%x error\n",
2234 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2235 break;
2236 default:
2237 device_printf(sc->sc_dev, "endpoint 0x%x failed to stop\n",
2238 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2239 }
2240
2241 xhci_set_dequeue(pipe);
2242
2243 DPRINTFN(4, "ends", 0, 0, 0, 0);
2244 }
2245
2246 static void
2247 xhci_pipe_restart_async_task(void *cookie)
2248 {
2249 struct usbd_pipe * const pipe = cookie;
2250 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2251 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2252 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2253 struct xhci_ring * const tr = xs->xs_xr[dci];
2254 struct usbd_xfer *xfer;
2255
2256 mutex_enter(&sc->sc_lock);
2257
2258 xhci_pipe_restart(pipe);
2259
2260 /*
2261 * We halted our own queue because it stalled. Mark it no
2262 * longer halted and start issuing queued transfers again.
2263 */
2264 tr->is_halted = false;
2265 xfer = SIMPLEQ_FIRST(&pipe->up_queue);
2266 if (xfer)
2267 (*pipe->up_methods->upm_start)(xfer);
2268
2269 mutex_exit(&sc->sc_lock);
2270 }
2271
2272 static void
2273 xhci_pipe_restart_async(struct usbd_pipe *pipe)
2274 {
2275 struct xhci_pipe * const xp =
2276 container_of(pipe, struct xhci_pipe, xp_pipe);
2277 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2278 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2279 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2280 struct xhci_ring * const tr = xs->xs_xr[dci];
2281
2282 XHCIHIST_FUNC();
2283 XHCIHIST_CALLARGS("pipe %#jx", (uintptr_t)pipe, 0, 0, 0);
2284
2285 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2286
2287 tr->is_halted = true;
2288 usb_add_task(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC);
2289
2290 DPRINTFN(4, "ends", 0, 0, 0, 0);
2291 }
2292
2293 /* Process root hub port status/change events and notify uhub_intr. */
2294 static void
2295 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport)
2296 {
2297 XHCIHIST_FUNC();
2298 XHCIHIST_CALLARGS("xhci%jd: port %ju status change",
2299 device_unit(sc->sc_dev), ctlrport, 0, 0);
2300
2301 if (ctlrport > sc->sc_maxports)
2302 return;
2303
2304 const size_t bn = xhci_ctlrport2bus(sc, ctlrport);
2305 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport);
2306 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn];
2307
2308 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change",
2309 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer);
2310
2311 if (xfer == NULL)
2312 return;
2313 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
2314
2315 uint8_t *p = xfer->ux_buf;
2316 memset(p, 0, xfer->ux_length);
2317 p[rhp / NBBY] |= 1 << (rhp % NBBY);
2318 xfer->ux_actlen = xfer->ux_length;
2319 xfer->ux_status = USBD_NORMAL_COMPLETION;
2320 usb_transfer_complete(xfer);
2321 }
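/*
 * The completed buffer is the usual hub status-change bitmap: bit N set means
 * root hub port N changed state, and uhub_intr() will then query the
 * individual port status through the root hub control pipe.
 */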
2322
2323 /* Process Transfer Events */
2324 static void
2325 xhci_event_transfer(struct xhci_softc * const sc,
2326 const struct xhci_trb * const trb)
2327 {
2328 uint64_t trb_0;
2329 uint32_t trb_2, trb_3;
2330 uint8_t trbcode;
2331 u_int slot, dci;
2332 struct xhci_slot *xs;
2333 struct xhci_ring *xr;
2334 struct xhci_xfer *xx;
2335 struct usbd_xfer *xfer;
2336 usbd_status err;
2337
2338 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2339
2340 trb_0 = le64toh(trb->trb_0);
2341 trb_2 = le32toh(trb->trb_2);
2342 trb_3 = le32toh(trb->trb_3);
2343 trbcode = XHCI_TRB_2_ERROR_GET(trb_2);
2344 slot = XHCI_TRB_3_SLOT_GET(trb_3);
2345 dci = XHCI_TRB_3_EP_GET(trb_3);
2346 xs = &sc->sc_slots[slot];
2347 xr = xs->xs_xr[dci];
2348
2349 /* sanity check */
2350 KASSERT(xr != NULL);
2351 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots,
2352 "invalid xs_idx %u slot %u", xs->xs_idx, slot);
2353
2354 int idx = 0;
2355 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2356 if (xhci_trb_get_idx(xr, trb_0, &idx)) {
2357 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0);
2358 return;
2359 }
2360 xx = xr->xr_cookies[idx];
2361
2362 /* clear cookie of consumed TRB */
2363 xr->xr_cookies[idx] = NULL;
2364
2365 		/*
2366 		 * xx is NULL if the pipe has been opened but no xfer has been
2367 		 * started yet; this happens when stopping an idle pipe.
2368 		 */
2369 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) {
2370 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju",
2371 idx, (uintptr_t)xx, trbcode, dci);
2372 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0,
2373 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)),
2374 0, 0);
2375 return;
2376 }
2377 } else {
2378 		/* When ED != 0, trb_0 is the virtual address of the struct xhci_xfer. */
2379 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
2380 }
2381 /* XXX this may not happen */
2382 if (xx == NULL) {
2383 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
2384 return;
2385 }
2386 xfer = &xx->xx_xfer;
2387 /* XXX this may happen when detaching */
2388 if (xfer == NULL) {
2389 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2390 (uintptr_t)xx, trb_0, 0, 0);
2391 return;
2392 }
2393 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2394 /* XXX I dunno why this happens */
2395 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2396
2397 if (!xfer->ux_pipe->up_repeat &&
2398 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2399 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2400 0, 0, 0);
2401 return;
2402 }
2403
2404 const uint8_t xfertype =
2405 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2406
2407 /* 4.11.5.2 Event Data TRB */
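	/*
	 * For control transfers the driver queues an Event Data TRB whose
	 * parameter is the xhci_xfer pointer with a stage tag in its low bits
	 * (set up in xhci_device_ctrl_start): 0x3 marks the Data stage event
	 * (record the transferred length, don't complete) and 0x0 the Status
	 * stage event that completes the xfer.
	 */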
2408 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2409 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2410 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2411 if ((trb_0 & 0x3) == 0x3) {
2412 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2413 }
2414 }
2415
2416 switch (trbcode) {
2417 case XHCI_TRB_ERROR_SHORT_PKT:
2418 case XHCI_TRB_ERROR_SUCCESS:
2419 		/*
2420 		 * A ctrl transfer can generate two events if it has a Data
2421 		 * stage.  A short data stage can be OK and should not
2422 		 * complete the transfer, as the status stage still needs to
2423 		 * be performed.
2424 		 *
2425 		 * Note: Data and Status stage events point at the same xfer.
2426 		 * ux_actlen and ux_dmabuf will be passed to
2427 		 * usb_transfer_complete after the Status stage event.
2428 		 *
2429 		 * Which stage generated the event can be distinguished:
2430 		 * + by checking the least significant 3 bits of trb_0 if ED==1
2431 		 *   (see xhci_device_ctrl_start); or
2432 		 * + by checking the type of the original TRB if ED==0.
2433 		 *
2434 		 * In addition, intr, bulk, and isoc transfers currently
2435 		 * consist of a single TD, so the "skip" is not needed there.
2436 		 * Only ctrl xfers use EVENT_DATA TRBs, so the driver can
2437 		 * switch between the two flows by checking the ED bit.
2438 		 */
2439 if (xfertype == UE_ISOCHRONOUS) {
2440 xfer->ux_frlengths[xx->xx_isoc_done] -=
2441 XHCI_TRB_2_REM_GET(trb_2);
2442 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2443 if (++xx->xx_isoc_done < xfer->ux_nframes)
2444 return;
2445 } else
2446 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2447 if (xfer->ux_actlen == 0)
2448 xfer->ux_actlen = xfer->ux_length -
2449 XHCI_TRB_2_REM_GET(trb_2);
2450 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2451 == XHCI_TRB_TYPE_DATA_STAGE) {
2452 return;
2453 }
2454 } else if ((trb_0 & 0x3) == 0x3) {
2455 return;
2456 }
2457 err = USBD_NORMAL_COMPLETION;
2458 break;
2459 case XHCI_TRB_ERROR_STOPPED:
2460 case XHCI_TRB_ERROR_LENGTH:
2461 case XHCI_TRB_ERROR_STOPPED_SHORT:
2462 err = USBD_IOERROR;
2463 break;
2464 case XHCI_TRB_ERROR_STALL:
2465 case XHCI_TRB_ERROR_BABBLE:
2466 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2467 xhci_pipe_restart_async(xfer->ux_pipe);
2468 err = USBD_STALLED;
2469 break;
2470 default:
2471 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2472 err = USBD_IOERROR;
2473 break;
2474 }
2475
2476 /*
2477 * Try to claim this xfer for completion. If it has already
2478 * completed or aborted, drop it on the floor.
2479 */
2480 if (!usbd_xfer_trycomplete(xfer))
2481 return;
2482
2483 /* Set the status. */
2484 xfer->ux_status = err;
2485
2486 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
2487 (trb_0 & 0x3) == 0x0) {
2488 usb_transfer_complete(xfer);
2489 }
2490 }
2491
2492 /* Process Command complete events */
2493 static void
2494 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb)
2495 {
2496 uint64_t trb_0;
2497 uint32_t trb_2, trb_3;
2498
2499 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2500
2501 KASSERT(mutex_owned(&sc->sc_lock));
2502
2503 trb_0 = le64toh(trb->trb_0);
2504 trb_2 = le32toh(trb->trb_2);
2505 trb_3 = le32toh(trb->trb_3);
2506
2507 if (trb_0 == sc->sc_command_addr) {
2508 sc->sc_resultpending = false;
2509
2510 sc->sc_result_trb.trb_0 = trb_0;
2511 sc->sc_result_trb.trb_2 = trb_2;
2512 sc->sc_result_trb.trb_3 = trb_3;
2513 if (XHCI_TRB_2_ERROR_GET(trb_2) !=
2514 XHCI_TRB_ERROR_SUCCESS) {
2515 DPRINTFN(1, "command completion "
2516 "failure: 0x%016jx 0x%08jx 0x%08jx",
2517 trb_0, trb_2, trb_3, 0);
2518 }
2519 cv_signal(&sc->sc_command_cv);
2520 } else {
2521 DPRINTFN(1, "spurious event: %#jx 0x%016jx "
2522 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3);
2523 }
2524 }
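/*
 * Note that only one command is outstanding at a time (serialized via
 * sc_command_addr in xhci_do_command_locked), so a completion whose TRB
 * pointer does not match the pending command can only be spurious.
 */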
2525
2526 /*
2527  * Process one event TRB.
2528  * Called from xhci_softintr.
2529  */
2530 static void
2531 xhci_handle_event(struct xhci_softc * const sc,
2532 const struct xhci_trb * const trb)
2533 {
2534 uint64_t trb_0;
2535 uint32_t trb_2, trb_3;
2536
2537 XHCIHIST_FUNC();
2538
2539 trb_0 = le64toh(trb->trb_0);
2540 trb_2 = le32toh(trb->trb_2);
2541 trb_3 = le32toh(trb->trb_3);
2542
2543 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx",
2544 (uintptr_t)trb, trb_0, trb_2, trb_3);
2545
2546 /*
2547 * 4.11.3.1, 6.4.2.1
2548 * TRB Pointer is invalid for these completion codes.
2549 */
2550 switch (XHCI_TRB_2_ERROR_GET(trb_2)) {
2551 case XHCI_TRB_ERROR_RING_UNDERRUN:
2552 case XHCI_TRB_ERROR_RING_OVERRUN:
2553 case XHCI_TRB_ERROR_VF_RING_FULL:
2554 return;
2555 default:
2556 if (trb_0 == 0) {
2557 return;
2558 }
2559 break;
2560 }
2561
2562 switch (XHCI_TRB_3_TYPE_GET(trb_3)) {
2563 case XHCI_TRB_EVENT_TRANSFER:
2564 xhci_event_transfer(sc, trb);
2565 break;
2566 case XHCI_TRB_EVENT_CMD_COMPLETE:
2567 xhci_event_cmd(sc, trb);
2568 break;
2569 case XHCI_TRB_EVENT_PORT_STS_CHANGE:
2570 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff));
2571 break;
2572 default:
2573 break;
2574 }
2575 }
2576
2577 static void
2578 xhci_softintr(void *v)
2579 {
2580 struct usbd_bus * const bus = v;
2581 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2582 struct xhci_ring * const er = sc->sc_er;
2583 struct xhci_trb *trb;
2584 int i, j, k;
2585
2586 XHCIHIST_FUNC();
2587
2588 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2589
2590 i = er->xr_ep;
2591 j = er->xr_cs;
2592
2593 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0);
2594
2595 while (1) {
2596 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE,
2597 BUS_DMASYNC_POSTREAD);
2598 trb = &er->xr_trb[i];
2599 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0;
2600
2601 if (j != k)
2602 break;
2603
2604 xhci_handle_event(sc, trb);
2605
2606 i++;
2607 if (i == er->xr_ntrb) {
2608 i = 0;
2609 j ^= 1;
2610 }
2611 }
2612
2613 er->xr_ep = i;
2614 er->xr_cs = j;
2615
2616 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) |
2617 XHCI_ERDP_BUSY);
2618
2619 DPRINTFN(16, "ends", 0, 0, 0, 0);
2620
2621 return;
2622 }
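/*
 * The loop above consumes event TRBs until it reaches one whose cycle bit
 * differs from the consumer cycle state, i.e. one the controller has not
 * written yet; the consumer cycle state flips each time the dequeue index
 * wraps.  Writing ERDP with XHCI_ERDP_BUSY both advances the dequeue pointer
 * and clears the Event Handler Busy flag (which is write-1-to-clear).
 */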
2623
2624 static void
2625 xhci_poll(struct usbd_bus *bus)
2626 {
2627 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2628
2629 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2630
2631 mutex_enter(&sc->sc_intr_lock);
2632 int ret = xhci_intr1(sc);
2633 if (ret) {
2634 xhci_softintr(bus);
2635 }
2636 mutex_exit(&sc->sc_intr_lock);
2637
2638 return;
2639 }
2640
2641 static struct usbd_xfer *
2642 xhci_allocx(struct usbd_bus *bus, unsigned int nframes)
2643 {
2644 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2645 struct xhci_xfer *xx;
2646 u_int ntrbs;
2647
2648 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2649
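	/*
	 * Reserve at least three TRBs per xfer -- enough for e.g. the setup,
	 * data and status TRBs of a control transfer -- and at least one per
	 * frame for isochronous transfers.
	 */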
2650 ntrbs = uimax(3, nframes);
2651 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs;
2652
2653 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
2654 if (xx != NULL) {
2655 memset(xx, 0, sizeof(*xx));
2656 if (ntrbs > 0) {
2657 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP);
2658 xx->xx_ntrb = ntrbs;
2659 }
2660 #ifdef DIAGNOSTIC
2661 xx->xx_xfer.ux_state = XFER_BUSY;
2662 #endif
2663 }
2664
2665 return &xx->xx_xfer;
2666 }
2667
2668 static void
2669 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
2670 {
2671 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2672 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
2673
2674 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2675
2676 #ifdef DIAGNOSTIC
2677 if (xfer->ux_state != XFER_BUSY &&
2678 xfer->ux_status != USBD_NOT_STARTED) {
2679 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx",
2680 (uintptr_t)xfer, xfer->ux_state, 0, 0);
2681 }
2682 xfer->ux_state = XFER_FREE;
2683 #endif
2684 if (xx->xx_ntrb > 0) {
2685 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb));
2686 xx->xx_trb = NULL;
2687 xx->xx_ntrb = 0;
2688 }
2689 pool_cache_put(sc->sc_xferpool, xx);
2690 }
2691
2692 static bool
2693 xhci_dying(struct usbd_bus *bus)
2694 {
2695 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2696
2697 return sc->sc_dying;
2698 }
2699
2700 static void
2701 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2702 {
2703 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2704
2705 *lock = &sc->sc_lock;
2706 }
2707
2708 extern uint32_t usb_cookie_no;
2709
2710 /*
2711  * xHCI 4.3
2712  * Called when uhub_explore finds a new device (via usbd_new_device).
2713  * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2714  * This function does:
2715  *	Allocate and construct the usbd_device structure of the new device.
2716  *	Allocate and open the default pipe (ep0).
2717  *	Enable a slot and initialize the slot context.
2718  *	Set the device address.
2719  *	Read the initial device descriptor.
2720  *	Determine the initial MaxPacketSize (mps) from the speed.
2721  *	Read the full device descriptor.
2722  *	Register this device.
2723  * Finally the device state transitions to ADDRESSED.
2724  */
2725 static usbd_status
2726 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2727 int speed, int port, struct usbd_port *up)
2728 {
2729 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2730 struct usbd_device *dev;
2731 usbd_status err;
2732 usb_device_descriptor_t *dd;
2733 struct xhci_slot *xs;
2734 uint32_t *cp;
2735
2736 XHCIHIST_FUNC();
2737 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2738 port, depth, speed, (uintptr_t)up);
2739
2740 KASSERT(KERNEL_LOCKED_P());
2741
2742 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2743 dev->ud_bus = bus;
2744 dev->ud_quirks = &usbd_no_quirk;
2745 dev->ud_addr = 0;
2746 dev->ud_ddesc.bMaxPacketSize = 0;
2747 dev->ud_depth = depth;
2748 dev->ud_powersrc = up;
2749 dev->ud_myhub = up->up_parent;
2750 dev->ud_speed = speed;
2751 dev->ud_langid = USBD_NOLANG;
2752 dev->ud_cookie.cookie = ++usb_cookie_no;
2753
2754 /* Set up default endpoint handle. */
2755 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2756 	/* doesn't matter, just don't leave it uninitialized */
2757 dev->ud_ep0.ue_toggle = 0;
2758
2759 /* Set up default endpoint descriptor. */
2760 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2761 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2762 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2763 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2764 dev->ud_ep0desc.bInterval = 0;
2765
2766 /* 4.3, 4.8.2.1 */
2767 switch (speed) {
2768 case USB_SPEED_SUPER:
2769 case USB_SPEED_SUPER_PLUS:
2770 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2771 break;
2772 case USB_SPEED_FULL:
2773 /* XXX using 64 as initial mps of ep0 in FS */
2774 case USB_SPEED_HIGH:
2775 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2776 break;
2777 case USB_SPEED_LOW:
2778 default:
2779 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2780 break;
2781 }
2782
2783 up->up_dev = dev;
2784
2785 dd = &dev->ud_ddesc;
2786
2787 if (depth == 0 && port == 0) {
2788 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2789 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2790
2791 /* Establish the default pipe. */
2792 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2793 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2794 if (err) {
2795 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0);
2796 goto bad;
2797 }
2798 err = usbd_get_initial_ddesc(dev, dd);
2799 if (err) {
2800 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2801 goto bad;
2802 }
2803 } else {
2804 uint8_t slot = 0;
2805
2806 /* 4.3.2 */
2807 err = xhci_enable_slot(sc, &slot);
2808 if (err) {
2809 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0);
2810 goto bad;
2811 }
2812
2813 xs = &sc->sc_slots[slot];
2814 dev->ud_hcpriv = xs;
2815
2816 /* 4.3.3 initialize slot structure */
2817 err = xhci_init_slot(dev, slot);
2818 if (err) {
2819 DPRINTFN(1, "init slot %ju", err, 0, 0, 0);
2820 dev->ud_hcpriv = NULL;
2821 /*
2822 * We have to disable_slot here because
2823 * xs->xs_idx == 0 when xhci_init_slot fails,
2824 * in that case usbd_remove_dev won't work.
2825 */
2826 mutex_enter(&sc->sc_lock);
2827 xhci_disable_slot(sc, slot);
2828 mutex_exit(&sc->sc_lock);
2829 goto bad;
2830 }
2831
2832 /*
2833 * We have to establish the default pipe _after_ slot
2834 * structure has been prepared.
2835 */
2836 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2837 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2838 if (err) {
2839 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0,
2840 0);
2841 goto bad;
2842 }
2843
2844 /* 4.3.4 Address Assignment */
2845 err = xhci_set_address(dev, slot, false);
2846 if (err) {
2847 			DPRINTFN(1, "failed to set address: %ju", err, 0, 0, 0);
2848 goto bad;
2849 }
2850
2851 /* Allow device time to set new address */
2852 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE);
2853
2854 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2855 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT);
2856 HEXDUMP("slot context", cp, sc->sc_ctxsz);
2857 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3]));
2858 DPRINTFN(4, "device address %ju", addr, 0, 0, 0);
2859 /*
2860 * XXX ensure we know when the hardware does something
2861 * we can't yet cope with
2862 */
2863 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr);
2864 dev->ud_addr = addr;
2865
2866 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL,
2867 "addr %d already allocated", dev->ud_addr);
2868 /*
2869 * The root hub is given its own slot
2870 */
2871 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev;
2872
2873 err = usbd_get_initial_ddesc(dev, dd);
2874 if (err) {
2875 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2876 goto bad;
2877 }
2878
2879 /* 4.8.2.1 */
2880 if (USB_IS_SS(speed)) {
2881 if (dd->bMaxPacketSize != 9) {
2882 printf("%s: invalid mps 2^%u for SS ep0,"
2883 " using 512\n",
2884 device_xname(sc->sc_dev),
2885 dd->bMaxPacketSize);
2886 dd->bMaxPacketSize = 9;
2887 }
2888 USETW(dev->ud_ep0desc.wMaxPacketSize,
2889 (1 << dd->bMaxPacketSize));
2890 } else
2891 USETW(dev->ud_ep0desc.wMaxPacketSize,
2892 dd->bMaxPacketSize);
2893 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0);
2894 err = xhci_update_ep0_mps(sc, xs,
2895 UGETW(dev->ud_ep0desc.wMaxPacketSize));
2896 if (err) {
2897 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0);
2898 goto bad;
2899 }
2900 }
2901
2902 err = usbd_reload_device_desc(dev);
2903 if (err) {
2904 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
2905 goto bad;
2906 }
2907
2908 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,",
2909 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0);
2910 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,",
2911 dd->bDeviceClass, dd->bDeviceSubClass,
2912 dd->bDeviceProtocol, 0);
2913 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd",
2914 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations,
2915 dev->ud_speed);
2916
2917 usbd_get_device_strings(dev);
2918
2919 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev);
2920
2921 if (depth == 0 && port == 0) {
2922 usbd_attach_roothub(parent, dev);
2923 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0);
2924 return USBD_NORMAL_COMPLETION;
2925 }
2926
2927 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr);
2928 bad:
2929 if (err != USBD_NORMAL_COMPLETION) {
2930 if (depth == 0 && port == 0 && dev->ud_pipe0)
2931 usbd_kill_pipe(dev->ud_pipe0);
2932 usbd_remove_device(dev, up);
2933 }
2934
2935 return err;
2936 }
2937
2938 static usbd_status
2939 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp,
2940 size_t ntrb, size_t align)
2941 {
2942 size_t size = ntrb * XHCI_TRB_SIZE;
2943 struct xhci_ring *xr;
2944
2945 XHCIHIST_FUNC();
2946 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx",
2947 (uintptr_t)*xrp, ntrb, align, 0);
2948
2949 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP);
2950 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0);
2951
2952 int err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
2953 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xr->xr_dma);
2954 if (err) {
2955 kmem_free(xr, sizeof(struct xhci_ring));
2956 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0);
2957 return err;
2958 }
2959 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
2960 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP);
2961 xr->xr_trb = xhci_ring_trbv(xr, 0);
2962 xr->xr_ntrb = ntrb;
2963 xr->is_halted = false;
2964 xhci_host_dequeue(xr);
2965 *xrp = xr;
2966
2967 return USBD_NORMAL_COMPLETION;
2968 }
2969
2970 static void
2971 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr)
2972 {
2973 if (*xr == NULL)
2974 return;
2975
2976 usb_freemem(&(*xr)->xr_dma);
2977 mutex_destroy(&(*xr)->xr_lock);
2978 kmem_free((*xr)->xr_cookies,
2979 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb);
2980 kmem_free(*xr, sizeof(struct xhci_ring));
2981 *xr = NULL;
2982 }
2983
2984 static void
2985 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr,
2986 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs)
2987 {
2988 size_t i;
2989 u_int ri;
2990 u_int cs;
2991 uint64_t parameter;
2992 uint32_t status;
2993 uint32_t control;
2994
2995 XHCIHIST_FUNC();
2996 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju",
2997 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0);
2998
2999 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u",
3000 ntrbs, xr->xr_ntrb);
3001 for (i = 0; i < ntrbs; i++) {
3002 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr,
3003 (uintptr_t)trbs, i, 0);
3004 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx",
3005 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0);
3006 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) !=
3007 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3);
3008 }
3009
3010 ri = xr->xr_ep;
3011 cs = xr->xr_cs;
3012
3013 	/*
3014 	 * Although the xhci hardware can do scatter/gather dma from
3015 	 * arbitrarily sized buffers, there is a non-obvious restriction
3016 	 * that a LINK trb is only allowed at the end of a burst of
3017 	 * transfers - which might be 16kB.
3018 	 * Arbitrarily aligned LINK trbs definitely fail on Ivy Bridge.
3019 	 * The simple solution is not to allow a LINK trb in the middle
3020 	 * of anything - as here.
3021 	 * XXX: (dsl) There are xhci controllers out there (e.g. some made by
3022 	 * ASMedia) that seem to lock up if they process a LINK trb but
3023 	 * cannot process the linked-to trb yet.
3024 	 * The code should write the 'cycle' bit on the link trb AFTER
3025 	 * adding the other trb.
3026 	 */
3027 u_int firstep = xr->xr_ep;
3028 u_int firstcs = xr->xr_cs;
3029
3030 for (i = 0; i < ntrbs; ) {
3031 u_int oldri = ri;
3032 u_int oldcs = cs;
3033
3034 if (ri >= (xr->xr_ntrb - 1)) {
3035 /* Put Link TD at the end of ring */
3036 parameter = xhci_ring_trbp(xr, 0);
3037 status = 0;
3038 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) |
3039 XHCI_TRB_3_TC_BIT;
3040 xr->xr_cookies[ri] = NULL;
3041 xr->xr_ep = 0;
3042 xr->xr_cs ^= 1;
3043 ri = xr->xr_ep;
3044 cs = xr->xr_cs;
3045 } else {
3046 parameter = trbs[i].trb_0;
3047 status = trbs[i].trb_2;
3048 control = trbs[i].trb_3;
3049
3050 xr->xr_cookies[ri] = cookie;
3051 ri++;
3052 i++;
3053 }
3054 		/*
3055 		 * If this is the first TRB, write it with the wrong cycle bit
3056 		 * (invalid) so the xHC cannot run it before the rest is written.
3057 		 */
3058 if (oldri == firstep) {
3059 if (oldcs) {
3060 control &= ~XHCI_TRB_3_CYCLE_BIT;
3061 } else {
3062 control |= XHCI_TRB_3_CYCLE_BIT;
3063 }
3064 } else {
3065 if (oldcs) {
3066 control |= XHCI_TRB_3_CYCLE_BIT;
3067 } else {
3068 control &= ~XHCI_TRB_3_CYCLE_BIT;
3069 }
3070 }
3071 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control);
3072 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri,
3073 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3074 }
3075
3076 /* Now invert cycle bit of first TRB */
3077 if (firstcs) {
3078 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT);
3079 } else {
3080 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT);
3081 }
3082 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep,
3083 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3084
3085 xr->xr_ep = ri;
3086 xr->xr_cs = cs;
3087
3088 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep,
3089 xr->xr_cs, 0);
3090 }
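/*
 * Example: enqueueing three TRBs at xr_ep = 5 with xr_cs = 1 writes slot 5
 * first but with its cycle bit deliberately wrong (invalid), then writes
 * slots 6 and 7 as valid, and finally flips slot 5's cycle bit, so the xHC
 * can never start executing a partially written chain.
 */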
3091
3092 static inline void
3093 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr,
3094 struct xhci_xfer *xx, u_int ntrb)
3095 {
3096 KASSERT(ntrb <= xx->xx_ntrb);
3097 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb);
3098 }
3099
3100 /*
3101  * Stop command execution, purge all commands on the command ring, and
3102  * rewind the dequeue pointer.
3103  */
3104 static void
3105 xhci_abort_command(struct xhci_softc *sc)
3106 {
3107 struct xhci_ring * const cr = sc->sc_cr;
3108 uint64_t crcr;
3109 int i;
3110
3111 XHCIHIST_FUNC();
3112 XHCIHIST_CALLARGS("command %#jx timeout, aborting",
3113 sc->sc_command_addr, 0, 0, 0);
3114
3115 mutex_enter(&cr->xr_lock);
3116
3117 /* 4.6.1.2 Aborting a Command */
3118 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3119 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA);
3120
3121 for (i = 0; i < 500; i++) {
3122 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3123 if ((crcr & XHCI_CRCR_LO_CRR) == 0)
3124 break;
3125 usb_delay_ms(&sc->sc_bus, 1);
3126 }
3127 if ((crcr & XHCI_CRCR_LO_CRR) != 0) {
3128 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0);
3129 /* reset HC here? */
3130 }
3131
3132 /* reset command ring dequeue pointer */
3133 cr->xr_ep = 0;
3134 cr->xr_cs = 1;
3135 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs);
3136
3137 mutex_exit(&cr->xr_lock);
3138 }
3139
3140 /*
3141  * Put a command on the command ring, ring the doorbell, and cv_timedwait
3142  * for completion.  Completion is signalled by cv_signal from
3143  * xhci_event_cmd() (called from xhci_softintr), or the wait times out.
3144  * The completion code is copied to sc->sc_result_trb in xhci_event_cmd()
3145  * and then examined here.
3146  */
3147 static usbd_status
3148 xhci_do_command_locked(struct xhci_softc * const sc,
3149 struct xhci_soft_trb * const trb, int timeout)
3150 {
3151 struct xhci_ring * const cr = sc->sc_cr;
3152 usbd_status err;
3153
3154 XHCIHIST_FUNC();
3155 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx",
3156 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3157
3158 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
3159 KASSERT(mutex_owned(&sc->sc_lock));
3160
3161 while (sc->sc_command_addr != 0 ||
3162 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp))
3163 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
3164
3165 	/*
3166 	 * If the enqueue pointer points at the last slot of the ring, that
3167 	 * slot holds the Link TRB and the command TRB will go into slot 0.
3168 	 */
3169 if (cr->xr_ep == cr->xr_ntrb - 1)
3170 sc->sc_command_addr = xhci_ring_trbp(cr, 0);
3171 else
3172 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep);
3173
3174 sc->sc_resultpending = true;
3175
3176 mutex_enter(&cr->xr_lock);
3177 xhci_ring_put(sc, cr, NULL, trb, 1);
3178 mutex_exit(&cr->xr_lock);
3179
3180 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0);
3181
3182 while (sc->sc_resultpending) {
3183 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock,
3184 MAX(1, mstohz(timeout))) == EWOULDBLOCK) {
3185 xhci_abort_command(sc);
3186 err = USBD_TIMEOUT;
3187 goto timedout;
3188 }
3189 }
3190
3191 trb->trb_0 = sc->sc_result_trb.trb_0;
3192 trb->trb_2 = sc->sc_result_trb.trb_2;
3193 trb->trb_3 = sc->sc_result_trb.trb_3;
3194
3195 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx",
3196 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3197
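	/*
	 * xHCI 6.4.5: completion codes 192..223 are vendor-defined errors and
	 * 224..255 are vendor-defined information codes; treat the former
	 * like any other error and the latter as success.
	 */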
3198 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) {
3199 case XHCI_TRB_ERROR_SUCCESS:
3200 err = USBD_NORMAL_COMPLETION;
3201 break;
3202 default:
3203 case 192 ... 223:
3204 DPRINTFN(5, "error %#jx",
3205 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0);
3206 err = USBD_IOERROR;
3207 break;
3208 case 224 ... 255:
3209 err = USBD_NORMAL_COMPLETION;
3210 break;
3211 }
3212
3213 timedout:
3214 sc->sc_resultpending = false;
3215 sc->sc_command_addr = 0;
3216 cv_broadcast(&sc->sc_cmdbusy_cv);
3217
3218 return err;
3219 }
3220
3221 static usbd_status
3222 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb,
3223 int timeout)
3224 {
3225
3226 mutex_enter(&sc->sc_lock);
3227 usbd_status ret = xhci_do_command_locked(sc, trb, timeout);
3228 mutex_exit(&sc->sc_lock);
3229
3230 return ret;
3231 }
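/*
 * Example (sketch): xhci_enable_slot() below is the simplest user of this
 * interface -- it fills in a single struct xhci_soft_trb with just the TRB
 * type, calls xhci_do_command(), and on success reads the assigned slot
 * number back out of the completed TRB.
 */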
3232
3233 static usbd_status
3234 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp)
3235 {
3236 struct xhci_soft_trb trb;
3237 usbd_status err;
3238
3239 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3240
3241 trb.trb_0 = 0;
3242 trb.trb_2 = 0;
3243 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
3244
3245 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3246 if (err != USBD_NORMAL_COMPLETION) {
3247 return err;
3248 }
3249
3250 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3);
3251
3252 return err;
3253 }
3254
3255 /*
3256  * xHCI 4.6.4
3257  * Disable the slot and deallocate its rings and device/input context
3258  * DMA buffers.  All endpoints in the slot should already be stopped.
3259  * Must be called with sc_lock held.
3260  */
3261 static usbd_status
3262 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot)
3263 {
3264 struct xhci_soft_trb trb;
3265 struct xhci_slot *xs;
3266 usbd_status err;
3267
3268 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3269
3270 if (sc->sc_dying)
3271 return USBD_IOERROR;
3272
3273 trb.trb_0 = 0;
3274 trb.trb_2 = 0;
3275 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) |
3276 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT);
3277
3278 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
3279
3280 if (!err) {
3281 xs = &sc->sc_slots[slot];
3282 if (xs->xs_idx != 0) {
3283 xhci_free_slot(sc, xs);
3284 xhci_set_dcba(sc, 0, slot);
3285 memset(xs, 0, sizeof(*xs));
3286 }
3287 }
3288
3289 return err;
3290 }
3291
3292 /*
3293  * Set the device address and transition the slot state from ENABLED to
3294  * ADDRESSED if the Block Set Address Request (BSR) flag is false.
3295  * If BSR==true, transition the slot state from ENABLED to DEFAULT instead.
3296  * See xHCI 1.1 4.5.3, 3.3.4.
3297  * Must be called without sc_lock held.
3298  */
3299 static usbd_status
3300 xhci_address_device(struct xhci_softc * const sc,
3301 uint64_t icp, uint8_t slot_id, bool bsr)
3302 {
3303 struct xhci_soft_trb trb;
3304 usbd_status err;
3305
3306 XHCIHIST_FUNC();
3307 if (bsr) {
3308 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr",
3309 icp, slot_id, 0, 0);
3310 } else {
3311 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr",
3312 icp, slot_id, 0, 0);
3313 }
3314
3315 trb.trb_0 = icp;
3316 trb.trb_2 = 0;
3317 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) |
3318 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) |
3319 (bsr ? XHCI_TRB_3_BSR_BIT : 0);
3320
3321 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3322
3323 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS)
3324 err = USBD_NO_ADDR;
3325
3326 return err;
3327 }
3328
3329 static usbd_status
3330 xhci_update_ep0_mps(struct xhci_softc * const sc,
3331 struct xhci_slot * const xs, u_int mps)
3332 {
3333 struct xhci_soft_trb trb;
3334 usbd_status err;
3335 uint32_t * cp;
3336
3337 XHCIHIST_FUNC();
3338 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0);
3339
3340 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3341 cp[0] = htole32(0);
3342 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL));
3343
3344 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL));
3345 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps));
3346
3347 /* sync input contexts before they are read from memory */
3348 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3349 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3350 sc->sc_ctxsz * 4);
3351
3352 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
3353 trb.trb_2 = 0;
3354 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
3355 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX);
3356
3357 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3358 return err;
3359 }
3360
3361 static void
3362 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si)
3363 {
3364 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0);
3365
3366 XHCIHIST_FUNC();
3367 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd",
3368 (uintptr_t)&dcbaa[si], dcba, si, 0);
3369
3370 dcbaa[si] = htole64(dcba);
3371 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t),
3372 BUS_DMASYNC_PREWRITE);
3373 }
3374
3375 /*
3376  * Allocate the output device context and input context DMA buffers
3377  * for this slot.
3378  */
3379 static usbd_status
3380 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3381 {
3382 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3383 struct xhci_slot *xs;
3384
3385 XHCIHIST_FUNC();
3386 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3387
3388 xs = &sc->sc_slots[slot];
3389
3390 /* allocate contexts */
3391 int err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3392 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_dc_dma);
3393 if (err) {
3394 DPRINTFN(1, "failed to allocmem output device context %jd",
3395 err, 0, 0, 0);
3396 return USBD_NOMEM;
3397 }
3398
3399 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3400 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_ic_dma);
3401 if (err) {
3402 DPRINTFN(1, "failed to allocmem input device context %jd",
3403 err, 0, 0, 0);
3404 goto bad1;
3405 }
3406
3407 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr));
3408 xs->xs_idx = slot;
3409
3410 return USBD_NORMAL_COMPLETION;
3411
3412 bad1:
3413 usb_freemem(&xs->xs_dc_dma);
3414 xs->xs_idx = 0;
3415 return USBD_NOMEM;
3416 }
3417
3418 static void
3419 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs)
3420 {
3421 u_int dci;
3422
3423 XHCIHIST_FUNC();
3424 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
3425
3426 /* deallocate all allocated rings in the slot */
3427 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
3428 if (xs->xs_xr[dci] != NULL)
3429 xhci_ring_free(sc, &xs->xs_xr[dci]);
3430 }
3431 usb_freemem(&xs->xs_ic_dma);
3432 usb_freemem(&xs->xs_dc_dma);
3433 xs->xs_idx = 0;
3434 }
3435
3436 /*
3437  * Set up the slot context, set the Device Context Base Address, and
3438  * issue an Address Device command.
3439  */
3440 static usbd_status
3441 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3442 {
3443 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3444 struct xhci_slot *xs;
3445 usbd_status err;
3446
3447 XHCIHIST_FUNC();
3448 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3449
3450 xs = &sc->sc_slots[slot];
3451
3452 xhci_setup_ctx(dev->ud_pipe0);
3453
3454 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3455 sc->sc_ctxsz * 3);
3456
3457 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3458
3459 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3460
3461 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3462 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3463 sc->sc_ctxsz * 2);
3464
3465 return err;
3466 }
3467
3468 /*
3469  * 4.8.2, 6.2.3.2
3470  * Construct the slot and endpoint context parameters and sync them to memory.
3471  */
3472 static void
3473 xhci_setup_ctx(struct usbd_pipe *pipe)
3474 {
3475 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3476 struct usbd_device *dev = pipe->up_dev;
3477 struct xhci_slot * const xs = dev->ud_hcpriv;
3478 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3479 const u_int dci = xhci_ep_get_dci(ed);
3480 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3481 uint32_t *cp;
3482 uint16_t mps = UGETW(ed->wMaxPacketSize);
3483 uint8_t speed = dev->ud_speed;
3484 uint8_t ival = ed->bInterval;
3485
3486 XHCIHIST_FUNC();
3487 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3488 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3489
3490 /* set up initial input control context */
3491 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3492 cp[0] = htole32(0);
3493 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3494 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT));
3495 cp[7] = htole32(0);
3496
3497 /* set up input slot context */
3498 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
3499 cp[0] =
3500 XHCI_SCTX_0_CTX_NUM_SET(dci) |
3501 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed));
3502 cp[1] = 0;
3503 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0);
3504 cp[3] = 0;
3505 xhci_setup_route(pipe, cp);
3506 xhci_setup_tthub(pipe, cp);
3507
3508 cp[0] = htole32(cp[0]);
3509 cp[1] = htole32(cp[1]);
3510 cp[2] = htole32(cp[2]);
3511 cp[3] = htole32(cp[3]);
3512
3513 /* set up input endpoint context */
3514 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci));
3515 cp[0] =
3516 XHCI_EPCTX_0_EPSTATE_SET(0) |
3517 XHCI_EPCTX_0_MULT_SET(0) |
3518 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) |
3519 XHCI_EPCTX_0_LSA_SET(0) |
3520 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0);
3521 cp[1] =
3522 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) |
3523 XHCI_EPCTX_1_HID_SET(0) |
3524 XHCI_EPCTX_1_MAXB_SET(0);
3525
3526 if (xfertype != UE_ISOCHRONOUS)
3527 cp[1] |= XHCI_EPCTX_1_CERR_SET(3);
3528
3529 if (xfertype == UE_CONTROL)
3530 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); /* 6.2.3 */
3531 else if (USB_IS_SS(speed))
3532 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(mps);
3533 else
3534 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(UE_GET_SIZE(mps));
3535
3536 xhci_setup_maxburst(pipe, cp);
3537
3538 switch (xfertype) {
3539 case UE_CONTROL:
3540 break;
3541 case UE_BULK:
3542 /* XXX Set MaxPStreams, HID, and LSA if streams enabled */
3543 break;
3544 case UE_INTERRUPT:
3545 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3546 ival = pipe->up_interval;
3547
3548 ival = xhci_bival2ival(ival, speed);
3549 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3550 break;
3551 case UE_ISOCHRONOUS:
3552 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3553 ival = pipe->up_interval;
3554
3555 /* xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 */
3556 if (speed == USB_SPEED_FULL)
3557 ival += 3; /* 1ms -> 125us */
3558 ival--;
3559 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3560 break;
3561 default:
3562 break;
3563 }
3564 DPRINTFN(4, "setting ival %ju MaxBurst %#jx",
3565 XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_1_MAXB_GET(cp[1]), 0, 0);
3566
3567 /* rewind TR dequeue pointer in xHC */
3568 /* can't use xhci_ep_get_dci() yet? */
3569 *(uint64_t *)(&cp[2]) = htole64(
3570 xhci_ring_trbp(xs->xs_xr[dci], 0) |
3571 XHCI_EPCTX_2_DCS_SET(1));
3572
3573 cp[0] = htole32(cp[0]);
3574 cp[1] = htole32(cp[1]);
3575 cp[4] = htole32(cp[4]);
3576
3577 /* rewind TR dequeue pointer in driver */
3578 struct xhci_ring *xr = xs->xs_xr[dci];
3579 mutex_enter(&xr->xr_lock);
3580 xhci_host_dequeue(xr);
3581 mutex_exit(&xr->xr_lock);
3582
3583 /* sync input contexts before they are read from memory */
3584 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3585 }
3586
3587 /*
3588  * Set up the route string and root hub port of the given device for the slot context.
3589  */
3590 static void
3591 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3592 {
3593 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3594 struct usbd_device *dev = pipe->up_dev;
3595 struct usbd_port *up = dev->ud_powersrc;
3596 struct usbd_device *hub;
3597 struct usbd_device *adev;
3598 uint8_t rhport = 0;
3599 uint32_t route = 0;
3600
3601 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3602
3603 	/* Locate the root hub port and determine the route string */
3604 	/* 4.3.3: the route string does not include the root hub port */
3605 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3606 uint32_t dep;
3607
3608 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd",
3609 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3610 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3611 -1);
3612
3613 if (hub->ud_powersrc == NULL)
3614 break;
3615 dep = hub->ud_depth;
3616 if (dep == 0)
3617 break;
3618 rhport = hub->ud_powersrc->up_portno;
3619 if (dep > USB_HUB_MAX_DEPTH)
3620 continue;
3621
3622 route |=
3623 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3624 << ((dep - 1) * 4);
3625 }
3626 route = route >> 4;
3627 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
3628
3629 /* Locate port on upstream high speed hub */
3630 for (adev = dev, hub = up->up_parent;
3631 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3632 adev = hub, hub = hub->ud_myhub)
3633 ;
3634 if (hub) {
3635 int p;
3636 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3637 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) {
3638 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1];
3639 goto found;
3640 }
3641 }
3642 panic("%s: cannot find HS port", __func__);
3643 found:
3644 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3645 } else {
3646 dev->ud_myhsport = NULL;
3647 }
3648
3649 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3650
3651 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3652 ctlrport, route, (uintptr_t)hub);
3653
3654 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3655 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3656 }
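/*
 * Example: a device on port 3 of an external hub that is itself plugged into
 * root hub port 2 ends up with route string 0x3 and root hub port 2; per
 * xHCI 4.3.3 the root hub port is not part of the route string, which is why
 * the final shift above drops the lowest nibble.
 */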
3657
3658 /*
3659  * Set up whether the device is a hub, whether it uses MTT, and the
3660  * TT information if it does.
3661  */
3662 static void
3663 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3664 {
3665 struct usbd_device *dev = pipe->up_dev;
3666 struct usbd_port *myhsport = dev->ud_myhsport;
3667 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3668 uint32_t speed = dev->ud_speed;
3669 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3670 uint8_t tthubslot, ttportnum;
3671 bool ishub;
3672 bool usemtt;
3673
3674 XHCIHIST_FUNC();
3675
3676 /*
3677 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3678 * tthubslot:
3679 * This is the slot ID of parent HS hub
3680 * if LS/FS device is connected && connected through HS hub.
3681 * This is 0 if device is not LS/FS device ||
3682 * parent hub is not HS hub ||
3683 * attached to root hub.
3684 * ttportnum:
3685 * This is the downstream facing port of parent HS hub
3686 * if LS/FS device is connected.
3687 * This is 0 if device is not LS/FS device ||
3688 * parent hub is not HS hub ||
3689 * attached to root hub.
3690 */
3691 if (myhsport &&
3692 myhsport->up_parent->ud_addr != rhaddr &&
3693 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3694 ttportnum = myhsport->up_portno;
3695 tthubslot = myhsport->up_parent->ud_addr;
3696 } else {
3697 ttportnum = 0;
3698 tthubslot = 0;
3699 }
3700 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd",
3701 (uintptr_t)myhsport, ttportnum, tthubslot, 0);
3702
3703 /* ishub is valid after reading UDESC_DEVICE */
3704 ishub = (dd->bDeviceClass == UDCLASS_HUB);
3705
3706 /* dev->ud_hub is valid after reading UDESC_HUB */
3707 if (ishub && dev->ud_hub) {
3708 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc;
3709 uint8_t ttt =
3710 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK);
3711
3712 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts);
3713 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt);
3714 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0);
3715 }
3716
3717 #define IS_MTTHUB(dd) \
3718 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT)
3719
3720 /*
3721 * MTT flag is set if
3722 * 1. this is HS hub && MTTs are supported and enabled; or
3723 * 2. this is LS or FS device && there is a parent HS hub where MTTs
3724 * are supported and enabled.
3725 *
3726 * XXX enabled is not tested yet
3727 */
3728 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd))
3729 usemtt = true;
3730 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) &&
3731 myhsport &&
3732 myhsport->up_parent->ud_addr != rhaddr &&
3733 IS_MTTHUB(&myhsport->up_parent->ud_ddesc))
3734 usemtt = true;
3735 else
3736 usemtt = false;
3737 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd",
3738 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt);
3739
3740 #undef IS_MTTHUB
3741
3742 cp[0] |=
3743 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) |
3744 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0);
3745 cp[2] |=
3746 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) |
3747 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum);
3748 }
3749
3750 /* set up params for periodic endpoint */
3751 static void
3752 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp)
3753 {
3754 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
3755 struct usbd_device *dev = pipe->up_dev;
3756 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3757 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3758 usbd_desc_iter_t iter;
3759 const usb_cdc_descriptor_t *cdcd;
3760 uint32_t maxb = 0;
3761 uint16_t mps = UGETW(ed->wMaxPacketSize);
3762 uint8_t speed = dev->ud_speed;
3763 uint8_t mult = 0;
3764 uint8_t ep;
3765
3766 /* config desc is NULL when opening ep0 */
3767 if (dev == NULL || dev->ud_cdesc == NULL)
3768 goto no_cdcd;
3769 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev,
3770 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY);
3771 if (cdcd == NULL)
3772 goto no_cdcd;
3773 usb_desc_iter_init(dev, &iter);
3774 iter.cur = (const void *)cdcd;
3775
3776 /* find endpoint_ss_comp desc for ep of this pipe */
3777 for (ep = 0;;) {
3778 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter);
3779 if (cdcd == NULL)
3780 break;
3781 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) {
3782 ep = ((const usb_endpoint_descriptor_t *)cdcd)->
3783 bEndpointAddress;
3784 if (UE_GET_ADDR(ep) ==
3785 UE_GET_ADDR(ed->bEndpointAddress)) {
3786 cdcd = (const usb_cdc_descriptor_t *)
3787 usb_desc_iter_next(&iter);
3788 break;
3789 }
3790 ep = 0;
3791 }
3792 }
3793 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
3794 const usb_endpoint_ss_comp_descriptor_t * esscd =
3795 (const usb_endpoint_ss_comp_descriptor_t *)cdcd;
3796 maxb = esscd->bMaxBurst;
3797 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes);
3798 }
3799
3800 no_cdcd:
3801 /* 6.2.3.4, 4.8.2.4 */
3802 if (USB_IS_SS(speed)) {
3803 /* USB 3.1 9.6.6 */
3804 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps);
3805 /* USB 3.1 9.6.7 */
3806 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3807 #ifdef notyet
3808 if (xfertype == UE_ISOCHRONOUS) {
3809 }
3810 if (XHCI_HCC2_LEC(sc->sc_hcc2) != 0) {
3811 /* use ESIT */
3812 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x);
3813 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(x);
3814
3815 /* XXX if LEC = 1, set ESIT instead */
3816 cp[0] |= XHCI_EPCTX_0_MULT_SET(0);
3817 } else {
3818 /* use ival */
3819 }
3820 #endif
3821 } else {
3822 /* USB 2.0 9.6.6 */
3823 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(UE_GET_SIZE(mps));
3824
3825 /* 6.2.3.4 */
3826 if (speed == USB_SPEED_HIGH &&
3827 (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)) {
3828 maxb = UE_GET_TRANS(mps);
3829 } else {
3830 /* LS/FS or HS CTRL or HS BULK */
3831 maxb = 0;
3832 }
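		/*
		 * e.g. a HS high-bandwidth isoc endpoint with wMaxPacketSize
		 * 0x1400 has mps=1024 and 2 additional transactions per
		 * microframe, so maxb=2 here and xp_maxb=3 below.
		 */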
3833 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3834 }
3835 xpipe->xp_maxb = maxb + 1;
3836 xpipe->xp_mult = mult + 1;
3837 }
3838
3839 /*
3840 * Convert endpoint bInterval value to endpoint context interval value
3841  * for an Interrupt pipe.
3842 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
3843 */
3844 static uint32_t
3845 xhci_bival2ival(uint32_t ival, uint32_t speed)
3846 {
3847 if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
3848 int i;
3849
3850 /*
3851 * round ival down to "the nearest base 2 multiple of
3852 * bInterval * 8".
3853 * bInterval is at most 255 as its type is uByte.
3854 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
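		 * e.g. bInterval=10 (ms) gives 80 microframes; the largest
		 * power of two not exceeding 80 is 2^6 = 64, so ival = 6 (8ms).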
3855 */
3856 for (i = 10; i > 0; i--) {
3857 if ((ival * 8) >= (1 << i))
3858 break;
3859 }
3860 ival = i;
3861 } else {
3862 /* Interval = bInterval-1 for SS/HS */
3863 ival--;
3864 }
3865
3866 return ival;
3867 }
3868
3869 /* ----- */
3870
3871 static void
3872 xhci_noop(struct usbd_pipe *pipe)
3873 {
3874 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3875 }
3876
3877 /*
3878 * Process root hub request.
3879 */
3880 static int
3881 xhci_roothub_ctrl_locked(struct usbd_bus *bus, usb_device_request_t *req,
3882 void *buf, int buflen)
3883 {
3884 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
3885 usb_port_status_t ps;
3886 int l, totlen = 0;
3887 uint16_t len, value, index;
3888 int port, i;
3889 uint32_t v;
3890
3891 XHCIHIST_FUNC();
3892
3893 KASSERT(mutex_owned(&sc->sc_rhlock));
3894
3895 if (sc->sc_dying)
3896 return -1;
3897
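	/* Select which of the controller's two root hub buses is addressed. */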
3898 size_t bn = bus == &sc->sc_bus ? 0 : 1;
3899
3900 len = UGETW(req->wLength);
3901 value = UGETW(req->wValue);
3902 index = UGETW(req->wIndex);
3903
3904 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
3905 req->bmRequestType | (req->bRequest << 8), value, index, len);
3906
3907 #define C(x,y) ((x) | ((y) << 8))
3908 switch (C(req->bRequest, req->bmRequestType)) {
3909 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3910 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
3911 if (len == 0)
3912 break;
3913 switch (value) {
3914 #define sd ((usb_string_descriptor_t *)buf)
3915 case C(2, UDESC_STRING):
3916 /* Product */
3917 totlen = usb_makestrdesc(sd, len, "xHCI root hub");
3918 break;
3919 #undef sd
3920 default:
3921 /* default from usbroothub */
3922 return buflen;
3923 }
3924 break;
3925
3926 /* Hub requests */
3927 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
3928 break;
3929 /* Clear Port Feature request */
3930 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
3931 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
3932
3933 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
3934 index, value, bn, cp);
3935 if (index < 1 || index > sc->sc_rhportcount[bn]) {
3936 return -1;
3937 }
3938 port = XHCI_PORTSC(cp);
3939 v = xhci_op_read_4(sc, port);
3940 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
3941 v &= ~XHCI_PS_CLEAR;
3942 switch (value) {
3943 case UHF_PORT_ENABLE:
3944 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
3945 break;
3946 case UHF_PORT_SUSPEND:
3947 return -1;
3948 case UHF_PORT_POWER:
3949 break;
3950 case UHF_PORT_TEST:
3951 case UHF_PORT_INDICATOR:
3952 return -1;
3953 case UHF_C_PORT_CONNECTION:
3954 xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
3955 break;
3956 case UHF_C_PORT_ENABLE:
3957 case UHF_C_PORT_SUSPEND:
3958 case UHF_C_PORT_OVER_CURRENT:
3959 return -1;
3960 case UHF_C_BH_PORT_RESET:
3961 xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
3962 break;
3963 case UHF_C_PORT_RESET:
3964 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
3965 break;
3966 case UHF_C_PORT_LINK_STATE:
3967 xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
3968 break;
3969 case UHF_C_PORT_CONFIG_ERROR:
3970 xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
3971 break;
3972 default:
3973 return -1;
3974 }
3975 break;
3976 }
3977 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
3978 if (len == 0)
3979 break;
3980 if ((value & 0xff) != 0) {
3981 return -1;
3982 }
3983 usb_hub_descriptor_t hubd;
3984
3985 totlen = uimin(buflen, sizeof(hubd));
3986 memcpy(&hubd, buf, totlen);
3987 hubd.bNbrPorts = sc->sc_rhportcount[bn];
3988 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
3989 hubd.bPwrOn2PwrGood = 200;
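		/* bPwrOn2PwrGood is in units of 2 ms, so 200 = 400 ms. */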
3990 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
3991 /* XXX can't find out? */
3992 hubd.DeviceRemovable[i++] = 0;
3993 }
3994 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
3995 totlen = uimin(totlen, hubd.bDescLength);
3996 memcpy(buf, &hubd, totlen);
3997 break;
3998 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
3999 if (len != 4) {
4000 return -1;
4001 }
4002 memset(buf, 0, len); /* ? XXX */
4003 totlen = len;
4004 break;
4005 /* Get Port Status request */
4006 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
4007 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4008
4009 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
4010 bn, index, cp, 0);
4011 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4012 DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
4013 "portcount=%jd",
4014 index, bn, sc->sc_rhportcount[bn], 0);
4015 return -1;
4016 }
4017 if (len != 4) {
4018 DPRINTFN(5, "bad get port status: len %jd != 4",
4019 len, 0, 0, 0);
4020 return -1;
4021 }
4022 v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
4023 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
4024 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
4025 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
4026 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
4027 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
4028 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
4029 if (v & XHCI_PS_PR) i |= UPS_RESET;
4030 if (v & XHCI_PS_PP) {
4031 if (i & UPS_OTHER_SPEED)
4032 i |= UPS_PORT_POWER_SS;
4033 else
4034 i |= UPS_PORT_POWER;
4035 }
4036 if (i & UPS_OTHER_SPEED)
4037 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
4038 if (sc->sc_vendor_port_status)
4039 i = sc->sc_vendor_port_status(sc, v, i);
4040 USETW(ps.wPortStatus, i);
4041 i = 0;
4042 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
4043 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
4044 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
4045 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
4046 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
4047 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
4048 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
4049 USETW(ps.wPortChange, i);
4050 totlen = uimin(len, sizeof(ps));
4051 memcpy(buf, &ps, totlen);
4052 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
4053 " totlen %jd",
4054 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
4055 break;
4056 }
4057 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
4058 return -1;
4059 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
4060 break;
4061 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
4062 break;
4063 /* Set Port Feature request */
4064 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
4065 int optval = (index >> 8) & 0xff;
4066 index &= 0xff;
4067 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4068 return -1;
4069 }
4070
4071 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4072
4073 port = XHCI_PORTSC(cp);
4074 v = xhci_op_read_4(sc, port);
4075 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
4076 v &= ~XHCI_PS_CLEAR;
4077 switch (value) {
4078 case UHF_PORT_ENABLE:
4079 xhci_op_write_4(sc, port, v | XHCI_PS_PED);
4080 break;
4081 case UHF_PORT_SUSPEND:
4082 /* XXX suspend */
4083 break;
4084 case UHF_PORT_RESET:
4085 xhci_op_write_4(sc, port, v | XHCI_PS_PR);
4086 /* Wait for reset to complete. */
4087 for (i = 0; i < USB_PORT_ROOT_RESET_DELAY / 10; i++) {
4088 if (sc->sc_dying) {
4089 return -1;
4090 }
4091 v = xhci_op_read_4(sc, port);
4092 if ((v & XHCI_PS_PR) == 0) {
4093 break;
4094 }
4095 usb_delay_ms(&sc->sc_bus, 10);
4096 }
4097 break;
4098 case UHF_PORT_POWER:
4099 /* XXX power control */
4100 break;
4101 /* XXX more */
4102 case UHF_C_PORT_RESET:
4103 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4104 break;
4105 case UHF_PORT_U1_TIMEOUT:
4106 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4107 return -1;
4108 }
4109 port = XHCI_PORTPMSC(cp);
4110 v = xhci_op_read_4(sc, port);
4111 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4112 index, cp, v, 0);
4113 v &= ~XHCI_PM3_U1TO_SET(0xff);
4114 v |= XHCI_PM3_U1TO_SET(optval);
4115 xhci_op_write_4(sc, port, v);
4116 break;
4117 case UHF_PORT_U2_TIMEOUT:
4118 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4119 return -1;
4120 }
4121 port = XHCI_PORTPMSC(cp);
4122 v = xhci_op_read_4(sc, port);
4123 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4124 index, cp, v, 0);
4125 v &= ~XHCI_PM3_U2TO_SET(0xff);
4126 v |= XHCI_PM3_U2TO_SET(optval);
4127 xhci_op_write_4(sc, port, v);
4128 break;
4129 default:
4130 return -1;
4131 }
4132 }
4133 break;
4134 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
4135 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
4136 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
4137 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
4138 break;
4139 default:
4140 /* default from usbroothub */
4141 return buflen;
4142 }
4143
4144 return totlen;
4145 }
4146
4147 static int
4148 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
4149 void *buf, int buflen)
4150 {
4151 struct xhci_softc *sc = XHCI_BUS2SC(bus);
4152 int actlen;
4153
4154 mutex_enter(&sc->sc_rhlock);
4155 actlen = xhci_roothub_ctrl_locked(bus, req, buf, buflen);
4156 mutex_exit(&sc->sc_rhlock);
4157
4158 return actlen;
4159 }
4160
4161 /* root hub interrupt */
4162
4163 static usbd_status
4164 xhci_root_intr_transfer(struct usbd_xfer *xfer)
4165 {
4166 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4167
4168 /* Pipe isn't running, start first */
4169 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4170 }
4171
4172 /* Wait for roothub port status/change */
4173 static usbd_status
4174 xhci_root_intr_start(struct usbd_xfer *xfer)
4175 {
4176 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4177 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4178
4179 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4180
4181 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4182
4183 if (sc->sc_dying)
4184 return USBD_IOERROR;
4185
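	/*
	 * Remember the xfer; it is completed later when the controller
	 * reports a port status change event on this bus.
	 */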
4186 KASSERT(sc->sc_intrxfer[bn] == NULL);
4187 sc->sc_intrxfer[bn] = xfer;
4188 xfer->ux_status = USBD_IN_PROGRESS;
4189
4190 return USBD_IN_PROGRESS;
4191 }
4192
4193 static void
4194 xhci_root_intr_abort(struct usbd_xfer *xfer)
4195 {
4196 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4197 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4198
4199 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4200
4201 KASSERT(mutex_owned(&sc->sc_lock));
4202 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
4203
4204 /* If xfer has already completed, nothing to do here. */
4205 if (sc->sc_intrxfer[bn] == NULL)
4206 return;
4207
4208 /*
4209 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
4210 * Cancel it.
4211 */
4212 KASSERT(sc->sc_intrxfer[bn] == xfer);
4213 xfer->ux_status = USBD_CANCELLED;
4214 usb_transfer_complete(xfer);
4215 }
4216
4217 static void
4218 xhci_root_intr_close(struct usbd_pipe *pipe)
4219 {
4220 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
4221 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
4222 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4223
4224 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4225
4226 KASSERT(mutex_owned(&sc->sc_lock));
4227
4228 /*
4229 * Caller must guarantee the xfer has completed first, by
4230 * closing the pipe only after normal completion or an abort.
4231 */
4232 KASSERT(sc->sc_intrxfer[bn] == NULL);
4233 }
4234
4235 static void
4236 xhci_root_intr_done(struct usbd_xfer *xfer)
4237 {
4238 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4239 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4240
4241 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4242
4243 KASSERT(mutex_owned(&sc->sc_lock));
4244
4245 /* Claim the xfer so it doesn't get completed again. */
4246 KASSERT(sc->sc_intrxfer[bn] == xfer);
4247 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
4248 sc->sc_intrxfer[bn] = NULL;
4249 }
4250
4251 /* -------------- */
4252 /* device control */
4253
4254 static usbd_status
4255 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
4256 {
4257 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4258
4259 /* Pipe isn't running, start first */
4260 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4261 }
4262
4263 static usbd_status
4264 xhci_device_ctrl_start(struct usbd_xfer *xfer)
4265 {
4266 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4267 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4268 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4269 struct xhci_ring * const tr = xs->xs_xr[dci];
4270 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4271 usb_device_request_t * const req = &xfer->ux_request;
4272 const bool isread = usbd_xfer_isread(xfer);
4273 const uint32_t len = UGETW(req->wLength);
4274 usb_dma_t * const dma = &xfer->ux_dmabuf;
4275 uint64_t parameter;
4276 uint32_t status;
4277 uint32_t control;
4278 u_int i;
4279 const bool polling = xhci_polling_p(sc);
4280
4281 XHCIHIST_FUNC();
4282 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
4283 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
4284 UGETW(req->wIndex), UGETW(req->wLength));
4285
4286 KASSERT(polling || mutex_owned(&sc->sc_lock));
4287
4288 /* we rely on the bottom bits for extra info */
4289 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %zx",
4290 (uintptr_t) xfer);
4291
4292 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);
4293
4294 if (tr->is_halted)
4295 goto out;
4296
4297 i = 0;
4298
4299 /* setup phase */
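	/* The 8-byte SETUP packet is carried as immediate data (IDT) in the TRB. */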
4300 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */
4301 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
4302 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
4303 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
4304 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
4305 XHCI_TRB_3_IDT_BIT;
4306 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4307
4308 if (len != 0) {
4309 /* data phase */
4310 parameter = DMAADDR(dma, 0);
4311 KASSERTMSG(len <= 0x10000, "len %d", len);
4312 status = XHCI_TRB_2_IRQ_SET(0) |
4313 XHCI_TRB_2_TDSZ_SET(0) |
4314 XHCI_TRB_2_BYTES_SET(len);
4315 control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
4316 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
4317 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4318 XHCI_TRB_3_IOC_BIT;
4319 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4320
4321 usb_syncmem(dma, 0, len,
4322 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4323 }
4324
4325 parameter = 0;
4326 status = XHCI_TRB_2_IRQ_SET(0);
4327 /* the status stage has inverted direction */
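	/*
	 * e.g. a control read with a data stage gets an OUT status stage,
	 * while writes and zero-length requests get an IN status stage.
	 */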
4328 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
4329 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
4330 XHCI_TRB_3_IOC_BIT;
4331 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4332
4333 if (!polling)
4334 mutex_enter(&tr->xr_lock);
4335 xhci_ring_put_xfer(sc, tr, xx, i);
4336 if (!polling)
4337 mutex_exit(&tr->xr_lock);
4338
4339 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4340
4341 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4342 usbd_xfer_schedule_timeout(xfer);
4343 xfer->ux_status = USBD_IN_PROGRESS;
4344 } else {
4345 /*
4346 * We must be coming from xhci_pipe_restart -- timeout
4347 * already set up, nothing to do.
4348 */
4349 }
4350 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4351
4352 return USBD_IN_PROGRESS;
4353 }
4354
4355 static void
4356 xhci_device_ctrl_done(struct usbd_xfer *xfer)
4357 {
4358 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4359 usb_device_request_t *req = &xfer->ux_request;
4360 int len = UGETW(req->wLength);
4361 int rd = req->bmRequestType & UT_READ;
4362
4363 if (len)
4364 usb_syncmem(&xfer->ux_dmabuf, 0, len,
4365 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4366 }
4367
4368 static void
4369 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
4370 {
4371 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4372
4373 usbd_xfer_abort(xfer);
4374 }
4375
4376 static void
4377 xhci_device_ctrl_close(struct usbd_pipe *pipe)
4378 {
4379 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4380
4381 xhci_close_pipe(pipe);
4382 }
4383
4384 /* ------------------ */
4385 /* device isochronous */
4386
4387 static usbd_status
4388 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
4389 {
4390 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4391
4392 return xhci_device_isoc_enter(xfer);
4393 }
4394
4395 static usbd_status
4396 xhci_device_isoc_enter(struct usbd_xfer *xfer)
4397 {
4398 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4399 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4400 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4401 struct xhci_ring * const tr = xs->xs_xr[dci];
4402 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4403 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
4404 uint32_t len = xfer->ux_length;
4405 usb_dma_t * const dma = &xfer->ux_dmabuf;
4406 uint64_t parameter;
4407 uint32_t status;
4408 uint32_t control;
4409 uint32_t mfindex;
4410 uint32_t offs;
4411 int i, ival;
4412 const bool polling = xhci_polling_p(sc);
4413 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
4414 const uint16_t mps = UE_GET_SIZE(MPS);
4415 const uint8_t maxb = xpipe->xp_maxb;
4416 u_int tdpc, tbc, tlbpc;
4417
4418 XHCIHIST_FUNC();
4419 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4420 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4421
4422 KASSERT(polling || mutex_owned(&sc->sc_lock));
4423
4424 if (sc->sc_dying)
4425 return USBD_IOERROR;
4426
4427 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
4428 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4429
4430 const bool isread = usbd_xfer_isread(xfer);
4431 if (xfer->ux_length)
4432 usb_syncmem(dma, 0, xfer->ux_length,
4433 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4434
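	/*
	 * For isoc endpoints bInterval is an exponent: the service interval
	 * is 2^(bInterval-1) frames (FS) or microframes (HS/SS).
	 */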
4435 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
4436 if (ival >= 1 && ival <= 16)
4437 ival = 1 << (ival - 1);
4438 else
4439 ival = 1; /* fake something up */
4440
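	/*
	 * First isoc transfer on this pipe: derive a start frame from
	 * MFINDEX (microframes), pad it past the worst-case Isochronous
	 * Scheduling Threshold and round it up to the endpoint interval.
	 */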
4441 if (xpipe->xp_isoc_next == -1) {
4442 mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
4443 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
4444 mfindex = XHCI_MFINDEX_GET(mfindex + 1);
4445 mfindex /= USB_UFRAMES_PER_FRAME;
4446 mfindex += 7; /* 7 frames is max possible IST */
4447 xpipe->xp_isoc_next = roundup2(mfindex, ival);
4448 }
4449
4450 offs = 0;
4451 for (i = 0; i < xfer->ux_nframes; i++) {
4452 len = xfer->ux_frlengths[i];
4453
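		/*
		 * Per-TD packet/burst bookkeeping for the isoch TRB:
		 * tdpc = packets in this TD, tbc = bursts - 1,
		 * tlbpc = packets in the last burst - 1.
		 * e.g. len=3000, mps=1024, maxb=3: tdpc=3, tbc=0, tlbpc=2.
		 */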
4454 tdpc = howmany(len, mps);
4455 tbc = howmany(tdpc, maxb) - 1;
4456 tlbpc = tdpc % maxb;
4457 tlbpc = tlbpc ? tlbpc - 1 : maxb - 1;
4458
4459 KASSERTMSG(len <= 0x10000, "len %d", len);
4460 parameter = DMAADDR(dma, offs);
4461 status = XHCI_TRB_2_IRQ_SET(0) |
4462 XHCI_TRB_2_TDSZ_SET(0) |
4463 XHCI_TRB_2_BYTES_SET(len);
4464 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
4465 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4466 XHCI_TRB_3_TBC_SET(tbc) |
4467 XHCI_TRB_3_TLBPC_SET(tlbpc) |
4468 XHCI_TRB_3_IOC_BIT;
4469 if (XHCI_HCC_CFC(sc->sc_hcc)) {
4470 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4471 #if 0
4472 } else if (xpipe->xp_isoc_next == -1) {
4473 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4474 #endif
4475 } else {
4476 control |= XHCI_TRB_3_ISO_SIA_BIT;
4477 }
4478 #if 0
4479 if (i != xfer->ux_nframes - 1)
4480 control |= XHCI_TRB_3_BEI_BIT;
4481 #endif
4482 xhci_xfer_put_trb(xx, i, parameter, status, control);
4483
4484 xpipe->xp_isoc_next += ival;
4485 offs += len;
4486 }
4487
4488 xx->xx_isoc_done = 0;
4489
4490 if (!polling)
4491 mutex_enter(&tr->xr_lock);
4492 xhci_ring_put_xfer(sc, tr, xx, i);
4493 if (!polling)
4494 mutex_exit(&tr->xr_lock);
4495
4496 xfer->ux_status = USBD_IN_PROGRESS;
4497 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4498 usbd_xfer_schedule_timeout(xfer);
4499
4500 return USBD_IN_PROGRESS;
4501 }
4502
4503 static void
4504 xhci_device_isoc_abort(struct usbd_xfer *xfer)
4505 {
4506 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4507
4508 usbd_xfer_abort(xfer);
4509 }
4510
4511 static void
4512 xhci_device_isoc_close(struct usbd_pipe *pipe)
4513 {
4514 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4515
4516 xhci_close_pipe(pipe);
4517 }
4518
4519 static void
4520 xhci_device_isoc_done(struct usbd_xfer *xfer)
4521 {
4522 #ifdef USB_DEBUG
4523 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4524 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4525 #endif
4526 const bool isread = usbd_xfer_isread(xfer);
4527
4528 XHCIHIST_FUNC();
4529 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4530 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4531
4532 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4533 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4534 }
4535
4536 /* ----------- */
4537 /* device bulk */
4538
4539 static usbd_status
4540 xhci_device_bulk_transfer(struct usbd_xfer *xfer)
4541 {
4542 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4543
4544 /* Pipe isn't running, so start it first. */
4545 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4546 }
4547
4548 static usbd_status
4549 xhci_device_bulk_start(struct usbd_xfer *xfer)
4550 {
4551 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4552 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4553 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4554 struct xhci_ring * const tr = xs->xs_xr[dci];
4555 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4556 const uint32_t len = xfer->ux_length;
4557 usb_dma_t * const dma = &xfer->ux_dmabuf;
4558 uint64_t parameter;
4559 uint32_t status;
4560 uint32_t control;
4561 u_int i = 0;
4562 const bool polling = xhci_polling_p(sc);
4563
4564 XHCIHIST_FUNC();
4565 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4566 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4567
4568 KASSERT(polling || mutex_owned(&sc->sc_lock));
4569
4570 if (sc->sc_dying)
4571 return USBD_IOERROR;
4572
4573 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4574
4575 if (tr->is_halted)
4576 goto out;
4577
4578 parameter = DMAADDR(dma, 0);
4579 const bool isread = usbd_xfer_isread(xfer);
4580 if (len)
4581 usb_syncmem(dma, 0, len,
4582 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4583
4584 /*
4585 	 * XXX: (dsl) The physical buffer must not cross a 64k boundary.
4586 	 * If the user-supplied buffer crosses such a boundary then two
4587 	 * (or more) TRBs should be used.
4588 	 * If multiple TRBs are used, the td_size field must be set correctly.
4589 	 * For v1.0 controllers (like Ivy Bridge) this is the number of USB data
4590 	 * blocks needed to complete the transfer.
4591 	 * Setting it to 1 in the last TRB causes an extra zero-length
4592 	 * data block to be sent.
4593 	 * The earlier documentation differs; I don't know how it behaves.
4594 */
4595 KASSERTMSG(len <= 0x10000, "len %d", len);
4596 status = XHCI_TRB_2_IRQ_SET(0) |
4597 XHCI_TRB_2_TDSZ_SET(0) |
4598 XHCI_TRB_2_BYTES_SET(len);
4599 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4600 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4601 XHCI_TRB_3_IOC_BIT;
4602 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4603
4604 if (!polling)
4605 mutex_enter(&tr->xr_lock);
4606 xhci_ring_put_xfer(sc, tr, xx, i);
4607 if (!polling)
4608 mutex_exit(&tr->xr_lock);
4609
4610 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4611
4612 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4613 xfer->ux_status = USBD_IN_PROGRESS;
4614 usbd_xfer_schedule_timeout(xfer);
4615 } else {
4616 /*
4617 * We must be coming from xhci_pipe_restart -- timeout
4618 * already set up, nothing to do.
4619 */
4620 }
4621 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4622
4623 return USBD_IN_PROGRESS;
4624 }
4625
4626 static void
4627 xhci_device_bulk_done(struct usbd_xfer *xfer)
4628 {
4629 #ifdef USB_DEBUG
4630 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4631 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4632 #endif
4633 const bool isread = usbd_xfer_isread(xfer);
4634
4635 XHCIHIST_FUNC();
4636 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4637 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4638
4639 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4640 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4641 }
4642
4643 static void
4644 xhci_device_bulk_abort(struct usbd_xfer *xfer)
4645 {
4646 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4647
4648 usbd_xfer_abort(xfer);
4649 }
4650
4651 static void
4652 xhci_device_bulk_close(struct usbd_pipe *pipe)
4653 {
4654 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4655
4656 xhci_close_pipe(pipe);
4657 }
4658
4659 /* ---------------- */
4660 /* device interrupt */
4661
4662 static usbd_status
4663 xhci_device_intr_transfer(struct usbd_xfer *xfer)
4664 {
4665 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4666
4667 /* Pipe isn't running, so start it first. */
4668 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4669 }
4670
4671 static usbd_status
4672 xhci_device_intr_start(struct usbd_xfer *xfer)
4673 {
4674 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4675 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4676 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4677 struct xhci_ring * const tr = xs->xs_xr[dci];
4678 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4679 const uint32_t len = xfer->ux_length;
4680 const bool polling = xhci_polling_p(sc);
4681 usb_dma_t * const dma = &xfer->ux_dmabuf;
4682 uint64_t parameter;
4683 uint32_t status;
4684 uint32_t control;
4685 u_int i = 0;
4686
4687 XHCIHIST_FUNC();
4688 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4689 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4690
4691 KASSERT(polling || mutex_owned(&sc->sc_lock));
4692
4693 if (sc->sc_dying)
4694 return USBD_IOERROR;
4695
4696 if (tr->is_halted)
4697 goto out;
4698
4699 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4700
4701 const bool isread = usbd_xfer_isread(xfer);
4702 if (len)
4703 usb_syncmem(dma, 0, len,
4704 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4705
4706 parameter = DMAADDR(dma, 0);
4707 KASSERTMSG(len <= 0x10000, "len %d", len);
4708 status = XHCI_TRB_2_IRQ_SET(0) |
4709 XHCI_TRB_2_TDSZ_SET(0) |
4710 XHCI_TRB_2_BYTES_SET(len);
4711 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4712 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
4713 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4714
4715 if (!polling)
4716 mutex_enter(&tr->xr_lock);
4717 xhci_ring_put_xfer(sc, tr, xx, i);
4718 if (!polling)
4719 mutex_exit(&tr->xr_lock);
4720
4721 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4722
4723 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4724 xfer->ux_status = USBD_IN_PROGRESS;
4725 usbd_xfer_schedule_timeout(xfer);
4726 } else {
4727 /*
4728 * We must be coming from xhci_pipe_restart -- timeout
4729 * already set up, nothing to do.
4730 */
4731 }
4732 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4733
4734 return USBD_IN_PROGRESS;
4735 }
4736
4737 static void
4738 xhci_device_intr_done(struct usbd_xfer *xfer)
4739 {
4740 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4741 #ifdef USB_DEBUG
4742 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4743 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4744 #endif
4745 const bool isread = usbd_xfer_isread(xfer);
4746
4747 XHCIHIST_FUNC();
4748 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4749 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4750
4751 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4752
4753 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4754 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4755 }
4756
4757 static void
4758 xhci_device_intr_abort(struct usbd_xfer *xfer)
4759 {
4760 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4761
4762 XHCIHIST_FUNC();
4763 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);
4764
4765 KASSERT(mutex_owned(&sc->sc_lock));
4766 usbd_xfer_abort(xfer);
4767 }
4768
4769 static void
4770 xhci_device_intr_close(struct usbd_pipe *pipe)
4771 {
4772 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
4773
4774 XHCIHIST_FUNC();
4775 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);
4776
4777 xhci_close_pipe(pipe);
4778 }
4779