/* $NetBSD: xhci.c,v 1.107.2.12 2023/08/01 13:46:18 martin Exp $ */

/*
 * Copyright (c) 2013 Jonathan A. Kollasch
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * USB rev 2.0 and rev 3.1 specification
 *  http://www.usb.org/developers/docs/
 * xHCI rev 1.1 specification
 *  http://www.intel.com/technology/usb/spec.htm
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.107.2.12 2023/08/01 13:46:18 martin Exp $");

#ifdef _KERNEL_OPT
#include "opt_usb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/device.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/sysctl.h>

#include <machine/endian.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usbdi_util.h>
#include <dev/usb/usbhist.h>
#include <dev/usb/usb_mem.h>
#include <dev/usb/usb_quirks.h>

#include <dev/usb/xhcireg.h>
#include <dev/usb/xhcivar.h>
#include <dev/usb/usbroothub.h>


#ifdef USB_DEBUG
#ifndef XHCI_DEBUG
#define xhcidebug 0
#else /* !XHCI_DEBUG */
#define HEXDUMP(a, b, c) \
	do { \
		if (xhcidebug > 0) \
			hexdump(printf, a, b, c); \
	} while (/*CONSTCOND*/0)
static int xhcidebug = 0;

SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
	    SYSCTL_DESCR("xhci global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

	/* control debugging printfs */
	err = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
	if (err)
		goto fail;

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}

#endif /* !XHCI_DEBUG */
#endif /* USB_DEBUG */

#ifndef HEXDUMP
#define HEXDUMP(a, b, c)
#endif

#define DPRINTF(FMT,A,B,C,D)	USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
#define DPRINTFN(N,FMT,A,B,C,D)	USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
#define XHCIHIST_FUNC()		USBHIST_FUNC()
#define XHCIHIST_CALLED(name)	USBHIST_CALLED(xhcidebug)
#define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
	USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)

#define XHCI_DCI_SLOT 0
#define XHCI_DCI_EP_CONTROL 1

#define XHCI_ICI_INPUT_CONTROL 0

struct xhci_pipe {
	struct usbd_pipe xp_pipe;
	struct usb_task xp_async_task;
};

#define XHCI_COMMAND_RING_TRBS 256
#define XHCI_EVENT_RING_TRBS 256
#define XHCI_EVENT_RING_SEGMENTS 1
#define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT

static usbd_status xhci_open(struct usbd_pipe *);
static void xhci_close_pipe(struct usbd_pipe *);
static int xhci_intr1(struct xhci_softc * const);
static void xhci_softintr(void *);
static void xhci_poll(struct usbd_bus *);
static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
static void xhci_abortx(struct usbd_xfer *);
static bool xhci_dying(struct usbd_bus *);
static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
    struct usbd_port *);
static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
    void *, int);

static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
//static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
static usbd_status xhci_reset_endpoint(struct usbd_pipe *);
static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *,
    struct xhci_slot *, u_int, uint32_t);
static usbd_status xhci_stop_endpoint(struct usbd_pipe *);

static void xhci_host_dequeue(struct xhci_ring * const);
static usbd_status xhci_set_dequeue(struct usbd_pipe *);

static usbd_status xhci_do_command(struct xhci_softc * const,
    struct xhci_soft_trb * const, int);
static usbd_status xhci_do_command_locked(struct xhci_softc * const,
    struct xhci_soft_trb * const, int);
static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *, int, int);
static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
static usbd_status xhci_enable_slot(struct xhci_softc * const,
    uint8_t * const);
static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
static usbd_status xhci_address_device(struct xhci_softc * const,
    uint64_t, uint8_t, bool);
static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
    struct xhci_slot * const, u_int);
static usbd_status xhci_ring_init(struct xhci_softc * const,
    struct xhci_ring * const, size_t, size_t);
static void xhci_ring_free(struct xhci_softc * const, struct xhci_ring * const);

static void xhci_setup_ctx(struct usbd_pipe *);
static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
static uint32_t xhci_bival2ival(uint32_t, uint32_t);

static void xhci_noop(struct usbd_pipe *);

static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
static usbd_status xhci_root_intr_start(struct usbd_xfer *);
static void xhci_root_intr_abort(struct usbd_xfer *);
static void xhci_root_intr_close(struct usbd_pipe *);
static void xhci_root_intr_done(struct usbd_xfer *);

static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
static void xhci_device_ctrl_abort(struct usbd_xfer *);
static void xhci_device_ctrl_close(struct usbd_pipe *);
static void xhci_device_ctrl_done(struct usbd_xfer *);

static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
static usbd_status xhci_device_intr_start(struct usbd_xfer *);
static void xhci_device_intr_abort(struct usbd_xfer *);
static void xhci_device_intr_close(struct usbd_pipe *);
static void xhci_device_intr_done(struct usbd_xfer *);

static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
static void xhci_device_bulk_abort(struct usbd_xfer *);
static void xhci_device_bulk_close(struct usbd_pipe *);
static void xhci_device_bulk_done(struct usbd_xfer *);

static const struct usbd_bus_methods xhci_bus_methods = {
	.ubm_open = xhci_open,
	.ubm_softint = xhci_softintr,
	.ubm_dopoll = xhci_poll,
	.ubm_allocx = xhci_allocx,
	.ubm_freex = xhci_freex,
	.ubm_abortx = xhci_abortx,
	.ubm_dying = xhci_dying,
	.ubm_getlock = xhci_get_lock,
	.ubm_newdev = xhci_new_device,
	.ubm_rhctrl = xhci_roothub_ctrl,
};

static const struct usbd_pipe_methods xhci_root_intr_methods = {
	.upm_transfer = xhci_root_intr_transfer,
	.upm_start = xhci_root_intr_start,
	.upm_abort = xhci_root_intr_abort,
	.upm_close = xhci_root_intr_close,
	.upm_cleartoggle = xhci_noop,
	.upm_done = xhci_root_intr_done,
};


static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
	.upm_transfer = xhci_device_ctrl_transfer,
	.upm_start = xhci_device_ctrl_start,
	.upm_abort = xhci_device_ctrl_abort,
	.upm_close = xhci_device_ctrl_close,
	.upm_cleartoggle = xhci_noop,
	.upm_done = xhci_device_ctrl_done,
};

static const struct usbd_pipe_methods xhci_device_isoc_methods = {
	.upm_cleartoggle = xhci_noop,
};

static const struct usbd_pipe_methods xhci_device_bulk_methods = {
	.upm_transfer = xhci_device_bulk_transfer,
	.upm_start = xhci_device_bulk_start,
	.upm_abort = xhci_device_bulk_abort,
	.upm_close = xhci_device_bulk_close,
	.upm_cleartoggle = xhci_noop,
	.upm_done = xhci_device_bulk_done,
};

static const struct usbd_pipe_methods xhci_device_intr_methods = {
	.upm_transfer = xhci_device_intr_transfer,
	.upm_start = xhci_device_intr_start,
	.upm_abort = xhci_device_intr_abort,
	.upm_close = xhci_device_intr_close,
	.upm_cleartoggle = xhci_noop,
	.upm_done = xhci_device_intr_done,
};

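/*
 * Register accessors.  The capability, operational, runtime and doorbell
 * register sets are reached through the sc_cbh, sc_obh, sc_rbh and sc_dbh
 * bus space subregions.  64-bit registers are split into two 32-bit
 * accesses unless XHCI_USE_BUS_SPACE_8 is defined, and are treated as
 * 32-bit registers when the controller lacks 64-bit addressing (AC64 == 0).
 */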
static inline uint32_t
xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
}

static inline uint32_t
xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
}

static inline void
xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
    uint32_t value)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
}

#if 0 /* unused */
static inline void
xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
    uint32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
}
#endif /* unused */

static inline uint32_t
xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
}

static inline uint32_t
xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
}

static inline void
xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
    uint32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
}

static inline uint64_t
xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
{
	uint64_t value;

	if (sc->sc_ac64) {
#ifdef XHCI_USE_BUS_SPACE_8
		value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
#else
		value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
		value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
		    offset + 4) << 32;
#endif
	} else {
		value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
	}

	return value;
}

static inline void
xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
    uint64_t value)
{
	if (sc->sc_ac64) {
#ifdef XHCI_USE_BUS_SPACE_8
		bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
#else
		bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
		    (value >> 0) & 0xffffffff);
		bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
		    (value >> 32) & 0xffffffff);
#endif
	} else {
		bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
	}
}

static inline void
xhci_op_barrier(const struct xhci_softc * const sc, bus_size_t offset,
    bus_size_t len, int flags)
{
	bus_space_barrier(sc->sc_iot, sc->sc_obh, offset, len, flags);
}

static inline uint32_t
xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
}

static inline void
xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
    uint32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
}

static inline uint64_t
xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
{
	uint64_t value;

	if (sc->sc_ac64) {
#ifdef XHCI_USE_BUS_SPACE_8
		value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
#else
		value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
		value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
		    offset + 4) << 32;
#endif
	} else {
		value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
	}

	return value;
}

static inline void
xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
    uint64_t value)
{
	if (sc->sc_ac64) {
#ifdef XHCI_USE_BUS_SPACE_8
		bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
#else
		bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
		    (value >> 0) & 0xffffffff);
		bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
		    (value >> 32) & 0xffffffff);
#endif
	} else {
		bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
	}
}

#if 0 /* unused */
static inline uint32_t
xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
}
#endif /* unused */

static inline void
xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
    uint32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
}

/* --- */

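/*
 * Compute the EP Type value used in xHCI endpoint contexts: OUT endpoints
 * map to 1 (isoch), 2 (bulk) or 3 (interrupt), and IN endpoints (as well
 * as the bidirectional control endpoint) have 0x4 added.
 */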
static inline uint8_t
xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
{
	u_int eptype = 0;

	switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
	case UE_CONTROL:
		eptype = 0x0;
		break;
	case UE_ISOCHRONOUS:
		eptype = 0x1;
		break;
	case UE_BULK:
		eptype = 0x2;
		break;
	case UE_INTERRUPT:
		eptype = 0x3;
		break;
	}

	if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
	    (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
		return eptype | 0x4;
	else
		return eptype;
}

static u_int
xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
{
	/* xHCI 1.0 section 4.5.1 */
	u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
	u_int in = 0;

	if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
	    (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
		in = 1;

	return epaddr * 2 + in;
}

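/*
 * Convert a Device Context Index (DCI) to the corresponding Input Context
 * Index (ICI).  Input contexts are shifted by one because index 0 of the
 * input context block holds the Input Control Context.
 */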
static inline u_int
xhci_dci_to_ici(const u_int i)
{
	return i + 1;
}

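/*
 * Accessors for the per-slot device context and input context blocks:
 * return the kernel virtual address (KERNADDR) or DMA address (DMAADDR)
 * of the context with the given index; each context is sc_ctxsz bytes.
 */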
static inline void *
xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
    const u_int dci)
{
	return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
}

#if 0 /* unused */
static inline bus_addr_t
xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
    const u_int dci)
{
	return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
}
#endif /* unused */

static inline void *
xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
    const u_int ici)
{
	return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
}

static inline bus_addr_t
xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
    const u_int ici)
{
	return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
}

static inline struct xhci_trb *
xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
{
	return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
}

static inline bus_addr_t
xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
{
	return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
}

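/*
 * Fill in a TRB.  xhci_soft_trb_put() keeps the fields in host byte order
 * (for the software copy passed around with commands), while xhci_trb_put()
 * stores them in little-endian form as the controller reads them from memory.
 */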
static inline void
xhci_soft_trb_put(struct xhci_soft_trb * const trb,
    uint64_t parameter, uint32_t status, uint32_t control)
{
	trb->trb_0 = parameter;
	trb->trb_2 = status;
	trb->trb_3 = control;
}

static inline void
xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
    uint32_t control)
{
	trb->trb_0 = htole64(parameter);
	trb->trb_2 = htole32(status);
	trb->trb_3 = htole32(control);
}

static int
xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
{
	/* base address of TRBs */
	bus_addr_t trbp = xhci_ring_trbp(xr, 0);

	/* trb_0 range sanity check */
	if (trb_0 == 0 || trb_0 < trbp ||
	    (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
	    (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
		return 1;
	}
	*idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
	return 0;
}

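/*
 * Return the current endpoint state (the EP State field) for the given DCI,
 * read from the slot's device context in DMA memory.
 */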
static unsigned int
xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
    u_int dci)
{
	uint32_t *cp;

	usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
	cp = xhci_slot_get_dcv(sc, xs, dci);
	return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
}

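/*
 * Return the bus number (0 for the USB 3.x root hub, 1 for the USB 2.0
 * root hub) that a controller port has been assigned to; sc_ctlrportbus
 * is a bitmap with one bit per controller port.
 */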
static inline unsigned int
xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
{
	const unsigned int port = ctlrport - 1;
	const uint8_t bit = __BIT(port % NBBY);

	return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
}

/*
 * Return the roothub port for a controller port.  Both are 1..n.
 */
static inline unsigned int
xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
{

	return sc->sc_ctlrportmap[ctrlport - 1];
}

/*
 * Return the controller port for a bus roothub port.  Both are 1..n.
 */
static inline unsigned int
xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
    unsigned int rhport)
{

	return sc->sc_rhportmap[bn][rhport - 1];
}

/* --- */

void
xhci_childdet(device_t self, device_t child)
{
	struct xhci_softc * const sc = device_private(self);

	KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
	if (child == sc->sc_child2)
		sc->sc_child2 = NULL;
	else if (child == sc->sc_child)
		sc->sc_child = NULL;
}

int
xhci_detach(struct xhci_softc *sc, int flags)
{
	int rv = 0;

	if (sc->sc_child2 != NULL) {
		rv = config_detach(sc->sc_child2, flags);
		if (rv != 0)
			return rv;
		KASSERT(sc->sc_child2 == NULL);
	}

	if (sc->sc_child != NULL) {
		rv = config_detach(sc->sc_child, flags);
		if (rv != 0)
			return rv;
		KASSERT(sc->sc_child == NULL);
	}

	/* XXX unconfigure/free slots */

	/* verify: */
	xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
	xhci_op_write_4(sc, XHCI_USBCMD, 0);
	/* do we need to wait for stop? */

	xhci_op_write_8(sc, XHCI_CRCR, 0);
	xhci_ring_free(sc, &sc->sc_cr);
	cv_destroy(&sc->sc_command_cv);
	cv_destroy(&sc->sc_cmdbusy_cv);

	xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
	xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
	xhci_rt_write_8(sc, XHCI_ERDP(0), 0|XHCI_ERDP_LO_BUSY);
	xhci_ring_free(sc, &sc->sc_er);

	usb_freemem(&sc->sc_bus, &sc->sc_eventst_dma);

	xhci_op_write_8(sc, XHCI_DCBAAP, 0);
	usb_freemem(&sc->sc_bus, &sc->sc_dcbaa_dma);

	kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);

	kmem_free(sc->sc_ctlrportbus,
	    howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
	kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));

	for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
		kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
	}

	mutex_destroy(&sc->sc_lock);
	mutex_destroy(&sc->sc_intr_lock);

	pool_cache_destroy(sc->sc_xferpool);

	return rv;
}

int
xhci_activate(device_t self, enum devact act)
{
	struct xhci_softc * const sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		sc->sc_dying = true;
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

bool
xhci_suspend(device_t self, const pmf_qual_t *qual)
{
	struct xhci_softc * const sc = device_private(self);
	size_t i, j, bn, dci;
	int port;
	uint32_t v;
	usbd_status err;
	bool ok = false;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	mutex_enter(&sc->sc_lock);

	/*
	 * Block issuance of new commands, and wait for all pending
	 * commands to complete.
	 */
	KASSERT(sc->sc_suspender == NULL);
	sc->sc_suspender = curlwp;
	while (sc->sc_command_addr != 0)
		cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);

	/*
	 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
	 * xHCI Power Management, p. 342
	 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342
	 */

	/*
	 * `1. Stop all USB activity by issuing Stop Endpoint Commands
	 *     for Busy endpoints in the Running state.  If the Force
	 *     Save Context Capability (FSC = ``0'') is not supported,
	 *     then Stop Endpoint Commands shall be issued for all idle
	 *     endpoints in the Running state as well.  The Stop
	 *     Endpoint Command causes the xHC to update the respective
	 *     Endpoint or Stream Contexts in system memory, e.g. the
	 *     TR Dequeue Pointer, DCS, etc. fields.  Refer to
	 *     Implementation Note "0".'
	 */
	for (i = 0; i < sc->sc_maxslots; i++) {
		struct xhci_slot *xs = &sc->sc_slots[i];

		/* Skip if the slot is not in use. */
		if (xs->xs_idx == 0)
			continue;

		for (dci = 0; dci < 32; dci++) {
			/* Skip if the endpoint is not Running. */
			/* XXX What about Busy? */
			if (xhci_get_epstate(sc, xs, dci) !=
			    XHCI_EPSTATE_RUNNING)
				continue;

			/* Stop endpoint. */
			err = xhci_stop_endpoint_cmd(sc, xs, dci,
			    XHCI_TRB_3_SUSP_EP_BIT);
			if (err) {
				device_printf(self, "failed to stop endpoint"
				    " slot %zu dci %zu err %d\n",
				    i, dci, err);
				goto out;
			}
		}
	}

	/*
	 * Next, suspend all the ports:
	 *
	 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15:
	 * Suspend-Resume, pp. 276-283
	 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276
	 */
	for (bn = 0; bn < 2; bn++) {
		for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
			/* 4.15.1: Port Suspend. */
			port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));

			/*
			 * `System software places individual ports
			 * into suspend mode by writing a ``3'' into
			 * the appropriate PORTSC register Port Link
			 * State (PLS) field (refer to Section 5.4.8).
			 * Software should only set the PLS field to
			 * ``3'' when the port is in the Enabled
			 * state.'
			 *
			 * `Software should not attempt to suspend a
			 * port unless the port reports that it is in
			 * the enabled (PED = ``1''; PLS < ``3'')
			 * state (refer to Section 5.4.8 for more
			 * information about PED and PLS).'
			 */
			v = xhci_op_read_4(sc, port);
			if (((v & XHCI_PS_PED) == 0) ||
			    XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3)
				continue;
			v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
			v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3);
			xhci_op_write_4(sc, port, v);

			/*
			 * `When the PLS field is written with U3
			 * (``3''), the status of the PLS bit will not
			 * change to the target U state U3 until the
			 * suspend signaling has completed to the
			 * attached device (which may be as long as
			 * 10ms.).'
			 *
			 * `Software is required to wait for U3
			 * transitions to complete before it puts the
			 * xHC into a low power state, and before
			 * resuming the port.'
			 *
			 * XXX Take advantage of the technique to
			 * reduce polling on host controllers that
			 * support the U3C capability.
			 */
			for (j = 0; j < XHCI_WAIT_PLS_U3; j++) {
				v = xhci_op_read_4(sc, port);
				if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3)
					break;
				usb_delay_ms(&sc->sc_bus, 1);
			}
			if (j == XHCI_WAIT_PLS_U3) {
				device_printf(self,
				    "suspend timeout on bus %zu port %zu\n",
				    bn, i);
				goto out;
			}
		}
	}

	/*
	 * `2. Ensure that the Command Ring is in the Stopped state
	 *     (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is
	 *     empty), and all Command Completion Events associated
	 *     with them have been received.'
	 *
	 * XXX
	 */

	/* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */
	xhci_op_write_4(sc, XHCI_USBCMD,
	    xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS);

	/*
	 * `4. Read the Operational Runtime, and VTIO registers in the
	 *     following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ,
	 *     ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their
	 *     state.'
	 *
	 * (We don't use VTIO here (XXX for now?).)
	 */
	sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
	sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL);
	sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP);
	sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG);
	sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0));
	sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0));
	sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0));
	sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0));
	sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0));

	/*
	 * `5. Set the Controller Save State (CSS) flag in the USBCMD
	 *     register (5.4.1)...'
	 */
	xhci_op_write_4(sc, XHCI_USBCMD,
	    xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS);

	/*
	 * `...and wait for the Save State Status (SSS) flag in the
	 * USBSTS register (5.4.2) to transition to ``0''.'
	 */
	for (i = 0; i < XHCI_WAIT_SSS; i++) {
		if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0)
			break;
		usb_delay_ms(&sc->sc_bus, 1);
	}
	if (i >= XHCI_WAIT_SSS) {
		device_printf(self, "suspend timeout, USBSTS.SSS\n");
		/*
		 * Just optimistically go on and check SRE anyway --
		 * what's the worst that could happen?
		 */
	}

	/*
	 * `Note: After a Save or Restore operation completes, the
	 * Save/Restore Error (SRE) flag in the USBSTS register should
	 * be checked to ensure that the operation completed
	 * successfully.'
	 */
	if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
		device_printf(self, "suspend error, USBSTS.SRE\n");
		goto out;
	}

	/* Success! */
	ok = true;

out:	mutex_exit(&sc->sc_lock);
	return ok;
}

bool
xhci_resume(device_t self, const pmf_qual_t *qual)
{
	struct xhci_softc * const sc = device_private(self);
	size_t i, j, bn, dci;
	int port;
	uint32_t v;
	bool ok = false;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	mutex_enter(&sc->sc_lock);
	KASSERT(sc->sc_suspender);

	/*
	 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
	 * xHCI Power Management, p. 343
	 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343
	 */

	/*
	 * `4. Restore the Operational Runtime, and VTIO registers with
	 *     their previously saved state in the following order:
	 *     DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN,
	 *     IMOD, and VTIO.'
	 *
	 * (We don't use VTIO here (for now?).)
	 */
	xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd);
	xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl);
	xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap);
	xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config);
	xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0);
	xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0);
	xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0);
	xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0);
	xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0);

	memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */

	/*
	 * `5. Set the Controller Restore State (CRS) flag in the
	 *     USBCMD register (5.4.1) to ``1''...'
	 */
	xhci_op_write_4(sc, XHCI_USBCMD,
	    xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS);

	/*
	 * `...and wait for the Restore State Status (RSS) in the
	 * USBSTS register (5.4.2) to transition to ``0''.'
	 */
	for (i = 0; i < XHCI_WAIT_RSS; i++) {
		if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0)
			break;
		usb_delay_ms(&sc->sc_bus, 1);
	}
	if (i >= XHCI_WAIT_RSS) {
		device_printf(self, "resume timeout, USBSTS.RSS\n");
		goto out;
	}

	/*
	 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits
	 *     are consistent with the RCS values to be written to the
	 *     CRCR.'
	 *
	 * XXX Hope just zeroing it is good enough!
	 */
	xhci_host_dequeue(&sc->sc_cr);

	/*
	 * `7. Write the CRCR with the address and RCS value of the
	 *     reinitialized Command Ring.  Note that this write will
	 *     cause the Command Ring to restart at the address
	 *     specified by the CRCR.'
	 */
	xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(&sc->sc_cr, 0) |
	    sc->sc_cr.xr_cs);

	/*
	 * `8. Enable the controller by setting Run/Stop (R/S) =
	 *     ``1''.'
	 */
	xhci_op_write_4(sc, XHCI_USBCMD,
	    xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS);

	/*
	 * `9. Software shall walk the USB topology and initialize each
	 *     of the xHC PORTSC, PORTPMSC, and PORTLI registers, and
	 *     external hub ports attached to USB devices.'
	 *
	 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2
	 * `Port Resume', 4.15.2.1 `Host Initiated'.
	 *
	 * XXX We should maybe batch up initiating the state
	 * transitions, and then wait for them to complete all at once.
	 */
	for (bn = 0; bn < 2; bn++) {
		for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
			port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));

			/* `When a port is in the U3 state: ...' */
			v = xhci_op_read_4(sc, port);
			if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3)
				continue;

			/*
			 * `For a USB2 protocol port, software shall
			 * write a ``15'' (Resume) to the PLS field to
			 * initiate resume signaling.  The port shall
			 * transition to the Resume substate and the
			 * xHC shall transmit the resume signaling
			 * within 1ms (T_URSM).  Software shall ensure
			 * that resume is signaled for at least 20ms
			 * (T_DRSMDN).  Software shall start timing
			 * T_DRSMDN from the write of ``15'' (Resume)
			 * to PLS.'
			 */
			if (bn == 1) {
				KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0);
				v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
				v |= XHCI_PS_LWS;
				v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME);
				xhci_op_write_4(sc, port, v);
				usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
			} else {
				KASSERT(sc->sc_bus.ub_revision > USBREV_2_0);
			}

			/*
			 * `For a USB3 protocol port [and a USB2
			 * protocol port after transitioning to
			 * Resume], software shall write a ``0'' (U0)
			 * to the PLS field...'
			 */
			v = xhci_op_read_4(sc, port);
			v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
			v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0);
			xhci_op_write_4(sc, port, v);

			for (j = 0; j < XHCI_WAIT_PLS_U0; j++) {
				v = xhci_op_read_4(sc, port);
				if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0)
					break;
				usb_delay_ms(&sc->sc_bus, 1);
			}
			if (j == XHCI_WAIT_PLS_U0) {
				device_printf(self,
				    "resume timeout on bus %zu port %zu\n",
				    bn, i);
				goto out;
			}
		}
	}

	/*
	 * `10. Restart each of the previously Running endpoints by
	 *      ringing their doorbells.'
	 */
	for (i = 0; i < sc->sc_maxslots; i++) {
		struct xhci_slot *xs = &sc->sc_slots[i];

		/* Skip if the slot is not in use. */
		if (xs->xs_idx == 0)
			continue;

		for (dci = 0; dci < 32; dci++) {
			/* Skip if the endpoint is not Running. */
			if (xhci_get_epstate(sc, xs, dci) !=
			    XHCI_EPSTATE_RUNNING)
				continue;

			/* Ring the doorbell. */
			xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
		}
	}

	/*
	 * `Note: After a Save or Restore operation completes, the
	 * Save/Restore Error (SRE) flag in the USBSTS register should
	 * be checked to ensure that the operation completed
	 * successfully.'
	 */
	if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
		device_printf(self, "resume error, USBSTS.SRE\n");
		goto out;
	}

	/* Resume command issuance. */
	sc->sc_suspender = NULL;
	cv_broadcast(&sc->sc_cmdbusy_cv);

	/* Success! */
	ok = true;

out:	mutex_exit(&sc->sc_lock);
	return ok;
}

bool
xhci_shutdown(device_t self, int flags)
{
	return false;
}

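/*
 * Halt and reset the host controller: wait for Controller Not Ready (CNR)
 * to clear, clear Run/Stop, issue a Host Controller Reset, and wait for
 * the reset and CNR to clear again.
 */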
static int
xhci_hc_reset(struct xhci_softc * const sc)
{
	uint32_t usbcmd, usbsts;
	int i;

	/* Check controller not ready */
	for (i = 0; i < XHCI_WAIT_CNR; i++) {
		usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
		if ((usbsts & XHCI_STS_CNR) == 0)
			break;
		usb_delay_ms(&sc->sc_bus, 1);
	}
	if (i >= XHCI_WAIT_CNR) {
		aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
		return EIO;
	}

	/* Halt controller */
	usbcmd = 0;
	xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
	usb_delay_ms(&sc->sc_bus, 1);

	/* Reset controller */
	usbcmd = XHCI_CMD_HCRST;
	xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
	for (i = 0; i < XHCI_WAIT_HCRST; i++) {
		/*
		 * Wait 1ms first.  Existing Intel xHCI requires a 1ms
		 * delay to prevent a system hang (Errata).
		 */
		usb_delay_ms(&sc->sc_bus, 1);
		usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
		if ((usbcmd & XHCI_CMD_HCRST) == 0)
			break;
	}
	if (i >= XHCI_WAIT_HCRST) {
		aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
		return EIO;
	}

	/* Check controller not ready */
	for (i = 0; i < XHCI_WAIT_CNR; i++) {
		usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
		if ((usbsts & XHCI_STS_CNR) == 0)
			break;
		usb_delay_ms(&sc->sc_bus, 1);
	}
	if (i >= XHCI_WAIT_CNR) {
		aprint_error_dev(sc->sc_dev,
		    "controller not ready timeout after reset\n");
		return EIO;
	}

	return 0;
}

/* 7.2 xHCI Support Protocol Capability */
static void
xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* XXX Cache this lot */

	const uint32_t w0 = xhci_read_4(sc, ecp);
	const uint32_t w4 = xhci_read_4(sc, ecp + 4);
	const uint32_t w8 = xhci_read_4(sc, ecp + 8);
	const uint32_t wc = xhci_read_4(sc, ecp + 0xc);

	aprint_debug_dev(sc->sc_dev,
	    " SP: %08x %08x %08x %08x\n", w0, w4, w8, wc);

	if (w4 != XHCI_XECP_USBID)
		return;

	const int major = XHCI_XECP_SP_W0_MAJOR(w0);
	const int minor = XHCI_XECP_SP_W0_MINOR(w0);
	const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
	const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);

	const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
	switch (mm) {
	case 0x0200:
	case 0x0300:
	case 0x0301:
	case 0x0310:
	case 0x0320:
		aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
		    major == 3 ? "ss" : "hs", cpo, cpo + cpc - 1);
		if (major == 3)
			sc->sc_usb3nports += cpo + cpc - 1;
		else
			sc->sc_usb2nports += cpo + cpc - 1;
		break;
	default:
		aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
		    major, minor);
		return;
	}

	const size_t bus = (major == 3) ? 0 : 1;

	/* Index arrays with 0..n-1 where ports are numbered 1..n */
	for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
		if (sc->sc_ctlrportmap[cp] != 0) {
			aprint_error_dev(sc->sc_dev, "controller port %zu "
			    "already assigned", cp);
			continue;
		}

		sc->sc_ctlrportbus[cp / NBBY] |=
		    bus == 0 ? 0 : __BIT(cp % NBBY);

		const size_t rhp = sc->sc_rhportcount[bus]++;

		KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
		    "bus %zu rhp %zu is %d", bus, rhp,
		    sc->sc_rhportmap[bus][rhp]);

		sc->sc_rhportmap[bus][rhp] = cp + 1;
		sc->sc_ctlrportmap[cp] = rhp + 1;
	}
}

/* Process extended capabilities */
static void
xhci_ecp(struct xhci_softc *sc, uint32_t hcc)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	bus_size_t ecp = XHCI_HCC_XECP(hcc) * 4;
	while (ecp != 0) {
		uint32_t ecr = xhci_read_4(sc, ecp);
		aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
		switch (XHCI_XECP_ID(ecr)) {
		case XHCI_ID_PROTOCOLS: {
			xhci_id_protocols(sc, ecp);
			break;
		}
		case XHCI_ID_USB_LEGACY: {
			uint8_t bios_sem;

			/* Take host controller ownership from BIOS */
			bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
			if (bios_sem) {
				/* sets xHCI to be owned by OS */
				xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
				aprint_debug_dev(sc->sc_dev,
				    "waiting for BIOS to give up control\n");
				for (int i = 0; i < 5000; i++) {
					bios_sem = xhci_read_1(sc, ecp +
					    XHCI_XECP_BIOS_SEM);
					if (bios_sem == 0)
						break;
					DELAY(1000);
				}
				if (bios_sem) {
					aprint_error_dev(sc->sc_dev,
					    "timed out waiting for BIOS\n");
				}
			}
			break;
		}
		default:
			break;
		}
		ecr = xhci_read_4(sc, ecp);
		if (XHCI_XECP_NEXT(ecr) == 0) {
			ecp = 0;
		} else {
			ecp += XHCI_XECP_NEXT(ecr) * 4;
		}
	}
}

#define XHCI_HCCPREV1_BITS \
	"\177\020" /* New bitmask */ \
	"f\020\020XECP\0" \
	"f\014\4MAXPSA\0" \
	"b\013CFC\0" \
	"b\012SEC\0" \
	"b\011SBD\0" \
	"b\010FSE\0" \
	"b\7NSS\0" \
	"b\6LTC\0" \
	"b\5LHRC\0" \
	"b\4PIND\0" \
	"b\3PPC\0" \
	"b\2CZC\0" \
	"b\1BNC\0" \
	"b\0AC64\0" \
	"\0"
#define XHCI_HCCV1_x_BITS \
	"\177\020" /* New bitmask */ \
	"f\020\020XECP\0" \
	"f\014\4MAXPSA\0" \
	"b\013CFC\0" \
	"b\012SEC\0" \
	"b\011SPC\0" \
	"b\010PAE\0" \
	"b\7NSS\0" \
	"b\6LTC\0" \
	"b\5LHRC\0" \
	"b\4PIND\0" \
	"b\3PPC\0" \
	"b\2CSZ\0" \
	"b\1BNC\0" \
	"b\0AC64\0" \
	"\0"

#define XHCI_HCC2_BITS \
	"\177\020" /* New bitmask */ \
	"b\7ETC_TSC\0" \
	"b\6ETC\0" \
	"b\5CIC\0" \
	"b\4LEC\0" \
	"b\3CTC\0" \
	"b\2FSC\0" \
	"b\1CMC\0" \
	"b\0U3C\0" \
	"\0"

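/*
 * Enable event interrupts, program the interrupt moderation interval
 * (Intel controllers want a non-zero IMOD), and start the controller by
 * setting Run/Stop together with INTE.
 */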
void
xhci_start(struct xhci_softc *sc)
{
	xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
	if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
		/* Intel xhci needs interrupt rate moderated. */
		xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
	else
		xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
	aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
	    xhci_rt_read_4(sc, XHCI_IMOD(0)));

	/* Go! */
	xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
	aprint_debug_dev(sc->sc_dev, "USBCMD %08"PRIx32"\n",
	    xhci_op_read_4(sc, XHCI_USBCMD));
}

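/*
 * Controller initialization: map the capability, operational, runtime and
 * doorbell register subregions, reset the controller, allocate scratchpad
 * buffers, the command and event rings, the event ring segment table, the
 * DCBAA and per-slot state, program them into the controller, and start it
 * unless the attachment defers that with XHCI_DEFERRED_START.
 */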
int
xhci_init(struct xhci_softc *sc)
{
	bus_size_t bsz;
	uint32_t cap, hcs1, hcs2, hcs3, hcc, dboff, rtsoff, hcc2;
	uint32_t pagesize, config;
	int i = 0;
	uint16_t hciversion;
	uint8_t caplength;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* Set up the bus struct for the usb 3 and usb 2 buses */
	sc->sc_bus.ub_methods = &xhci_bus_methods;
	sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
	sc->sc_bus.ub_usedma = true;
	sc->sc_bus.ub_hcpriv = sc;

	sc->sc_bus2.ub_methods = &xhci_bus_methods;
	sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
	sc->sc_bus2.ub_revision = USBREV_2_0;
	sc->sc_bus2.ub_usedma = true;
	sc->sc_bus2.ub_hcpriv = sc;
	sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;

	cap = xhci_read_4(sc, XHCI_CAPLENGTH);
	caplength = XHCI_CAP_CAPLENGTH(cap);
	hciversion = XHCI_CAP_HCIVERSION(cap);

	if (hciversion < XHCI_HCIVERSION_0_96 ||
	    hciversion >= 0x0200) {
		aprint_normal_dev(sc->sc_dev,
		    "xHCI version %x.%x not known to be supported\n",
		    (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
	} else {
		aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
		    (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
	}

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
	    &sc->sc_cbh) != 0) {
		aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
		return ENOMEM;
	}

	hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
	sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
	sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
	sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
	hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
	hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
	aprint_debug_dev(sc->sc_dev,
	    "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);

	hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
	sc->sc_ac64 = XHCI_HCC_AC64(hcc);
	sc->sc_ctxsz = XHCI_HCC_CSZ(hcc) ? 64 : 32;

	char sbuf[128];
	if (hciversion < XHCI_HCIVERSION_1_0)
		snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, hcc);
	else
		snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, hcc);
	aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
	aprint_debug_dev(sc->sc_dev, "xECP %x\n", XHCI_HCC_XECP(hcc) * 4);
	if (hciversion >= XHCI_HCIVERSION_1_1) {
		hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
		snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, hcc2);
		aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
	}

	/* default all ports to bus 0, i.e. usb 3 */
	sc->sc_ctlrportbus = kmem_zalloc(
	    howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
	sc->sc_ctlrportmap =
	    kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);

	/* controller port to bus roothub port map */
	for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
		sc->sc_rhportmap[j] =
		    kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
	}

	/*
	 * Process all Extended Capabilities
	 */
	xhci_ecp(sc, hcc);

	bsz = XHCI_PORTSC(sc->sc_maxports);
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
	    &sc->sc_obh) != 0) {
		aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
		return ENOMEM;
	}

	dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
	    sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
		aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
		return ENOMEM;
	}

	rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
	    sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
		aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
		return ENOMEM;
	}

	int rv;
	rv = xhci_hc_reset(sc);
	if (rv != 0) {
		return rv;
	}

	if (sc->sc_vendor_init)
		sc->sc_vendor_init(sc);

	pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
	aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
	pagesize = ffs(pagesize);
	if (pagesize == 0) {
		aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
		return EIO;
	}
	sc->sc_pgsz = 1 << (12 + (pagesize - 1));
	aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
	aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
	    (uint32_t)sc->sc_maxslots);
	aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);

	usbd_status err;

	sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
	aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
	if (sc->sc_maxspbuf != 0) {
		err = usb_allocmem(&sc->sc_bus,
		    sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
		    &sc->sc_spbufarray_dma);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "spbufarray init fail, err %d\n", err);
			return ENOMEM;
		}

		sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
		    sc->sc_maxspbuf, KM_SLEEP);
		uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
		for (i = 0; i < sc->sc_maxspbuf; i++) {
			usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
			/* allocate contexts */
			err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz,
			    sc->sc_pgsz, dma);
			if (err) {
				aprint_error_dev(sc->sc_dev,
				    "spbufarray_dma init fail, err %d\n", err);
				rv = ENOMEM;
				goto bad1;
			}
			spbufarray[i] = htole64(DMAADDR(dma, 0));
			usb_syncmem(dma, 0, sc->sc_pgsz,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		usb_syncmem(&sc->sc_spbufarray_dma, 0,
		    sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
	}

	config = xhci_op_read_4(sc, XHCI_CONFIG);
	config &= ~0xFF;
	config |= sc->sc_maxslots & 0xFF;
	xhci_op_write_4(sc, XHCI_CONFIG, config);

	err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
	    XHCI_COMMAND_RING_SEGMENTS_ALIGN);
	if (err) {
		aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n",
		    err);
		rv = ENOMEM;
		goto bad1;
	}

	err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
	    XHCI_EVENT_RING_SEGMENTS_ALIGN);
	if (err) {
		aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
		    err);
		rv = ENOMEM;
		goto bad2;
	}

	usb_dma_t *dma;
	size_t size;
	size_t align;

	dma = &sc->sc_eventst_dma;
	size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
	    XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
	KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
	align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
	err = usb_allocmem(&sc->sc_bus, size, align, dma);
	if (err) {
		aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
		    err);
		rv = ENOMEM;
		goto bad3;
	}

	memset(KERNADDR(dma, 0), 0, size);
	usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
	aprint_debug_dev(sc->sc_dev, "eventst: %016jx %p %zx\n",
	    (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
	    KERNADDR(&sc->sc_eventst_dma, 0),
	    sc->sc_eventst_dma.udma_block->size);

	dma = &sc->sc_dcbaa_dma;
	size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
	KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
	align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
	err = usb_allocmem(&sc->sc_bus, size, align, dma);
	if (err) {
		aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
		rv = ENOMEM;
		goto bad4;
	}
	aprint_debug_dev(sc->sc_dev, "dcbaa: %016jx %p %zx\n",
	    (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
	    KERNADDR(&sc->sc_dcbaa_dma, 0),
	    sc->sc_dcbaa_dma.udma_block->size);

	memset(KERNADDR(dma, 0), 0, size);
	if (sc->sc_maxspbuf != 0) {
		/*
		 * DCBAA entry 0 holds the scratchpad buffer array pointer.
		 */
		*(uint64_t *)KERNADDR(dma, 0) =
		    htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
	}
	usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);

	sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
	    KM_SLEEP);
	if (sc->sc_slots == NULL) {
		aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
		rv = ENOMEM;
		goto bad;
	}

	sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
	    "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
	if (sc->sc_xferpool == NULL) {
		aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
		    err);
		rv = ENOMEM;
		goto bad;
	}

	cv_init(&sc->sc_command_cv, "xhcicmd");
	cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);

	struct xhci_erste *erst;
	erst = KERNADDR(&sc->sc_eventst_dma, 0);
	erst[0].erste_0 = htole64(xhci_ring_trbp(&sc->sc_er, 0));
	erst[0].erste_2 = htole32(sc->sc_er.xr_ntrb);
	erst[0].erste_3 = htole32(0);
	usb_syncmem(&sc->sc_eventst_dma, 0,
	    XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);

	xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
	xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
	xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(&sc->sc_er, 0) |
	    XHCI_ERDP_LO_BUSY);

	xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
	xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(&sc->sc_cr, 0) |
	    sc->sc_cr.xr_cs);

	xhci_op_barrier(sc, 0, 4, BUS_SPACE_BARRIER_WRITE);

	HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
	    XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);

	if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
		xhci_start(sc);

	return 0;

bad:
	if (sc->sc_xferpool) {
		pool_cache_destroy(sc->sc_xferpool);
		sc->sc_xferpool = NULL;
	}

	if (sc->sc_slots) {
		kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
		    sc->sc_maxslots);
		sc->sc_slots = NULL;
	}

	usb_freemem(&sc->sc_bus, &sc->sc_dcbaa_dma);
bad4:
	usb_freemem(&sc->sc_bus, &sc->sc_eventst_dma);
bad3:
	xhci_ring_free(sc, &sc->sc_er);
bad2:
	xhci_ring_free(sc, &sc->sc_cr);
	i = sc->sc_maxspbuf;
bad1:
	for (int j = 0; j < i; j++)
		usb_freemem(&sc->sc_bus, &sc->sc_spbuf_dma[j]);
	usb_freemem(&sc->sc_bus, &sc->sc_spbufarray_dma);

	return rv;
}

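/*
 * True if either root hub bus is currently operating in polling mode.
 */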
static inline bool
xhci_polling_p(struct xhci_softc * const sc)
{
	return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
}

int
xhci_intr(void *v)
{
	struct xhci_softc * const sc = v;
	int ret = 0;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	if (sc == NULL)
		return 0;

	mutex_spin_enter(&sc->sc_intr_lock);

	if (sc->sc_dying || !device_has_power(sc->sc_dev))
		goto done;

	/* If we get an interrupt while polling, then just ignore it. */
	if (xhci_polling_p(sc)) {
#ifdef DIAGNOSTIC
		DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
#endif
		goto done;
	}

	ret = xhci_intr1(sc);
	if (ret) {
		KASSERT(sc->sc_child || sc->sc_child2);

		/*
		 * One of the child busses could already be detached.
		 * It doesn't matter which of the two the softintr is
		 * scheduled on.
		 */
		if (sc->sc_child)
			usb_schedsoftintr(&sc->sc_bus);
		else
			usb_schedsoftintr(&sc->sc_bus2);
	}
done:
	mutex_spin_exit(&sc->sc_intr_lock);
	return ret;
}

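/*
 * Check whether an interrupt was raised by this controller and, if so,
 * acknowledge it: clear the USBSTS status bits and write the IMAN
 * interrupt-pending bit back to the interrupter.  Returns nonzero when
 * the interrupt was ours, in which case the caller schedules the softintr.
 */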
int
xhci_intr1(struct xhci_softc * const sc)
{
	uint32_t usbsts;
	uint32_t iman;

	XHCIHIST_FUNC();

	usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
	XHCIHIST_CALLARGS("USBSTS %08jx", usbsts, 0, 0, 0);
	if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
	    XHCI_STS_HCE)) == 0) {
		DPRINTFN(16, "ignored intr not for %s",
		    (uintptr_t)device_xname(sc->sc_dev), 0, 0, 0);
		return 0;
	}

	/*
	 * Clear EINT and other transient flags, so as not to misinterpret
	 * the next shared interrupt.  Also, to avoid a race, EINT must be
	 * cleared before XHCI_IMAN_INTR_PEND is cleared.
	 */
	xhci_op_write_4(sc, XHCI_USBSTS, usbsts & XHCI_STS_RSVDP0);

#ifdef XHCI_DEBUG
	usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
	DPRINTFN(16, "USBSTS %08jx", usbsts, 0, 0, 0);
#endif

	iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
	DPRINTFN(16, "IMAN0 %08jx", iman, 0, 0, 0);
	iman |= XHCI_IMAN_INTR_PEND;
	xhci_rt_write_4(sc, XHCI_IMAN(0), iman);

#ifdef XHCI_DEBUG
	iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
	DPRINTFN(16, "IMAN0 %08jx", iman, 0, 0, 0);
	usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
	DPRINTFN(16, "USBSTS %08jx", usbsts, 0, 0, 0);
#endif

	return 1;
}

/*
 * 3 port speed types used in USB stack
 *
 * usbdi speed
 *	definition: USB_SPEED_* in usb.h
 *	They are used in struct usbd_device in the USB stack.
 *	The ioctl interface uses these values too.
 * port_status speed
 *	definition: UPS_*_SPEED in usb.h
 *	They are used in usb_port_status_t and are valid only for USB 2.0.
 *	The speed value is always 0 for Super Speed or faster, and
 *	dwExtPortStatus of usb_port_status_ext_t indicates the port speed.
 *	Note that some 3.0 values overlap with 2.0 values.
 *	(e.g. 0x200 means UPS_PORT_POWER_SS in SS and
 *	 means UPS_LOW_SPEED in HS.)
 *	The port status returned from a hub also uses these values.
 *	On NetBSD UPS_OTHER_SPEED indicates that the port speed is
 *	Super Speed or faster.
 * xspeed:
 *	definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
 *	They are used only in the slot context and the PORTSC register of xhci.
 *	The difference between usbdi speed and xspeed is
 *	that FS and LS values are swapped.
 */

/* convert usbdi speed to xspeed */
static int
xhci_speed2xspeed(int speed)
{
	switch (speed) {
	case USB_SPEED_LOW: return 2;
	case USB_SPEED_FULL: return 1;
	default: return speed;
	}
}

#if 0
/* convert xspeed to usbdi speed */
static int
xhci_xspeed2speed(int xspeed)
{
	switch (xspeed) {
	case 1: return USB_SPEED_FULL;
	case 2: return USB_SPEED_LOW;
	default: return xspeed;
	}
}
#endif

/* convert xspeed to port status speed */
static int
xhci_xspeed2psspeed(int xspeed)
{
	switch (xspeed) {
	case 0: return 0;
	case 1: return UPS_FULL_SPEED;
	case 2: return UPS_LOW_SPEED;
	case 3: return UPS_HIGH_SPEED;
	default: return UPS_OTHER_SPEED;
	}
}

/*
 * Construct input contexts and issue TRB to open pipe.
 */
static usbd_status
xhci_configure_endpoint(struct usbd_pipe *pipe)
{
	struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
	struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
#ifdef USB_DEBUG
	const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
#endif
	struct xhci_soft_trb trb;
	usbd_status err;

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
	    xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
	    pipe->up_endpoint->ue_edesc->bmAttributes);

	/* XXX ensure input context is available? */

	memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz);

	/* set up context */
	xhci_setup_ctx(pipe);

	HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0),
	    sc->sc_ctxsz * 1);
	HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs,
	    xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1);

	trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
	trb.trb_2 = 0;
	trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
	    XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);

	err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);

	usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
	HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci),
	    sc->sc_ctxsz * 1);

	return err;
}

#if 0
static usbd_status
xhci_unconfigure_endpoint(struct usbd_pipe *pipe)
{
#ifdef USB_DEBUG
	struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
#endif

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);

	return USBD_NORMAL_COMPLETION;
}
#endif

/* 4.6.8, 6.4.3.7 */
static usbd_status
xhci_reset_endpoint_locked(struct usbd_pipe *pipe)
{
	struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
	struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
	struct xhci_soft_trb trb;
	usbd_status err;

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);

	KASSERT(mutex_owned(&sc->sc_lock));

	trb.trb_0 = 0;
	trb.trb_2 = 0;
	trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
	    XHCI_TRB_3_EP_SET(dci) |
	    XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP);

	err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);

	return err;
}

static usbd_status
xhci_reset_endpoint(struct usbd_pipe *pipe)
{
	struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);

	mutex_enter(&sc->sc_lock);
	usbd_status ret = xhci_reset_endpoint_locked(pipe);
	mutex_exit(&sc->sc_lock);

	return ret;
}

/*
 * 4.6.9, 6.4.3.8
 * Stop execution of TDs on xfer ring.
 * Should be called with sc_lock held.
 */
static usbd_status
xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci,
    uint32_t trb3flags)
{
	struct xhci_soft_trb trb;
	usbd_status err;

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);

	KASSERT(mutex_owned(&sc->sc_lock));

	trb.trb_0 = 0;
	trb.trb_2 = 0;
	trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
	    XHCI_TRB_3_EP_SET(dci) |
	    XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) |
	    trb3flags;

	err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);

	return err;
}

static usbd_status
xhci_stop_endpoint(struct usbd_pipe *pipe)
{
	struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
	struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);

	KASSERT(mutex_owned(&sc->sc_lock));

	return xhci_stop_endpoint_cmd(sc, xs, dci, 0);
}

1945 /*
1946 * Set TR Dequeue Pointer.
1947 * xHCI 1.1 4.6.10 6.4.3.9
1948  * Purge all of the TRBs on the ring and reinitialize the ring.
1949  * Set the TR Dequeue Pointer to 0 and the Cycle State to 1.
1950  * The EPSTATE of the endpoint must be ERROR or STOPPED, otherwise a
1951  * CONTEXT_STATE error will be generated.
1952 */
1953 static usbd_status
1954 xhci_set_dequeue_locked(struct usbd_pipe *pipe)
1955 {
1956 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1957 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1958 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1959 struct xhci_ring * const xr = &xs->xs_ep[dci].xe_tr;
1960 struct xhci_soft_trb trb;
1961 usbd_status err;
1962
1963 XHCIHIST_FUNC();
1964 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1965
1966 KASSERT(mutex_owned(&sc->sc_lock));
1967
1968 xhci_host_dequeue(xr);
1969
1970 /* set DCS */
1971 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
1972 trb.trb_2 = 0;
1973 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1974 XHCI_TRB_3_EP_SET(dci) |
1975 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
1976
1977 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1978
1979 return err;
1980 }
1981
1982 static usbd_status
1983 xhci_set_dequeue(struct usbd_pipe *pipe)
1984 {
1985 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1986
1987 mutex_enter(&sc->sc_lock);
1988 usbd_status ret = xhci_set_dequeue_locked(pipe);
1989 mutex_exit(&sc->sc_lock);
1990
1991 return ret;
1992 }
1993
1994 /*
1995  * Open a new pipe: called from usbd_setup_pipe_flags.
1996  * Fills in the pipe methods.
1997  * If the pipe is not for ep0, calls xhci_configure_endpoint.
1998 */
1999 static usbd_status
2000 xhci_open(struct usbd_pipe *pipe)
2001 {
2002 struct usbd_device * const dev = pipe->up_dev;
2003 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
2004 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2005 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
2006
2007 XHCIHIST_FUNC();
2008 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr,
2009 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed);
2010 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx",
2011 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress,
2012 ed->bmAttributes);
2013 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize),
2014 ed->bInterval, 0, 0);
2015
2016 if (sc->sc_dying)
2017 return USBD_IOERROR;
2018
2019 /* Root Hub */
2020 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) {
2021 switch (ed->bEndpointAddress) {
2022 case USB_CONTROL_ENDPOINT:
2023 pipe->up_methods = &roothub_ctrl_methods;
2024 break;
2025 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
2026 pipe->up_methods = &xhci_root_intr_methods;
2027 break;
2028 default:
2029 pipe->up_methods = NULL;
2030 DPRINTFN(0, "bad bEndpointAddress 0x%02jx",
2031 ed->bEndpointAddress, 0, 0, 0);
2032 return USBD_INVAL;
2033 }
2034 return USBD_NORMAL_COMPLETION;
2035 }
2036
2037 switch (xfertype) {
2038 case UE_CONTROL:
2039 pipe->up_methods = &xhci_device_ctrl_methods;
2040 break;
2041 case UE_ISOCHRONOUS:
2042 pipe->up_methods = &xhci_device_isoc_methods;
2043 return USBD_INVAL;
2044 break;
2045 case UE_BULK:
2046 pipe->up_methods = &xhci_device_bulk_methods;
2047 break;
2048 case UE_INTERRUPT:
2049 pipe->up_methods = &xhci_device_intr_methods;
2050 break;
2051 default:
2052 return USBD_IOERROR;
2053 break;
2054 }
2055
2056 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT)
2057 return xhci_configure_endpoint(pipe);
2058
2059 return USBD_NORMAL_COMPLETION;
2060 }
2061
2062 /*
2063  * Close a pipe: called from usbd_kill_pipe via the close methods.
2064  * If the endpoint to be closed is ep0, the slot is disabled instead.
2065 * Should be called with sc_lock held.
2066 */
2067 static void
2068 xhci_close_pipe(struct usbd_pipe *pipe)
2069 {
2070 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2071 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2072 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2073 const u_int dci = xhci_ep_get_dci(ed);
2074 struct xhci_soft_trb trb;
2075 uint32_t *cp;
2076
2077 XHCIHIST_FUNC();
2078
2079 if (sc->sc_dying)
2080 return;
2081
2082 /* xs is uninitialized before xhci_init_slot */
2083 if (xs == NULL || xs->xs_idx == 0)
2084 return;
2085
2086 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2087 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2088
2089 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
2090 KASSERT(mutex_owned(&sc->sc_lock));
2091
2092 if (pipe->up_dev->ud_depth == 0)
2093 return;
2094
2095 if (dci == XHCI_DCI_EP_CONTROL) {
2096 DPRINTFN(4, "closing ep0", 0, 0, 0, 0);
2097 xhci_disable_slot(sc, xs->xs_idx);
2098 return;
2099 }
2100
2101 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED)
2102 (void)xhci_stop_endpoint(pipe);
2103
2104 /*
2105  * Set the appropriate Drop Context bit for this endpoint.
2106  * Don't set the DC bit to 1, otherwise all endpoints
2107  * would be deconfigured.
2108 */
2109 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
2110 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));
2111 cp[1] = htole32(0);
2112
2113 /* XXX should be most significant one, not dci? */
2114 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
2115 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci));
2116
2117 /* configure ep context performs an implicit dequeue */
2118 xhci_host_dequeue(&xs->xs_ep[dci].xe_tr);
2119
2120 /* sync input contexts before they are read from memory */
2121 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
2122
2123 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
2124 trb.trb_2 = 0;
2125 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2126 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
2127
2128 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
2129 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2130 }
2131
2132 /*
2133 * Abort transfer.
2134 * Should be called with sc_lock held.
2135 */
2136 static void
2137 xhci_abortx(struct usbd_xfer *xfer)
2138 {
2139 XHCIHIST_FUNC();
2140 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2141 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
2142 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
2143
2144 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
2145 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
2146
2147 KASSERT(mutex_owned(&sc->sc_lock));
2148 ASSERT_SLEEPABLE();
2149
2150 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2151 xfer->ux_status == USBD_TIMEOUT),
2152 "bad abort status: %d", xfer->ux_status);
2153
2154 /*
2155 * If we're dying, skip the hardware action and just notify the
2156 * software that we're done.
2157 */
2158 if (sc->sc_dying) {
2159 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer,
2160 xfer->ux_status, 0, 0);
2161 goto dying;
2162 }
2163
2164 /*
2165 * HC Step 1: Stop execution of TD on the ring.
2166 */
2167 switch (xhci_get_epstate(sc, xs, dci)) {
2168 case XHCI_EPSTATE_HALTED:
2169 (void)xhci_reset_endpoint_locked(xfer->ux_pipe);
2170 break;
2171 case XHCI_EPSTATE_STOPPED:
2172 break;
2173 default:
2174 (void)xhci_stop_endpoint(xfer->ux_pipe);
2175 break;
2176 }
2177 #ifdef DIAGNOSTIC
2178 uint32_t epst = xhci_get_epstate(sc, xs, dci);
2179 if (epst != XHCI_EPSTATE_STOPPED)
2180 DPRINTFN(4, "dci %ju not stopped %ju", dci, epst, 0, 0);
2181 #endif
2182
2183 /*
2184 * HC Step 2: Remove any vestiges of the xfer from the ring.
2185 */
2186 xhci_set_dequeue_locked(xfer->ux_pipe);
2187
2188 /*
2189 * Final Step: Notify completion to waiting xfers.
2190 */
2191 dying:
2192 usb_transfer_complete(xfer);
2193 DPRINTFN(14, "end", 0, 0, 0, 0);
2194
2195 KASSERT(mutex_owned(&sc->sc_lock));
2196 }
2197
2198 static void
2199 xhci_host_dequeue(struct xhci_ring * const xr)
2200 {
2201 /* When dequeueing the controller, update our struct copy too */
2202 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE);
2203 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE,
2204 BUS_DMASYNC_PREWRITE);
2205 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies));
2206
2207 xr->xr_ep = 0;
2208 xr->xr_cs = 1;
2209 }
2210
2211 /*
2212 * Recover STALLed endpoint.
2213 * xHCI 1.1 sect 4.10.2.1
2214  * Issue RESET_EP to recover from the halt condition and SET_TR_DEQUEUE to
2215  * remove all transfers from the transfer ring.
2216  * These are done asynchronously in thread context.
2217 */
2218 static void
2219 xhci_clear_endpoint_stall_async_task(void *cookie)
2220 {
2221 struct usbd_xfer * const xfer = cookie;
2222 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2223 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
2224 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
2225 struct xhci_ring * const tr = &xs->xs_ep[dci].xe_tr;
2226
2227 XHCIHIST_FUNC();
2228 XHCIHIST_CALLARGS("xfer %#jx slot %ju dci %ju", (uintptr_t)xfer, xs->xs_idx,
2229 dci, 0);
2230
2231 /*
2232 * XXXMRG: Stall task can run after slot is disabled when yanked.
2233 * This hack notices that the xs has been memset() in
2234 * xhci_disable_slot() and returns. Both xhci_reset_endpoint()
2235 * and xhci_set_dequeue() rely upon a valid ring setup for correct
2236 * operation, and the latter will fault, as would
2237 * usb_transfer_complete() if it got that far.
2238 */
2239 if (xs->xs_idx == 0) {
2240 DPRINTFN(4, "ends xs_idx is 0", 0, 0, 0, 0);
2241 return;
2242 }
2243
2244 xhci_reset_endpoint(xfer->ux_pipe);
2245 xhci_set_dequeue(xfer->ux_pipe);
2246
2247 mutex_enter(&sc->sc_lock);
2248 tr->is_halted = false;
2249 usb_transfer_complete(xfer);
2250 mutex_exit(&sc->sc_lock);
2251 DPRINTFN(4, "ends", 0, 0, 0, 0);
2252 }
2253
2254 static usbd_status
2255 xhci_clear_endpoint_stall_async(struct usbd_xfer *xfer)
2256 {
2257 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2258 struct xhci_pipe * const xp = (struct xhci_pipe *)xfer->ux_pipe;
2259
2260 XHCIHIST_FUNC();
2261 XHCIHIST_CALLARGS("xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2262
2263 if (sc->sc_dying) {
2264 return USBD_IOERROR;
2265 }
2266
2267 usb_init_task(&xp->xp_async_task,
2268 xhci_clear_endpoint_stall_async_task, xfer, USB_TASKQ_MPSAFE);
2269 usb_add_task(xfer->ux_pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC);
2270 DPRINTFN(4, "ends", 0, 0, 0, 0);
2271
2272 return USBD_NORMAL_COMPLETION;
2273 }
2274
2275 /* Process roothub port status/change events and notify uhub_intr. */
2276 static void
2277 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport)
2278 {
2279 XHCIHIST_FUNC();
2280 XHCIHIST_CALLARGS("xhci%jd: port %ju status change",
2281 device_unit(sc->sc_dev), ctlrport, 0, 0);
2282
2283 if (ctlrport > sc->sc_maxports)
2284 return;
2285
2286 const size_t bn = xhci_ctlrport2bus(sc, ctlrport);
2287 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport);
2288 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn];
2289
2290 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change",
2291 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer);
2292
2293 if (xfer == NULL)
2294 return;
2295 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
2296
2297 uint8_t *p = xfer->ux_buf;
2298 if (!xhci_polling_p(sc) || !sc->sc_intrxfer_deferred[bn])
2299 memset(p, 0, xfer->ux_length);
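	/*
	 * Flag the change in the per-port status bitmap handed to uhub_intr:
	 * e.g. root hub port 10 sets bit 2 of byte 1
	 * (10 / NBBY = 1, 10 % NBBY = 2).
	 */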
2300 p[rhp / NBBY] |= 1 << (rhp % NBBY);
2301 xfer->ux_actlen = xfer->ux_length;
2302 xfer->ux_status = USBD_NORMAL_COMPLETION;
2303 if (xhci_polling_p(sc))
2304 sc->sc_intrxfer_deferred[bn] = true;
2305 else
2306 usb_transfer_complete(xfer);
2307 }
2308
2309 /* Process Transfer Events */
2310 static void
2311 xhci_event_transfer(struct xhci_softc * const sc,
2312 const struct xhci_trb * const trb)
2313 {
2314 uint64_t trb_0;
2315 uint32_t trb_2, trb_3;
2316 uint8_t trbcode;
2317 u_int slot, dci;
2318 struct xhci_slot *xs;
2319 struct xhci_ring *xr;
2320 struct xhci_xfer *xx;
2321 struct usbd_xfer *xfer;
2322 usbd_status err;
2323
2324 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2325
2326 trb_0 = le64toh(trb->trb_0);
2327 trb_2 = le32toh(trb->trb_2);
2328 trb_3 = le32toh(trb->trb_3);
2329 trbcode = XHCI_TRB_2_ERROR_GET(trb_2);
2330 slot = XHCI_TRB_3_SLOT_GET(trb_3);
2331 dci = XHCI_TRB_3_EP_GET(trb_3);
2332 xs = &sc->sc_slots[slot];
2333 xr = &xs->xs_ep[dci].xe_tr;
2334
2335 /* sanity check */
2336 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots,
2337 "invalid xs_idx %u slot %u", xs->xs_idx, slot);
2338
2339 int idx = 0;
2340 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2341 if (xhci_trb_get_idx(xr, trb_0, &idx)) {
2342 DPRINTFN(0, "invalid trb_0 0x%jx", trb_0, 0, 0, 0);
2343 return;
2344 }
2345 xx = xr->xr_cookies[idx];
2346
2347 /* clear cookie of consumed TRB */
2348 xr->xr_cookies[idx] = NULL;
2349
2350 /*
2351  * xx is NULL if the pipe is open but no xfer has been started.
2352  * This happens when stopping an idle pipe.
2353 */
2354 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) {
2355 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju",
2356 idx, (uintptr_t)xx, trbcode, dci);
2357 DPRINTFN(1, " orig TRB %jx type %ju", trb_0,
2358 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)),
2359 0, 0);
2360 return;
2361 }
2362 } else {
2363 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. */
2364 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
2365 }
2366 /* XXX this may not happen */
2367 if (xx == NULL) {
2368 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
2369 return;
2370 }
2371 xfer = &xx->xx_xfer;
2372 /* XXX this may happen when detaching */
2373 if (xfer == NULL) {
2374 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2375 (uintptr_t)xx, trb_0, 0, 0);
2376 return;
2377 }
2378 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2379 /* XXX I dunno why this happens */
2380 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2381
2382 if (!xfer->ux_pipe->up_repeat &&
2383 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2384 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2385 0, 0, 0);
2386 return;
2387 }
2388
2389 /* 4.11.5.2 Event Data TRB */
2390 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2391 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2392 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2393 if ((trb_0 & 0x3) == 0x3) {
2394 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2395 }
2396 }
2397
2398 switch (trbcode) {
2399 case XHCI_TRB_ERROR_SHORT_PKT:
2400 case XHCI_TRB_ERROR_SUCCESS:
2401 /*
2402 * A ctrl transfer can generate two events if it has a Data
2403 * stage. A short data stage can be OK and should not
2404 * complete the transfer as the status stage needs to be
2405 * performed.
2406 *
2407 * Note: Data and Status stage events point at same xfer.
2408 * ux_actlen and ux_dmabuf will be passed to
2409 * usb_transfer_complete after the Status stage event.
2410 *
2411  * Which stage generated the event can be distinguished:
2412  * + by checking the low 3 bits of trb_0 if ED==1
2413  *   (see xhci_device_ctrl_start).
2414  * + by checking the type of the original TRB if ED==0.
2415  *
2416  * In addition, intr, bulk, and isoc transfers currently
2417  * consist of a single TD, so the "skip" is not needed.
2418  * Only ctrl xfers use EVENT_DATA, so the driver can switch
2419  * the flow by checking the ED bit.
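		 *
		 * For example, a short Data stage event only records
		 * ux_actlen below and returns; the later Status stage
		 * event completes the xfer via usb_transfer_complete().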
2420 */
2421 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2422 if (xfer->ux_actlen == 0)
2423 xfer->ux_actlen = xfer->ux_length -
2424 XHCI_TRB_2_REM_GET(trb_2);
2425 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2426 == XHCI_TRB_TYPE_DATA_STAGE) {
2427 return;
2428 }
2429 } else if ((trb_0 & 0x3) == 0x3) {
2430 return;
2431 }
2432 err = USBD_NORMAL_COMPLETION;
2433 break;
2434 case XHCI_TRB_ERROR_STOPPED:
2435 case XHCI_TRB_ERROR_LENGTH:
2436 case XHCI_TRB_ERROR_STOPPED_SHORT:
2437 err = USBD_IOERROR;
2438 break;
2439 case XHCI_TRB_ERROR_STALL:
2440 case XHCI_TRB_ERROR_BABBLE:
2441 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2442 xr->is_halted = true;
2443 /*
2444 * Try to claim this xfer for completion. If it has already
2445 * completed or aborted, drop it on the floor.
2446 */
2447 if (!usbd_xfer_trycomplete(xfer))
2448 return;
2449
2450 /*
2451  * Stalled endpoints can be recovered by issuing a
2452  * TYPE_RESET_EP command TRB to the xHCI instead of
2453  * issuing a CLEAR_FEATURE UF_ENDPOINT_HALT request
2454  * to the endpoint. However, this function may be
2455  * called from softint context (e.g. from umass),
2456  * in which case the driver would hit a KASSERT in
2457  * cv_timedwait in xhci_do_command.
2458  * To avoid this, run reset_endpoint and
2459  * usb_transfer_complete asynchronously in the usb
2460  * task thread (and then umass issues clear
2461  * UF_ENDPOINT_HALT).
2462 */
2463
2464 /* Override the status. */
2465 xfer->ux_status = USBD_STALLED;
2466
2467 xhci_clear_endpoint_stall_async(xfer);
2468 return;
2469 default:
2470 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2471 err = USBD_IOERROR;
2472 break;
2473 }
2474
2475 /*
2476 * Try to claim this xfer for completion. If it has already
2477 * completed or aborted, drop it on the floor.
2478 */
2479 if (!usbd_xfer_trycomplete(xfer))
2480 return;
2481
2482 /* Set the status. */
2483 xfer->ux_status = err;
2484
2485 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
2486 (trb_0 & 0x3) == 0x0) {
2487 usb_transfer_complete(xfer);
2488 }
2489 }
2490
2491 /* Process Command complete events */
2492 static void
2493 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb)
2494 {
2495 uint64_t trb_0;
2496 uint32_t trb_2, trb_3;
2497
2498 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2499
2500 KASSERT(mutex_owned(&sc->sc_lock));
2501
2502 trb_0 = le64toh(trb->trb_0);
2503 trb_2 = le32toh(trb->trb_2);
2504 trb_3 = le32toh(trb->trb_3);
2505
2506 if (trb_0 == sc->sc_command_addr) {
2507 sc->sc_resultpending = false;
2508
2509 sc->sc_result_trb.trb_0 = trb_0;
2510 sc->sc_result_trb.trb_2 = trb_2;
2511 sc->sc_result_trb.trb_3 = trb_3;
2512 if (XHCI_TRB_2_ERROR_GET(trb_2) !=
2513 XHCI_TRB_ERROR_SUCCESS) {
2514 DPRINTFN(1, "command completion "
2515 "failure: 0x%016jx 0x%08jx 0x%08jx",
2516 trb_0, trb_2, trb_3, 0);
2517 }
2518 cv_signal(&sc->sc_command_cv);
2519 } else {
2520 DPRINTFN(1, "spurious event: %#jx 0x%016jx "
2521 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3);
2522 }
2523 }
2524
2525 /*
2526  * Process one event ring event.
2527  * Called from xhci_softintr.
2528 */
2529 static void
2530 xhci_handle_event(struct xhci_softc * const sc,
2531 const struct xhci_trb * const trb)
2532 {
2533 uint64_t trb_0;
2534 uint32_t trb_2, trb_3;
2535
2536 XHCIHIST_FUNC();
2537
2538 trb_0 = le64toh(trb->trb_0);
2539 trb_2 = le32toh(trb->trb_2);
2540 trb_3 = le32toh(trb->trb_3);
2541
2542 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx",
2543 (uintptr_t)trb, trb_0, trb_2, trb_3);
2544
2545 /*
2546 * 4.11.3.1, 6.4.2.1
2547 * TRB Pointer is invalid for these completion codes.
2548 */
2549 switch (XHCI_TRB_2_ERROR_GET(trb_2)) {
2550 case XHCI_TRB_ERROR_RING_UNDERRUN:
2551 case XHCI_TRB_ERROR_RING_OVERRUN:
2552 case XHCI_TRB_ERROR_VF_RING_FULL:
2553 return;
2554 default:
2555 if (trb_0 == 0) {
2556 return;
2557 }
2558 break;
2559 }
2560
2561 switch (XHCI_TRB_3_TYPE_GET(trb_3)) {
2562 case XHCI_TRB_EVENT_TRANSFER:
2563 xhci_event_transfer(sc, trb);
2564 break;
2565 case XHCI_TRB_EVENT_CMD_COMPLETE:
2566 xhci_event_cmd(sc, trb);
2567 break;
2568 case XHCI_TRB_EVENT_PORT_STS_CHANGE:
2569 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff));
2570 break;
2571 default:
2572 break;
2573 }
2574 }
2575
2576 static void
2577 xhci_softintr(void *v)
2578 {
2579 struct usbd_bus * const bus = v;
2580 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2581 struct xhci_ring * const er = &sc->sc_er;
2582 struct xhci_trb *trb;
2583 int i, j, k, bn;
2584
2585 XHCIHIST_FUNC();
2586
2587 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2588
2589 i = er->xr_ep;
2590 j = er->xr_cs;
2591
2592 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0);
2593
2594 /*
2595 * Handle deferred root intr xfer, in case we just switched off
2596 * polling. It's not safe to complete root intr xfers while
2597 * polling -- too much kernel machinery gets involved.
2598 */
2599 if (!xhci_polling_p(sc)) {
2600 for (bn = 0; bn < 2; bn++) {
2601 if (__predict_false(sc->sc_intrxfer_deferred[bn])) {
2602 sc->sc_intrxfer_deferred[bn] = false;
2603 usb_transfer_complete(sc->sc_intrxfer[bn]);
2604 }
2605 }
2606 }
2607
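	/*
	 * Consume events while the TRB's cycle bit matches our consumer
	 * cycle state; a mismatch means the producer (xHC) has not written
	 * that slot yet.  On wrapping past the end of the ring, toggle the
	 * expected cycle state.
	 */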
2608 while (1) {
2609 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE,
2610 BUS_DMASYNC_POSTREAD);
2611 trb = &er->xr_trb[i];
2612 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0;
2613
2614 if (j != k)
2615 break;
2616
2617 xhci_handle_event(sc, trb);
2618
2619 i++;
2620 if (i == er->xr_ntrb) {
2621 i = 0;
2622 j ^= 1;
2623 }
2624 }
2625
2626 er->xr_ep = i;
2627 er->xr_cs = j;
2628
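	/*
	 * Report the new dequeue position to the controller and clear the
	 * Event Handler Busy bit (write-1-to-clear) via ERDP.
	 */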
2629 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) |
2630 XHCI_ERDP_LO_BUSY);
2631
2632 DPRINTFN(16, "ends", 0, 0, 0, 0);
2633
2634 return;
2635 }
2636
2637 static void
2638 xhci_poll(struct usbd_bus *bus)
2639 {
2640 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2641
2642 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2643
2644 mutex_enter(&sc->sc_intr_lock);
2645 int ret = xhci_intr1(sc);
2646 if (ret) {
2647 xhci_softintr(bus);
2648 }
2649 mutex_exit(&sc->sc_intr_lock);
2650
2651 return;
2652 }
2653
2654 static struct usbd_xfer *
2655 xhci_allocx(struct usbd_bus *bus, unsigned int nframes)
2656 {
2657 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2658 struct usbd_xfer *xfer;
2659
2660 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2661
2662 xfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
2663 if (xfer != NULL) {
2664 memset(xfer, 0, sizeof(struct xhci_xfer));
2665 #ifdef DIAGNOSTIC
2666 xfer->ux_state = XFER_BUSY;
2667 #endif
2668 }
2669
2670 return xfer;
2671 }
2672
2673 static void
2674 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
2675 {
2676 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2677
2678 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2679
2680 #ifdef DIAGNOSTIC
2681 if (xfer->ux_state != XFER_BUSY &&
2682 xfer->ux_status != USBD_NOT_STARTED) {
2683 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx",
2684 (uintptr_t)xfer, xfer->ux_state, 0, 0);
2685 }
2686 xfer->ux_state = XFER_FREE;
2687 #endif
2688 pool_cache_put(sc->sc_xferpool, xfer);
2689 }
2690
2691 static bool
2692 xhci_dying(struct usbd_bus *bus)
2693 {
2694 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2695
2696 return sc->sc_dying;
2697 }
2698
2699 static void
2700 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2701 {
2702 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2703
2704 *lock = &sc->sc_lock;
2705 }
2706
2707 extern uint32_t usb_cookie_no;
2708
2709 /*
2710 * xHCI 4.3
2711 * Called when uhub_explore finds a new device (via usbd_new_device).
2712 * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2713 * This function does:
2714  *  Allocate and construct the dev structure and its default endpoint (ep0).
2715 * Allocate and open pipe of ep0.
2716 * Enable slot and initialize slot context.
2717 * Set Address.
2718 * Read initial device descriptor.
2719 * Determine initial MaxPacketSize (mps) by speed.
2720 * Read full device descriptor.
2721 * Register this device.
2722  *  Finally the device state transitions to ADDRESSED.
2723 */
2724 static usbd_status
2725 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2726 int speed, int port, struct usbd_port *up)
2727 {
2728 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2729 struct usbd_device *dev;
2730 usbd_status err;
2731 usb_device_descriptor_t *dd;
2732 struct xhci_slot *xs;
2733 uint32_t *cp;
2734
2735 XHCIHIST_FUNC();
2736 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2737 port, depth, speed, (uintptr_t)up);
2738
2739 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2740 dev->ud_bus = bus;
2741 dev->ud_quirks = &usbd_no_quirk;
2742 dev->ud_addr = 0;
2743 dev->ud_ddesc.bMaxPacketSize = 0;
2744 dev->ud_depth = depth;
2745 dev->ud_powersrc = up;
2746 dev->ud_myhub = up->up_parent;
2747 dev->ud_speed = speed;
2748 dev->ud_langid = USBD_NOLANG;
2749 dev->ud_cookie.cookie = ++usb_cookie_no;
2750
2751 /* Set up default endpoint handle. */
2752 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2753 /* doesn't matter, just don't leave it uninitialized */
2754 dev->ud_ep0.ue_toggle = 0;
2755
2756 /* Set up default endpoint descriptor. */
2757 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2758 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2759 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2760 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2761 dev->ud_ep0desc.bInterval = 0;
2762
2763 /* 4.3, 4.8.2.1 */
2764 switch (speed) {
2765 case USB_SPEED_SUPER:
2766 case USB_SPEED_SUPER_PLUS:
2767 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2768 break;
2769 case USB_SPEED_FULL:
2770 /* XXX using 64 as initial mps of ep0 in FS */
2771 case USB_SPEED_HIGH:
2772 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2773 break;
2774 case USB_SPEED_LOW:
2775 default:
2776 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2777 break;
2778 }
2779
2780 up->up_dev = dev;
2781
2782 /* Establish the default pipe. */
2783 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0, USBD_DEFAULT_INTERVAL,
2784 &dev->ud_pipe0);
2785 if (err) {
2786 goto bad;
2787 }
2788
2789 dd = &dev->ud_ddesc;
2790
2791 if (depth == 0 && port == 0) {
2792 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2793 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2794 err = usbd_get_initial_ddesc(dev, dd);
2795 if (err) {
2796 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2797 goto bad;
2798 }
2799
2800 err = usbd_reload_device_desc(dev);
2801 if (err) {
2802 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
2803 goto bad;
2804 }
2805 } else {
2806 uint8_t slot = 0;
2807
2808 /* 4.3.2 */
2809 err = xhci_enable_slot(sc, &slot);
2810 if (err) {
2811 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0);
2812 goto bad;
2813 }
2814
2815 xs = &sc->sc_slots[slot];
2816 dev->ud_hcpriv = xs;
2817
2818 /* 4.3.3 initialize slot structure */
2819 err = xhci_init_slot(dev, slot);
2820 if (err) {
2821 DPRINTFN(1, "init slot %ju", err, 0, 0, 0);
2822 dev->ud_hcpriv = NULL;
2823 /*
2824 * We have to disable_slot here because
2825 * xs->xs_idx == 0 when xhci_init_slot fails,
2826 * in that case usbd_remove_dev won't work.
2827 */
2828 mutex_enter(&sc->sc_lock);
2829 xhci_disable_slot(sc, slot);
2830 mutex_exit(&sc->sc_lock);
2831 goto bad;
2832 }
2833
2834 /* 4.3.4 Address Assignment */
2835 err = xhci_set_address(dev, slot, false);
2836 if (err) {
2837 DPRINTFN(1, "failed! to set address: %ju", err, 0, 0, 0);
2838 goto bad;
2839 }
2840
2841 /* Allow device time to set new address */
2842 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE);
2843
2844 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2845 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT);
2846 HEXDUMP("slot context", cp, sc->sc_ctxsz);
2847 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3]));
2848 DPRINTFN(4, "device address %ju", addr, 0, 0, 0);
2849 /*
2850 * XXX ensure we know when the hardware does something
2851 * we can't yet cope with
2852 */
2853 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr);
2854 dev->ud_addr = addr;
2855
2856 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL,
2857 "addr %d already allocated", dev->ud_addr);
2858 /*
2859 * The root hub is given its own slot
2860 */
2861 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev;
2862
2863 err = usbd_get_initial_ddesc(dev, dd);
2864 if (err) {
2865 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2866 goto bad;
2867 }
2868
2869 /* 4.8.2.1 */
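		/*
		 * For SS devices bMaxPacketSize in the device descriptor is
		 * an exponent: the ep0 max packet size is 2^bMaxPacketSize,
		 * which must be 9 (512 bytes).
		 */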
2870 if (USB_IS_SS(speed)) {
2871 if (dd->bMaxPacketSize != 9) {
2872 printf("%s: invalid mps 2^%u for SS ep0,"
2873 " using 512\n",
2874 device_xname(sc->sc_dev),
2875 dd->bMaxPacketSize);
2876 dd->bMaxPacketSize = 9;
2877 }
2878 USETW(dev->ud_ep0desc.wMaxPacketSize,
2879 (1 << dd->bMaxPacketSize));
2880 } else
2881 USETW(dev->ud_ep0desc.wMaxPacketSize,
2882 dd->bMaxPacketSize);
2883 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0);
2884 err = xhci_update_ep0_mps(sc, xs,
2885 UGETW(dev->ud_ep0desc.wMaxPacketSize));
2886 if (err) {
2887 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0);
2888 goto bad;
2889 }
2890
2891 err = usbd_reload_device_desc(dev);
2892 if (err) {
2893 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
2894 goto bad;
2895 }
2896 }
2897
2898 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,",
2899 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0);
2900 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,",
2901 dd->bDeviceClass, dd->bDeviceSubClass,
2902 dd->bDeviceProtocol, 0);
2903 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd",
2904 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations,
2905 dev->ud_speed);
2906
2907 usbd_get_device_strings(dev);
2908
2909 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev);
2910
2911 if (depth == 0 && port == 0) {
2912 usbd_attach_roothub(parent, dev);
2913 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0);
2914 return USBD_NORMAL_COMPLETION;
2915 }
2916
2917 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr);
2918 bad:
2919 if (err != USBD_NORMAL_COMPLETION) {
2920 usbd_remove_device(dev, up);
2921 }
2922
2923 return err;
2924 }
2925
2926 static usbd_status
2927 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring * const xr,
2928 size_t ntrb, size_t align)
2929 {
2930 usbd_status err;
2931 size_t size = ntrb * XHCI_TRB_SIZE;
2932
2933 XHCIHIST_FUNC();
2934 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx",
2935 (uintptr_t)xr, ntrb, align, 0);
2936
2937 err = usb_allocmem(&sc->sc_bus, size, align, &xr->xr_dma);
2938 if (err)
2939 return err;
2940 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
2941 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP);
2942 xr->xr_trb = xhci_ring_trbv(xr, 0);
2943 xr->xr_ntrb = ntrb;
2944 xr->is_halted = false;
2945 xhci_host_dequeue(xr);
2946
2947 return USBD_NORMAL_COMPLETION;
2948 }
2949
2950 static void
2951 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring * const xr)
2952 {
2953 usb_freemem(&sc->sc_bus, &xr->xr_dma);
2954 mutex_destroy(&xr->xr_lock);
2955 kmem_free(xr->xr_cookies, sizeof(*xr->xr_cookies) * xr->xr_ntrb);
2956 }
2957
2958 static void
2959 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr,
2960 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs)
2961 {
2962 size_t i;
2963 u_int ri;
2964 u_int cs;
2965 uint64_t parameter;
2966 uint32_t status;
2967 uint32_t control;
2968
2969 XHCIHIST_FUNC();
2970 XHCIHIST_CALLARGS("%#jx xr_ep 0x%jx xr_cs %ju",
2971 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0);
2972
2973 KASSERTMSG(ntrbs <= XHCI_XFER_NTRB, "ntrbs %zu", ntrbs);
2974 for (i = 0; i < ntrbs; i++) {
2975 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr,
2976 (uintptr_t)trbs, i, 0);
2977 DPRINTFN(12, " %016jx %08jx %08jx",
2978 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0);
2979 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) !=
2980 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3);
2981 }
2982
2983 ri = xr->xr_ep;
2984 cs = xr->xr_cs;
2985
2986 /*
2987 * Although the xhci hardware can do scatter/gather dma from
2988 * arbitrary sized buffers, there is a non-obvious restriction
2989 * that a LINK trb is only allowed at the end of a burst of
2990 * transfers - which might be 16kB.
2991  * Arbitrarily aligned LINK trbs definitely fail on Ivy Bridge.
2992 * The simple solution is not to allow a LINK trb in the middle
2993 * of anything - as here.
2994 * XXX: (dsl) There are xhci controllers out there (eg some made by
2995 * ASMedia) that seem to lock up if they process a LINK trb but
2996 * cannot process the linked-to trb yet.
2997 * The code should write the 'cycle' bit on the link trb AFTER
2998 * adding the other trb.
2999 */
3000 u_int firstep = xr->xr_ep;
3001 u_int firstcs = xr->xr_cs;
3002
3003 for (i = 0; i < ntrbs; ) {
3004 u_int oldri = ri;
3005 u_int oldcs = cs;
3006
3007 if (ri >= (xr->xr_ntrb - 1)) {
3008 /* Put Link TD at the end of ring */
3009 parameter = xhci_ring_trbp(xr, 0);
3010 status = 0;
3011 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) |
3012 XHCI_TRB_3_TC_BIT;
3013 xr->xr_cookies[ri] = NULL;
3014 xr->xr_ep = 0;
3015 xr->xr_cs ^= 1;
3016 ri = xr->xr_ep;
3017 cs = xr->xr_cs;
3018 } else {
3019 parameter = trbs[i].trb_0;
3020 status = trbs[i].trb_2;
3021 control = trbs[i].trb_3;
3022
3023 xr->xr_cookies[ri] = cookie;
3024 ri++;
3025 i++;
3026 }
3027 /*
3028  * If this is the first TRB, mark it invalid to prevent
3029  * the xHC from running it immediately.
3030 */
3031 if (oldri == firstep) {
3032 if (oldcs) {
3033 control &= ~XHCI_TRB_3_CYCLE_BIT;
3034 } else {
3035 control |= XHCI_TRB_3_CYCLE_BIT;
3036 }
3037 } else {
3038 if (oldcs) {
3039 control |= XHCI_TRB_3_CYCLE_BIT;
3040 } else {
3041 control &= ~XHCI_TRB_3_CYCLE_BIT;
3042 }
3043 }
3044 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control);
3045 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri,
3046 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3047 }
3048
3049 /* Now invert the cycle bit of the first TRB, handing the whole chain to the xHC */
3050 if (firstcs) {
3051 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT);
3052 } else {
3053 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT);
3054 }
3055 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep,
3056 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3057
3058 xr->xr_ep = ri;
3059 xr->xr_cs = cs;
3060
3061 DPRINTFN(12, "%#jx xr_ep 0x%jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep,
3062 xr->xr_cs, 0);
3063 }
3064
3065 /*
3066  * Stop execution of commands, purge all commands on the command ring,
3067  * and rewind the dequeue pointer.
3068 */
3069 static void
3070 xhci_abort_command(struct xhci_softc *sc)
3071 {
3072 struct xhci_ring * const cr = &sc->sc_cr;
3073 uint64_t crcr;
3074 int i;
3075
3076 XHCIHIST_FUNC();
3077 XHCIHIST_CALLARGS("command %#jx timeout, aborting",
3078 sc->sc_command_addr, 0, 0, 0);
3079
3080 mutex_enter(&cr->xr_lock);
3081
3082 /* 4.6.1.2 Aborting a Command */
3083 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3084 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA);
3085
3086 for (i = 0; i < 500; i++) {
3087 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3088 if ((crcr & XHCI_CRCR_LO_CRR) == 0)
3089 break;
3090 usb_delay_ms(&sc->sc_bus, 1);
3091 }
3092 if ((crcr & XHCI_CRCR_LO_CRR) != 0) {
3093 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0);
3094 /* reset HC here? */
3095 }
3096
3097 /* reset command ring dequeue pointer */
3098 cr->xr_ep = 0;
3099 cr->xr_cs = 1;
3100 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs);
3101
3102 mutex_exit(&cr->xr_lock);
3103 }
3104
3105 /*
3106  * Put a command on the command ring, ring the doorbell, and cv_timedwait.
3107  * Command completion is signalled by cv_signal from xhci_event_cmd()
3108  * (called from xhci_softintr), or the wait times out.
3109  * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(),
3110  * then do_command examines it.
3111 */
3112 static usbd_status
3113 xhci_do_command_locked(struct xhci_softc * const sc,
3114 struct xhci_soft_trb * const trb, int timeout)
3115 {
3116 struct xhci_ring * const cr = &sc->sc_cr;
3117 usbd_status err;
3118
3119 XHCIHIST_FUNC();
3120 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx",
3121 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3122
3123 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
3124 KASSERT(mutex_owned(&sc->sc_lock));
3125
3126 while (sc->sc_command_addr != 0 ||
3127 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp))
3128 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
3129
3130 /*
3131  * If the enqueue pointer points at the last entry of the ring, that is
3132  * the Link TRB, and the command TRB will be stored in TRB 0 instead.
3133 */
3134 if (cr->xr_ep == cr->xr_ntrb - 1)
3135 sc->sc_command_addr = xhci_ring_trbp(cr, 0);
3136 else
3137 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep);
3138
3139 sc->sc_resultpending = true;
3140
3141 mutex_enter(&cr->xr_lock);
3142 xhci_ring_put(sc, cr, NULL, trb, 1);
3143 mutex_exit(&cr->xr_lock);
3144
3145 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0);
3146
3147 while (sc->sc_resultpending) {
3148 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock,
3149 MAX(1, mstohz(timeout))) == EWOULDBLOCK) {
3150 xhci_abort_command(sc);
3151 err = USBD_TIMEOUT;
3152 goto timedout;
3153 }
3154 }
3155
3156 trb->trb_0 = sc->sc_result_trb.trb_0;
3157 trb->trb_2 = sc->sc_result_trb.trb_2;
3158 trb->trb_3 = sc->sc_result_trb.trb_3;
3159
3160 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx",
3161 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3162
3163 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) {
3164 case XHCI_TRB_ERROR_SUCCESS:
3165 err = USBD_NORMAL_COMPLETION;
3166 break;
3167 default:
3168 case 192 ... 223:
3169 DPRINTFN(5, "error %x",
3170 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0);
3171 err = USBD_IOERROR;
3172 break;
3173 case 224 ... 255:
3174 err = USBD_NORMAL_COMPLETION;
3175 break;
3176 }
3177
3178 timedout:
3179 sc->sc_resultpending = false;
3180 sc->sc_command_addr = 0;
3181 cv_broadcast(&sc->sc_cmdbusy_cv);
3182
3183 return err;
3184 }
3185
3186 static usbd_status
3187 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb,
3188 int timeout)
3189 {
3190
3191 mutex_enter(&sc->sc_lock);
3192 usbd_status ret = xhci_do_command_locked(sc, trb, timeout);
3193 mutex_exit(&sc->sc_lock);
3194
3195 return ret;
3196 }
3197
3198 static usbd_status
3199 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp)
3200 {
3201 struct xhci_soft_trb trb;
3202 usbd_status err;
3203
3204 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3205
3206 trb.trb_0 = 0;
3207 trb.trb_2 = 0;
3208 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
3209
3210 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3211 if (err != USBD_NORMAL_COMPLETION) {
3212 return err;
3213 }
3214
3215 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3);
3216
3217 return err;
3218 }
3219
3220 /*
3221 * xHCI 4.6.4
3222  * Issue Disable Slot and deallocate the ring and device/input context DMA buffers.
3223 * All endpoints in the slot should be stopped.
3224 * Should be called with sc_lock held.
3225 */
3226 static usbd_status
3227 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot)
3228 {
3229 struct xhci_soft_trb trb;
3230 struct xhci_slot *xs;
3231 usbd_status err;
3232
3233 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3234
3235 if (sc->sc_dying)
3236 return USBD_IOERROR;
3237
3238 trb.trb_0 = 0;
3239 trb.trb_2 = 0;
3240 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) |
3241 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT);
3242
3243 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
3244
3245 if (!err) {
3246 xs = &sc->sc_slots[slot];
3247 if (xs->xs_idx != 0) {
3248 xhci_free_slot(sc, xs, XHCI_DCI_SLOT + 1, 32);
3249 xhci_set_dcba(sc, 0, slot);
3250 memset(xs, 0, sizeof(*xs));
3251 }
3252 }
3253
3254 return err;
3255 }
3256
3257 /*
3258  * Set the address of the device and transition the slot state from ENABLED
3259  * to ADDRESSED if the Block Set Address Request (BSR) flag is false.
3260  * If BSR==true, transition the slot state from ENABLED to DEFAULT instead.
3261 * see xHCI 1.1 4.5.3, 3.3.4
3262 * Should be called without sc_lock held.
3263 */
3264 static usbd_status
3265 xhci_address_device(struct xhci_softc * const sc,
3266 uint64_t icp, uint8_t slot_id, bool bsr)
3267 {
3268 struct xhci_soft_trb trb;
3269 usbd_status err;
3270
3271 XHCIHIST_FUNC();
3272 if (bsr) {
3273 XHCIHIST_CALLARGS("icp %jx slot %jx with bsr",
3274 icp, slot_id, 0, 0);
3275 } else {
3276 XHCIHIST_CALLARGS("icp %jx slot %jx nobsr",
3277 icp, slot_id, 0, 0);
3278 }
3279
3280 trb.trb_0 = icp;
3281 trb.trb_2 = 0;
3282 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) |
3283 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) |
3284 (bsr ? XHCI_TRB_3_BSR_BIT : 0);
3285
3286 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3287
3288 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS)
3289 err = USBD_NO_ADDR;
3290
3291 return err;
3292 }
3293
3294 static usbd_status
3295 xhci_update_ep0_mps(struct xhci_softc * const sc,
3296 struct xhci_slot * const xs, u_int mps)
3297 {
3298 struct xhci_soft_trb trb;
3299 usbd_status err;
3300 uint32_t * cp;
3301
3302 XHCIHIST_FUNC();
3303 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0);
3304
3305 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3306 cp[0] = htole32(0);
3307 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL));
3308
3309 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL));
3310 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps));
3311
3312 /* sync input contexts before they are read from memory */
3313 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3314 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3315 sc->sc_ctxsz * 4);
3316
3317 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
3318 trb.trb_2 = 0;
3319 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
3320 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX);
3321
3322 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3323 return err;
3324 }
3325
3326 static void
3327 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si)
3328 {
3329 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0);
3330
3331 XHCIHIST_FUNC();
3332 XHCIHIST_CALLARGS("dcbaa %#jx dc %016jx slot %jd",
3333 (uintptr_t)&dcbaa[si], dcba, si, 0);
3334
3335 dcbaa[si] = htole64(dcba);
3336 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t),
3337 BUS_DMASYNC_PREWRITE);
3338 }
3339
3340 /*
3341  * Allocate the device and input context DMA buffers, and
3342  * a TRB DMA buffer for each endpoint.
3343 */
3344 static usbd_status
3345 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3346 {
3347 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3348 struct xhci_slot *xs;
3349 usbd_status err;
3350 u_int dci;
3351
3352 XHCIHIST_FUNC();
3353 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3354
3355 xs = &sc->sc_slots[slot];
3356
3357 /* allocate contexts */
3358 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, sc->sc_pgsz,
3359 &xs->xs_dc_dma);
3360 if (err)
3361 return err;
3362 memset(KERNADDR(&xs->xs_dc_dma, 0), 0, sc->sc_pgsz);
3363
3364 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, sc->sc_pgsz,
3365 &xs->xs_ic_dma);
3366 if (err)
3367 goto bad1;
3368 memset(KERNADDR(&xs->xs_ic_dma, 0), 0, sc->sc_pgsz);
3369
3370 for (dci = 0; dci < 32; dci++) {
3371 //CTASSERT(sizeof(xs->xs_ep[dci]) == sizeof(struct xhci_endpoint));
3372 memset(&xs->xs_ep[dci], 0, sizeof(xs->xs_ep[dci]));
3373 if (dci == XHCI_DCI_SLOT)
3374 continue;
3375 err = xhci_ring_init(sc, &xs->xs_ep[dci].xe_tr,
3376 XHCI_TRANSFER_RING_TRBS, XHCI_TRB_ALIGN);
3377 if (err) {
3378 DPRINTFN(0, "ring init failure", 0, 0, 0, 0);
3379 goto bad2;
3380 }
3381 }
3382
3383 bad2:
3384 if (err == USBD_NORMAL_COMPLETION) {
3385 xs->xs_idx = slot;
3386 } else {
3387 xhci_free_slot(sc, xs, XHCI_DCI_SLOT + 1, dci);
3388 }
3389
3390 return err;
3391
3392 bad1:
3393 usb_freemem(&sc->sc_bus, &xs->xs_dc_dma);
3394 xs->xs_idx = 0;
3395 return err;
3396 }
3397
3398 static void
3399 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs, int start_dci,
3400 int end_dci)
3401 {
3402 u_int dci;
3403
3404 XHCIHIST_FUNC();
3405 XHCIHIST_CALLARGS("slot %ju start %ju end %ju",
3406 xs->xs_idx, start_dci, end_dci, 0);
3407
3408 for (dci = start_dci; dci < end_dci; dci++) {
3409 xhci_ring_free(sc, &xs->xs_ep[dci].xe_tr);
3410 memset(&xs->xs_ep[dci], 0, sizeof(xs->xs_ep[dci]));
3411 }
3412 usb_freemem(&sc->sc_bus, &xs->xs_ic_dma);
3413 usb_freemem(&sc->sc_bus, &xs->xs_dc_dma);
3414 xs->xs_idx = 0;
3415 }
3416
3417 /*
3418  * Set up the slot context, set the Device Context Base Address, and issue
3419  * an Address Device command.
3420 */
3421 static usbd_status
3422 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3423 {
3424 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3425 struct xhci_slot *xs;
3426 usbd_status err;
3427
3428 XHCIHIST_FUNC();
3429 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3430
3431 xs = &sc->sc_slots[slot];
3432
3433 xhci_setup_ctx(dev->ud_pipe0);
3434
3435 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3436 sc->sc_ctxsz * 3);
3437
3438 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3439
3440 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3441
3442 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3443 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3444 sc->sc_ctxsz * 2);
3445
3446 return err;
3447 }
3448
3449 /*
3450 * 4.8.2, 6.2.3.2
3451 * construct slot/endpoint context parameters and do syncmem
3452 */
3453 static void
3454 xhci_setup_ctx(struct usbd_pipe *pipe)
3455 {
3456 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3457 struct usbd_device *dev = pipe->up_dev;
3458 struct xhci_slot * const xs = dev->ud_hcpriv;
3459 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3460 const u_int dci = xhci_ep_get_dci(ed);
3461 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3462 uint32_t *cp;
3463 uint16_t mps = UGETW(ed->wMaxPacketSize);
3464 uint8_t speed = dev->ud_speed;
3465 uint8_t ival = ed->bInterval;
3466
3467 XHCIHIST_FUNC();
3468 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3469 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3470
3471 /* set up initial input control context */
3472 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3473 cp[0] = htole32(0);
3474 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3475 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT));
3476 cp[7] = htole32(0);
3477
3478 /* set up input slot context */
3479 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
3480 cp[0] =
3481 XHCI_SCTX_0_CTX_NUM_SET(dci) |
3482 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed));
3483 cp[1] = 0;
3484 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0);
3485 cp[3] = 0;
3486 xhci_setup_route(pipe, cp);
3487 xhci_setup_tthub(pipe, cp);
3488
3489 cp[0] = htole32(cp[0]);
3490 cp[1] = htole32(cp[1]);
3491 cp[2] = htole32(cp[2]);
3492 cp[3] = htole32(cp[3]);
3493
3494 /* set up input endpoint context */
3495 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci));
3496 cp[0] =
3497 XHCI_EPCTX_0_EPSTATE_SET(0) |
3498 XHCI_EPCTX_0_MULT_SET(0) |
3499 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) |
3500 XHCI_EPCTX_0_LSA_SET(0) |
3501 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0);
3502 cp[1] =
3503 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) |
3504 XHCI_EPCTX_1_HID_SET(0) |
3505 XHCI_EPCTX_1_MAXB_SET(0);
3506
3507 if (xfertype != UE_ISOCHRONOUS)
3508 cp[1] |= XHCI_EPCTX_1_CERR_SET(3);
3509
3510 if (xfertype == UE_CONTROL)
3511 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); /* 6.2.3 */
3512 else if (USB_IS_SS(speed))
3513 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(mps);
3514 else
3515 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(UE_GET_SIZE(mps));
3516
3517 xhci_setup_maxburst(pipe, cp);
3518
3519 switch (xfertype) {
3520 case UE_CONTROL:
3521 break;
3522 case UE_BULK:
3523 /* XXX Set MaxPStreams, HID, and LSA if streams enabled */
3524 break;
3525 case UE_INTERRUPT:
3526 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3527 ival = pipe->up_interval;
3528
3529 ival = xhci_bival2ival(ival, speed);
3530 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3531 break;
3532 case UE_ISOCHRONOUS:
3533 if (pipe->up_interval != USBD_DEFAULT_INTERVAL)
3534 ival = pipe->up_interval;
3535
3536 /* xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 */
3537 if (speed == USB_SPEED_FULL)
3538 ival += 3; /* 1ms -> 125us */
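		/* e.g. FS isoc bInterval 1 (every 1ms frame) ends up as
		 * ival 3 after the decrement below, i.e. 2^3 * 125us = 1ms. */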
3539 ival--;
3540 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3541 break;
3542 default:
3543 break;
3544 }
3545 DPRINTFN(4, "setting ival %ju MaxBurst %#jx",
3546 XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_1_MAXB_GET(cp[1]), 0, 0);
3547
3548 /* rewind TR dequeue pointer in xHC */
3549 /* can't use xhci_ep_get_dci() yet? */
3550 *(uint64_t *)(&cp[2]) = htole64(
3551 xhci_ring_trbp(&xs->xs_ep[dci].xe_tr, 0) |
3552 XHCI_EPCTX_2_DCS_SET(1));
3553
3554 cp[0] = htole32(cp[0]);
3555 cp[1] = htole32(cp[1]);
3556 cp[4] = htole32(cp[4]);
3557
3558 /* rewind TR dequeue pointer in driver */
3559 struct xhci_ring *xr = &xs->xs_ep[dci].xe_tr;
3560 mutex_enter(&xr->xr_lock);
3561 xhci_host_dequeue(xr);
3562 mutex_exit(&xr->xr_lock);
3563
3564 /* sync input contexts before they are read from memory */
3565 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3566 }
3567
3568 /*
3569  * Set up the route string and root hub port of the given device for the slot context
3570 */
3571 static void
3572 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3573 {
3574 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3575 struct usbd_device *dev = pipe->up_dev;
3576 struct usbd_port *up = dev->ud_powersrc;
3577 struct usbd_device *hub;
3578 struct usbd_device *adev;
3579 uint8_t rhport = 0;
3580 uint32_t route = 0;
3581
3582 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3583
3584 /* Locate the root hub port and determine the route string */
3585 /* 4.3.3 route string does not include roothub port */
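	/*
	 * e.g. a device on port 3 of a hub that itself sits on root hub
	 * port 4 ends up with rhport = 4 and, after the final shift below,
	 * route = 0x3.
	 */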
3586 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3587 uint32_t dep;
3588
3589 DPRINTFN(4, "hub %#jx depth %jd upport %jp upportno %jd",
3590 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3591 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3592 -1);
3593
3594 if (hub->ud_powersrc == NULL)
3595 break;
3596 dep = hub->ud_depth;
3597 if (dep == 0)
3598 break;
3599 rhport = hub->ud_powersrc->up_portno;
3600 if (dep > USB_HUB_MAX_DEPTH)
3601 continue;
3602
3603 route |=
3604 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3605 << ((dep - 1) * 4);
3606 }
3607 route = route >> 4;
3608 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
3609
3610 /* Locate port on upstream high speed hub */
3611 for (adev = dev, hub = up->up_parent;
3612 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3613 adev = hub, hub = hub->ud_myhub)
3614 ;
3615 if (hub) {
3616 int p;
3617 for (p = 0; p < hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3618 if (hub->ud_hub->uh_ports[p].up_dev == adev) {
3619 dev->ud_myhsport = &hub->ud_hub->uh_ports[p];
3620 goto found;
3621 }
3622 }
3623 panic("%s: cannot find HS port", __func__);
3624 found:
3625 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3626 } else {
3627 dev->ud_myhsport = NULL;
3628 }
3629
3630 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3631
3632 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3633 ctlrport, route, (uintptr_t)hub);
3634
3635 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3636 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3637 }
3638
3639 /*
3640  * Set up whether the device is a hub, whether it uses MTT, and
3641  * the TT information if it does.
3642 */
3643 static void
3644 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3645 {
3646 struct usbd_device *dev = pipe->up_dev;
3647 struct usbd_port *myhsport = dev->ud_myhsport;
3648 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3649 uint32_t speed = dev->ud_speed;
3650 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3651 uint8_t tthubslot, ttportnum;
3652 bool ishub;
3653 bool usemtt;
3654
3655 XHCIHIST_FUNC();
3656
3657 /*
3658 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3659 * tthubslot:
3660 * This is the slot ID of parent HS hub
3661 * if LS/FS device is connected && connected through HS hub.
3662 * This is 0 if device is not LS/FS device ||
3663 * parent hub is not HS hub ||
3664 * attached to root hub.
3665 * ttportnum:
3666 * This is the downstream facing port of parent HS hub
3667 * if LS/FS device is connected.
3668 * This is 0 if device is not LS/FS device ||
3669 * parent hub is not HS hub ||
3670 * attached to root hub.
3671 */
3672 if (myhsport &&
3673 myhsport->up_parent->ud_addr != rhaddr &&
3674 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3675 ttportnum = myhsport->up_portno;
3676 tthubslot = myhsport->up_parent->ud_addr;
3677 } else {
3678 ttportnum = 0;
3679 tthubslot = 0;
3680 }
3681 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd",
3682 (uintptr_t)myhsport, ttportnum, tthubslot, 0);
3683
3684 /* ishub is valid after reading UDESC_DEVICE */
3685 ishub = (dd->bDeviceClass == UDCLASS_HUB);
3686
3687 /* dev->ud_hub is valid after reading UDESC_HUB */
3688 if (ishub && dev->ud_hub) {
3689 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc;
3690 uint8_t ttt =
3691 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK);
3692
3693 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts);
3694 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt);
3695 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0);
3696 }
3697
3698 #define IS_MTTHUB(dd) \
3699 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT)
3700
3701 /*
3702 * MTT flag is set if
3703 * 1. this is HS hub && MTTs are supported and enabled; or
3704 * 2. this is LS or FS device && there is a parent HS hub where MTTs
3705 * are supported and enabled.
3706 *
3707 * XXX enabled is not tested yet
3708 */
3709 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd))
3710 usemtt = true;
3711 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) &&
3712 myhsport &&
3713 myhsport->up_parent->ud_addr != rhaddr &&
3714 IS_MTTHUB(&myhsport->up_parent->ud_ddesc))
3715 usemtt = true;
3716 else
3717 usemtt = false;
3718 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd",
3719 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt);
3720
3721 #undef IS_MTTHUB
3722
3723 cp[0] |=
3724 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) |
3725 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0);
3726 cp[2] |=
3727 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) |
3728 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum);
3729 }
3730
3731 /* set up max packet size and max burst params for the endpoint */
3732 static void
3733 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp)
3734 {
3735 struct usbd_device *dev = pipe->up_dev;
3736 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3737 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3738 usbd_desc_iter_t iter;
3739 const usb_cdc_descriptor_t *cdcd;
3740 uint32_t maxb = 0;
3741 uint16_t mps = UGETW(ed->wMaxPacketSize);
3742 uint8_t speed = dev->ud_speed;
3743 uint8_t ep;
3744
3745 /* config desc is NULL when opening ep0 */
3746 if (dev == NULL || dev->ud_cdesc == NULL)
3747 goto no_cdcd;
3748 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev,
3749 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY);
3750 if (cdcd == NULL)
3751 goto no_cdcd;
3752 usb_desc_iter_init(dev, &iter);
3753 iter.cur = (const void *)cdcd;
3754
3755 /* find endpoint_ss_comp desc for ep of this pipe */
3756 for (ep = 0;;) {
3757 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter);
3758 if (cdcd == NULL)
3759 break;
3760 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) {
3761 ep = ((const usb_endpoint_descriptor_t *)cdcd)->
3762 bEndpointAddress;
3763 if (UE_GET_ADDR(ep) ==
3764 UE_GET_ADDR(ed->bEndpointAddress)) {
3765 cdcd = (const usb_cdc_descriptor_t *)
3766 usb_desc_iter_next(&iter);
3767 break;
3768 }
3769 ep = 0;
3770 }
3771 }
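	/*
	 * In a USB 3.x configuration, the SuperSpeed endpoint companion
	 * descriptor immediately follows the endpoint descriptor it
	 * belongs to (USB 3.1 9.6.7), so the descriptor fetched right
	 * after the matching endpoint descriptor is checked below.
	 */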
3772 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
3773 const usb_endpoint_ss_comp_descriptor_t * esscd =
3774 (const usb_endpoint_ss_comp_descriptor_t *)cdcd;
3775 maxb = esscd->bMaxBurst;
3776 }
3777
3778 no_cdcd:
3779 /* 6.2.3.4, 4.8.2.4 */
3780 if (USB_IS_SS(speed)) {
3781 /* USB 3.1 9.6.6 */
3782 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps);
3783 /* USB 3.1 9.6.7 */
3784 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3785 #ifdef notyet
3786 if (xfertype == UE_ISOCHRONOUS) {
3787 }
3788 if (XHCI_HCC2_LEC(sc->sc_hcc2) != 0) {
3789 /* use ESIT */
3790 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x);
3791 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(x);
3792
3793 /* XXX if LEC = 1, set ESIT instead */
3794 cp[0] |= XHCI_EPCTX_0_MULT_SET(0);
3795 } else {
3796 /* use ival */
3797 }
3798 #endif
3799 } else {
3800 /* USB 2.0 9.6.6 */
3801 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(UE_GET_SIZE(mps));
3802
3803 /* 6.2.3.4 */
3804 if (speed == USB_SPEED_HIGH &&
3805 (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)) {
3806 maxb = UE_GET_TRANS(mps);
3807 } else {
3808 /* LS/FS or HS CTRL or HS BULK */
3809 maxb = 0;
3810 }
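		/*
		 * Example (hypothetical endpoint): a HS high-bandwidth
		 * isochronous endpoint with wMaxPacketSize 0x1400 encodes
		 * two additional transactions per microframe, so maxb = 2
		 * while the MAXP_SIZE field above gets 0x400 (1024) bytes.
		 */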
3811 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3812 }
3813 }
3814
3815 /*
3816 * Convert endpoint bInterval value to endpoint context interval value
3817 * for Interrupt pipe.
3818 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
3819 */
3820 static uint32_t
3821 xhci_bival2ival(uint32_t ival, uint32_t speed)
3822 {
3823 if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
3824 int i;
3825
3826 /*
3827 		 * Convert bInterval (in ms, i.e. frames) to 125us units
3828 		 * (bInterval * 8) and round down to the nearest power of 2.
3829 		 * bInterval is at most 255 as its type is uByte.
3830 		 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
3831 */
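		/*
		 * Worked example (hypothetical endpoint): a FS interrupt
		 * endpoint with bInterval 10 (ms) gives 10 * 8 = 80 125us
		 * units; the largest power of two not above 80 is 2^6 = 64,
		 * so the resulting Interval is 6, i.e. an 8 ms period.
		 */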
3832 for (i = 10; i > 0; i--) {
3833 if ((ival * 8) >= (1 << i))
3834 break;
3835 }
3836 ival = i;
3837 } else {
3838 /* Interval = bInterval-1 for SS/HS */
3839 ival--;
3840 }
3841
3842 return ival;
3843 }
3844
3845 /* ----- */
3846
3847 static void
3848 xhci_noop(struct usbd_pipe *pipe)
3849 {
3850 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3851 }
3852
3853 /*
3854 * Process root hub request.
3855 */
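/*
 * Returns the number of bytes placed in buf, -1 on error, or buflen to
 * fall back to the usbroothub default handling of the request.
 */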
3856 static int
3857 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
3858 void *buf, int buflen)
3859 {
3860 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
3861 usb_port_status_t ps;
3862 int l, totlen = 0;
3863 uint16_t len, value, index;
3864 int port, i;
3865 uint32_t v;
3866
3867 XHCIHIST_FUNC();
3868
3869 if (sc->sc_dying)
3870 return -1;
3871
3872 size_t bn = bus == &sc->sc_bus ? 0 : 1;
3873
3874 len = UGETW(req->wLength);
3875 value = UGETW(req->wValue);
3876 index = UGETW(req->wIndex);
3877
3878 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
3879 req->bmRequestType | (req->bRequest << 8), value, index, len);
3880
3881 #define C(x,y) ((x) | ((y) << 8))
3882 switch (C(req->bRequest, req->bmRequestType)) {
3883 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
3884 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
3885 if (len == 0)
3886 break;
3887 switch (value) {
3888 #define sd ((usb_string_descriptor_t *)buf)
3889 case C(2, UDESC_STRING):
3890 /* Product */
3891 totlen = usb_makestrdesc(sd, len, "xHCI root hub");
3892 break;
3893 #undef sd
3894 default:
3895 /* default from usbroothub */
3896 return buflen;
3897 }
3898 break;
3899
3900 /* Hub requests */
3901 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
3902 break;
3903 /* Clear Port Feature request */
3904 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
3905 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
3906
3907 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
3908 index, value, bn, cp);
3909 if (index < 1 || index > sc->sc_rhportcount[bn]) {
3910 return -1;
3911 }
3912 port = XHCI_PORTSC(cp);
3913 v = xhci_op_read_4(sc, port);
3914 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
3915 v &= ~XHCI_PS_CLEAR;
3916 switch (value) {
3917 case UHF_PORT_ENABLE:
3918 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
3919 break;
3920 case UHF_PORT_SUSPEND:
3921 return -1;
3922 case UHF_PORT_POWER:
3923 break;
3924 case UHF_PORT_TEST:
3925 case UHF_PORT_INDICATOR:
3926 return -1;
3927 case UHF_C_PORT_CONNECTION:
3928 xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
3929 break;
3930 case UHF_C_PORT_ENABLE:
3931 case UHF_C_PORT_SUSPEND:
3932 case UHF_C_PORT_OVER_CURRENT:
3933 return -1;
3934 case UHF_C_BH_PORT_RESET:
3935 xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
3936 break;
3937 case UHF_C_PORT_RESET:
3938 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
3939 break;
3940 case UHF_C_PORT_LINK_STATE:
3941 xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
3942 break;
3943 case UHF_C_PORT_CONFIG_ERROR:
3944 xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
3945 break;
3946 default:
3947 return -1;
3948 }
3949 break;
3950 }
3951 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
3952 if (len == 0)
3953 break;
3954 if ((value & 0xff) != 0) {
3955 return -1;
3956 }
3957 usb_hub_descriptor_t hubd;
3958
3959 totlen = uimin(buflen, sizeof(hubd));
3960 memcpy(&hubd, buf, totlen);
3961 hubd.bNbrPorts = sc->sc_rhportcount[bn];
3962 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
3963 hubd.bPwrOn2PwrGood = 200;
3964 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
3965 /* XXX can't find out? */
3966 hubd.DeviceRemovable[i++] = 0;
3967 }
3968 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
3969 totlen = uimin(totlen, hubd.bDescLength);
3970 memcpy(buf, &hubd, totlen);
3971 break;
3972 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
3973 if (len != 4) {
3974 return -1;
3975 }
3976 memset(buf, 0, len); /* ? XXX */
3977 totlen = len;
3978 break;
3979 /* Get Port Status request */
3980 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
3981 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
3982
3983 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
3984 bn, index, cp, 0);
3985 if (index < 1 || index > sc->sc_rhportcount[bn]) {
3986 DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
3987 "portcount=%jd",
3988 index, bn, sc->sc_rhportcount[bn], 0);
3989 return -1;
3990 }
3991 if (len != 4) {
3992 			DPRINTFN(5, "bad get port status: len %jd != 4",
3993 len, 0, 0, 0);
3994 return -1;
3995 }
3996 v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
3997 DPRINTFN(4, "getrhportsc %jd %08jx", cp, v, 0, 0);
3998 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
3999 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
4000 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
4001 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
4002 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
4003 if (v & XHCI_PS_PR) i |= UPS_RESET;
4004 if (v & XHCI_PS_PP) {
4005 if (i & UPS_OTHER_SPEED)
4006 i |= UPS_PORT_POWER_SS;
4007 else
4008 i |= UPS_PORT_POWER;
4009 }
4010 if (i & UPS_OTHER_SPEED)
4011 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
4012 if (sc->sc_vendor_port_status)
4013 i = sc->sc_vendor_port_status(sc, v, i);
4014 USETW(ps.wPortStatus, i);
4015 i = 0;
4016 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
4017 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
4018 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
4019 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
4020 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
4021 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
4022 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
4023 USETW(ps.wPortChange, i);
4024 totlen = uimin(len, sizeof(ps));
4025 memcpy(buf, &ps, totlen);
4026 		DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx "
4027 		    "totlen %jd",
4028 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
4029 break;
4030 }
4031 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
4032 return -1;
4033 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
4034 break;
4035 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
4036 break;
4037 /* Set Port Feature request */
4038 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
4039 int optval = (index >> 8) & 0xff;
4040 index &= 0xff;
4041 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4042 return -1;
4043 }
4044
4045 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4046
4047 port = XHCI_PORTSC(cp);
4048 v = xhci_op_read_4(sc, port);
4049 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
4050 v &= ~XHCI_PS_CLEAR;
4051 switch (value) {
4052 case UHF_PORT_ENABLE:
4053 xhci_op_write_4(sc, port, v | XHCI_PS_PED);
4054 break;
4055 case UHF_PORT_SUSPEND:
4056 /* XXX suspend */
4057 break;
4058 case UHF_PORT_RESET:
4059 v &= ~(XHCI_PS_PED | XHCI_PS_PR);
4060 xhci_op_write_4(sc, port, v | XHCI_PS_PR);
4061 /* Wait for reset to complete. */
4062 usb_delay_ms(&sc->sc_bus, USB_PORT_ROOT_RESET_DELAY);
4063 if (sc->sc_dying) {
4064 return -1;
4065 }
4066 v = xhci_op_read_4(sc, port);
4067 if (v & XHCI_PS_PR) {
4068 xhci_op_write_4(sc, port, v & ~XHCI_PS_PR);
4069 usb_delay_ms(&sc->sc_bus, 10);
4070 /* XXX */
4071 }
4072 break;
4073 case UHF_PORT_POWER:
4074 /* XXX power control */
4075 break;
4076 /* XXX more */
4077 case UHF_C_PORT_RESET:
4078 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4079 break;
4080 case UHF_PORT_U1_TIMEOUT:
4081 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4082 return -1;
4083 }
4084 port = XHCI_PORTPMSC(cp);
4085 v = xhci_op_read_4(sc, port);
4086 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4087 index, cp, v, 0);
4088 v &= ~XHCI_PM3_U1TO_SET(0xff);
4089 v |= XHCI_PM3_U1TO_SET(optval);
4090 xhci_op_write_4(sc, port, v);
4091 break;
4092 case UHF_PORT_U2_TIMEOUT:
4093 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4094 return -1;
4095 }
4096 port = XHCI_PORTPMSC(cp);
4097 v = xhci_op_read_4(sc, port);
4098 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4099 index, cp, v, 0);
4100 v &= ~XHCI_PM3_U2TO_SET(0xff);
4101 v |= XHCI_PM3_U2TO_SET(optval);
4102 xhci_op_write_4(sc, port, v);
4103 break;
4104 default:
4105 return -1;
4106 }
4107 }
4108 break;
4109 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
4110 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
4111 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
4112 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
4113 break;
4114 default:
4115 /* default from usbroothub */
4116 return buflen;
4117 }
4118
4119 return totlen;
4120 }
4121
4122 /* root hub interrupt */
4123
4124 static usbd_status
4125 xhci_root_intr_transfer(struct usbd_xfer *xfer)
4126 {
4127 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4128 usbd_status err;
4129
4130 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4131
4132 /* Insert last in queue. */
4133 mutex_enter(&sc->sc_lock);
4134 err = usb_insert_transfer(xfer);
4135 mutex_exit(&sc->sc_lock);
4136 if (err)
4137 return err;
4138
4139 	/* Pipe isn't running, so start it first */
4140 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4141 }
4142
4143 /* Wait for roothub port status/change */
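/*
 * The xfer registered here is completed later from the interrupt/event
 * path when the controller reports a port status change, or cancelled
 * via xhci_root_intr_abort() below.
 */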
4144 static usbd_status
4145 xhci_root_intr_start(struct usbd_xfer *xfer)
4146 {
4147 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4148 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4149 const bool polling = xhci_polling_p(sc);
4150
4151 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4152
4153 if (sc->sc_dying)
4154 return USBD_IOERROR;
4155
4156 if (!polling)
4157 mutex_enter(&sc->sc_lock);
4158 KASSERT(sc->sc_intrxfer[bn] == NULL);
4159 sc->sc_intrxfer[bn] = xfer;
4160 xfer->ux_status = USBD_IN_PROGRESS;
4161 if (!polling)
4162 mutex_exit(&sc->sc_lock);
4163
4164 return USBD_IN_PROGRESS;
4165 }
4166
4167 static void
4168 xhci_root_intr_abort(struct usbd_xfer *xfer)
4169 {
4170 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4171 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4172
4173 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4174
4175 KASSERT(mutex_owned(&sc->sc_lock));
4176 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
4177
4178 /* If xfer has already completed, nothing to do here. */
4179 if (sc->sc_intrxfer[bn] == NULL)
4180 return;
4181
4182 /*
4183 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
4184 * Cancel it.
4185 */
4186 KASSERT(sc->sc_intrxfer[bn] == xfer);
4187 xfer->ux_status = USBD_CANCELLED;
4188 usb_transfer_complete(xfer);
4189 }
4190
4191 static void
4192 xhci_root_intr_close(struct usbd_pipe *pipe)
4193 {
4194 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
4195 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
4196 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4197
4198 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4199
4200 KASSERT(mutex_owned(&sc->sc_lock));
4201
4202 /*
4203 * Caller must guarantee the xfer has completed first, by
4204 * closing the pipe only after normal completion or an abort.
4205 */
4206 KASSERT(sc->sc_intrxfer[bn] == NULL);
4207 }
4208
4209 static void
4210 xhci_root_intr_done(struct usbd_xfer *xfer)
4211 {
4212 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4213 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4214
4215 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4216
4217 KASSERT(mutex_owned(&sc->sc_lock));
4218
4219 /* Claim the xfer so it doesn't get completed again. */
4220 KASSERT(sc->sc_intrxfer[bn] == xfer);
4221 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
4222 sc->sc_intrxfer[bn] = NULL;
4223 }
4224
4225 /* -------------- */
4226 /* device control */
4227
4228 static usbd_status
4229 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
4230 {
4231 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4232 usbd_status err;
4233
4234 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4235
4236 /* Insert last in queue. */
4237 mutex_enter(&sc->sc_lock);
4238 err = usb_insert_transfer(xfer);
4239 mutex_exit(&sc->sc_lock);
4240 if (err)
4241 return err;
4242
4243 	/* Pipe isn't running, so start it first */
4244 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4245 }
4246
4247 static usbd_status
4248 xhci_device_ctrl_start(struct usbd_xfer *xfer)
4249 {
4250 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4251 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4252 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4253 struct xhci_ring * const tr = &xs->xs_ep[dci].xe_tr;
4254 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4255 usb_device_request_t * const req = &xfer->ux_request;
4256 const int isread = usbd_xfer_isread(xfer);
4257 const uint32_t len = UGETW(req->wLength);
4258 usb_dma_t * const dma = &xfer->ux_dmabuf;
4259 uint64_t parameter;
4260 uint32_t status;
4261 uint32_t control;
4262 u_int i;
4263 const bool polling = xhci_polling_p(sc);
4264
4265 XHCIHIST_FUNC();
4266 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
4267 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
4268 UGETW(req->wIndex), UGETW(req->wLength));
4269
4270 /* we rely on the bottom bits for extra info */
4271 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %zx",
4272 (uintptr_t) xfer);
4273
4274 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);
4275
4276 i = 0;
4277
4278 /* setup phase */
4279 	memcpy(&parameter, req, sizeof(parameter));
4280 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
4281 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
4282 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
4283 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
4284 XHCI_TRB_3_IDT_BIT;
4285 /* we need parameter un-swapped on big endian, so pre-swap it here */
4286 xhci_soft_trb_put(&xx->xx_trb[i++], htole64(parameter), status, control);
4287
4288 if (len != 0) {
4289 /* data phase */
4290 parameter = DMAADDR(dma, 0);
4291 KASSERTMSG(len <= 0x10000, "len %d", len);
4292 status = XHCI_TRB_2_IRQ_SET(0) |
4293 XHCI_TRB_2_TDSZ_SET(0) |
4294 XHCI_TRB_2_BYTES_SET(len);
4295 control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
4296 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
4297 (usbd_xfer_isread(xfer) ? XHCI_TRB_3_ISP_BIT : 0) |
4298 XHCI_TRB_3_IOC_BIT;
4299 xhci_soft_trb_put(&xx->xx_trb[i++], parameter, status, control);
4300 }
4301
4302 parameter = 0;
4303 status = XHCI_TRB_2_IRQ_SET(0);
4304 	/* the status stage direction is opposite the data stage (IN if no data) */
4305 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
4306 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
4307 XHCI_TRB_3_IOC_BIT;
4308 xhci_soft_trb_put(&xx->xx_trb[i++], parameter, status, control);
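	/*
	 * Illustrative example (hypothetical request): a GET_DESCRIPTOR of
	 * the 18-byte device descriptor queues three TRBs: a Setup Stage
	 * TRB with TRT_IN and IDT set, a Data Stage TRB reading 18 bytes
	 * with IOC set, and a Status Stage TRB in the OUT direction
	 * (opposite to the data stage) with IOC set.
	 */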
4309
4310 if (!polling)
4311 mutex_enter(&tr->xr_lock);
4312 xhci_ring_put(sc, tr, xfer, xx->xx_trb, i);
4313 if (!polling)
4314 mutex_exit(&tr->xr_lock);
4315
4316 if (!polling)
4317 mutex_enter(&sc->sc_lock);
4318 xfer->ux_status = USBD_IN_PROGRESS;
4319 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4320 usbd_xfer_schedule_timeout(xfer);
4321 if (!polling)
4322 mutex_exit(&sc->sc_lock);
4323
4324 return USBD_IN_PROGRESS;
4325 }
4326
4327 static void
4328 xhci_device_ctrl_done(struct usbd_xfer *xfer)
4329 {
4330 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4331 usb_device_request_t *req = &xfer->ux_request;
4332 int len = UGETW(req->wLength);
4333 int rd = req->bmRequestType & UT_READ;
4334
4335 if (len)
4336 usb_syncmem(&xfer->ux_dmabuf, 0, len,
4337 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4338 }
4339
4340 static void
4341 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
4342 {
4343 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4344
4345 usbd_xfer_abort(xfer);
4346 }
4347
4348 static void
4349 xhci_device_ctrl_close(struct usbd_pipe *pipe)
4350 {
4351 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4352
4353 xhci_close_pipe(pipe);
4354 }
4355
4356 /* ------------------ */
4357 /* device isochronous */
4358
4359 /* ----------- */
4360 /* device bulk */
4361
4362 static usbd_status
4363 xhci_device_bulk_transfer(struct usbd_xfer *xfer)
4364 {
4365 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4366 usbd_status err;
4367
4368 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4369
4370 /* Insert last in queue. */
4371 mutex_enter(&sc->sc_lock);
4372 err = usb_insert_transfer(xfer);
4373 mutex_exit(&sc->sc_lock);
4374 if (err)
4375 return err;
4376
4377 /*
4378 * Pipe isn't running (otherwise err would be USBD_INPROG),
4379 * so start it first.
4380 */
4381 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4382 }
4383
4384 static usbd_status
4385 xhci_device_bulk_start(struct usbd_xfer *xfer)
4386 {
4387 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4388 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4389 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4390 struct xhci_ring * const tr = &xs->xs_ep[dci].xe_tr;
4391 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4392 const uint32_t len = xfer->ux_length;
4393 usb_dma_t * const dma = &xfer->ux_dmabuf;
4394 uint64_t parameter;
4395 uint32_t status;
4396 uint32_t control;
4397 u_int i = 0;
4398 const bool polling = xhci_polling_p(sc);
4399
4400 XHCIHIST_FUNC();
4401 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4402 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4403
4404 if (sc->sc_dying)
4405 return USBD_IOERROR;
4406
4407 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4408
4409 parameter = DMAADDR(dma, 0);
4410 	/*
4411 	 * XXX: (dsl) The physical buffer must not cross a 64k boundary.
4412 	 * If the user-supplied buffer crosses such a boundary then 2
4413 	 * (or more) TRBs should be used.
4414 	 * If multiple TRBs are used, the td_size field must be set correctly.
4415 	 * For v1.0 controllers (like Ivy Bridge) this is the number of USB data
4416 	 * blocks needed to complete the transfer.
4417 	 * Setting it to 1 in the last TRB causes an extra zero-length
4418 	 * data block to be sent.
4419 	 * Earlier documentation revisions differ; I don't know how those behave.
4420 	 */
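	/*
	 * A rough sketch of such splitting (NOT what this driver does).
	 * Assumptions: "mps" is the endpoint's max packet size, the TD
	 * size is the number of packets remaining after each TRB capped
	 * at 31 (xHCI 4.11.2.4), and XHCI_TRB_3_CHAIN_BIT chains the TRBs
	 * of one TD:
	 *
	 *	u_int off, chunk, tdsz;
	 *
	 *	for (off = 0; off < len; off += chunk) {
	 *		// stop each chunk at the next 64k boundary
	 *		chunk = uimin(len - off,
	 *		    0x10000 - (DMAADDR(dma, off) & 0xffff));
	 *		tdsz = uimin(howmany(len - off - chunk, mps), 31);
	 *		status = XHCI_TRB_2_IRQ_SET(0) |
	 *		    XHCI_TRB_2_TDSZ_SET(tdsz) |
	 *		    XHCI_TRB_2_BYTES_SET(chunk);
	 *		// chain all but the last TRB; interrupt on the last
	 *		control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
	 *		    (off + chunk < len ?
	 *		     XHCI_TRB_3_CHAIN_BIT : XHCI_TRB_3_IOC_BIT);
	 *		xhci_soft_trb_put(&xx->xx_trb[i++],
	 *		    DMAADDR(dma, off), status, control);
	 *	}
	 */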
4421 KASSERTMSG(len <= 0x10000, "len %d", len);
4422 status = XHCI_TRB_2_IRQ_SET(0) |
4423 XHCI_TRB_2_TDSZ_SET(0) |
4424 XHCI_TRB_2_BYTES_SET(len);
4425 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4426 (usbd_xfer_isread(xfer) ? XHCI_TRB_3_ISP_BIT : 0) |
4427 XHCI_TRB_3_IOC_BIT;
4428 xhci_soft_trb_put(&xx->xx_trb[i++], parameter, status, control);
4429
4430 if (!polling)
4431 mutex_enter(&tr->xr_lock);
4432 xhci_ring_put(sc, tr, xfer, xx->xx_trb, i);
4433 if (!polling)
4434 mutex_exit(&tr->xr_lock);
4435
4436 if (!polling)
4437 mutex_enter(&sc->sc_lock);
4438 xfer->ux_status = USBD_IN_PROGRESS;
4439 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4440 usbd_xfer_schedule_timeout(xfer);
4441 if (!polling)
4442 mutex_exit(&sc->sc_lock);
4443
4444 return USBD_IN_PROGRESS;
4445 }
4446
4447 static void
4448 xhci_device_bulk_done(struct usbd_xfer *xfer)
4449 {
4450 #ifdef USB_DEBUG
4451 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4452 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4453 #endif
4454 const int isread = usbd_xfer_isread(xfer);
4455
4456 XHCIHIST_FUNC();
4457 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4458 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4459
4460 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4461 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4462 }
4463
4464 static void
4465 xhci_device_bulk_abort(struct usbd_xfer *xfer)
4466 {
4467 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4468
4469 usbd_xfer_abort(xfer);
4470 }
4471
4472 static void
4473 xhci_device_bulk_close(struct usbd_pipe *pipe)
4474 {
4475 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4476
4477 xhci_close_pipe(pipe);
4478 }
4479
4480 /* ---------------- */
4481 /* device interrupt */
4482
4483 static usbd_status
4484 xhci_device_intr_transfer(struct usbd_xfer *xfer)
4485 {
4486 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4487 usbd_status err;
4488
4489 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4490
4491 /* Insert last in queue. */
4492 mutex_enter(&sc->sc_lock);
4493 err = usb_insert_transfer(xfer);
4494 mutex_exit(&sc->sc_lock);
4495 if (err)
4496 return err;
4497
4498 /*
4499 * Pipe isn't running (otherwise err would be USBD_INPROG),
4500 * so start it first.
4501 */
4502 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4503 }
4504
4505 static usbd_status
4506 xhci_device_intr_start(struct usbd_xfer *xfer)
4507 {
4508 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4509 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4510 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4511 struct xhci_ring * const tr = &xs->xs_ep[dci].xe_tr;
4512 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4513 const uint32_t len = xfer->ux_length;
4514 const bool polling = xhci_polling_p(sc);
4515 usb_dma_t * const dma = &xfer->ux_dmabuf;
4516 uint64_t parameter;
4517 uint32_t status;
4518 uint32_t control;
4519 u_int i = 0;
4520
4521 XHCIHIST_FUNC();
4522 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4523 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4524
4525 if (sc->sc_dying)
4526 return USBD_IOERROR;
4527
4528 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4529
4530 parameter = DMAADDR(dma, 0);
4531 KASSERTMSG(len <= 0x10000, "len %d", len);
4532 status = XHCI_TRB_2_IRQ_SET(0) |
4533 XHCI_TRB_2_TDSZ_SET(0) |
4534 XHCI_TRB_2_BYTES_SET(len);
4535 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4536 (usbd_xfer_isread(xfer) ? XHCI_TRB_3_ISP_BIT : 0) |
4537 XHCI_TRB_3_IOC_BIT;
4538 xhci_soft_trb_put(&xx->xx_trb[i++], parameter, status, control);
4539
4540 if (!polling)
4541 mutex_enter(&tr->xr_lock);
4542 xhci_ring_put(sc, tr, xfer, xx->xx_trb, i);
4543 if (!polling)
4544 mutex_exit(&tr->xr_lock);
4545
4546 if (!polling)
4547 mutex_enter(&sc->sc_lock);
4548 xfer->ux_status = USBD_IN_PROGRESS;
4549 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4550 usbd_xfer_schedule_timeout(xfer);
4551 if (!polling)
4552 mutex_exit(&sc->sc_lock);
4553
4554 return USBD_IN_PROGRESS;
4555 }
4556
4557 static void
4558 xhci_device_intr_done(struct usbd_xfer *xfer)
4559 {
4560 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4561 #ifdef USB_DEBUG
4562 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4563 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4564 #endif
4565 const int isread = usbd_xfer_isread(xfer);
4566
4567 XHCIHIST_FUNC();
4568 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4569 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4570
4571 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4572
4573 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4574 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4575 }
4576
4577 static void
4578 xhci_device_intr_abort(struct usbd_xfer *xfer)
4579 {
4580 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4581
4582 XHCIHIST_FUNC();
4583 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);
4584
4585 KASSERT(mutex_owned(&sc->sc_lock));
4586 usbd_xfer_abort(xfer);
4587 }
4588
4589 static void
4590 xhci_device_intr_close(struct usbd_pipe *pipe)
4591 {
4592 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
4593
4594 XHCIHIST_FUNC();
4595 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);
4596
4597 xhci_close_pipe(pipe);
4598 }
4599