      1 /*	$NetBSD: dwc2.c,v 1.77 2021/12/21 09:51:22 skrll Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Nick Hudson
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: dwc2.c,v 1.77 2021/12/21 09:51:22 skrll Exp $");
     34 
     35 #include "opt_usb.h"
     36 
     37 #include <sys/param.h>
     38 
     39 #include <sys/cpu.h>
     40 #include <sys/device.h>
     41 #include <sys/kernel.h>
     42 #include <sys/kmem.h>
     43 #include <sys/proc.h>
     44 #include <sys/queue.h>
     45 #include <sys/select.h>
     46 #include <sys/sysctl.h>
     47 #include <sys/systm.h>
     48 
     49 #include <machine/endian.h>
     50 
     51 #include <dev/usb/usb.h>
     52 #include <dev/usb/usbdi.h>
     53 #include <dev/usb/usbdivar.h>
     54 #include <dev/usb/usb_mem.h>
     55 #include <dev/usb/usbroothub.h>
     56 
     57 #include <dwc2/dwc2.h>
     58 #include <dwc2/dwc2var.h>
     59 
     60 #include "dwc2_core.h"
     61 #include "dwc2_hcd.h"
     62 
     63 #ifdef DWC2_COUNTERS
     64 #define	DWC2_EVCNT_ADD(a,b)	((void)((a).ev_count += (b)))
     65 #else
     66 #define	DWC2_EVCNT_ADD(a,b)	do { } while (/*CONSTCOND*/0)
     67 #endif
     68 #define	DWC2_EVCNT_INCR(a)	DWC2_EVCNT_ADD((a), 1)
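/*
 * Note (informational sketch): with DWC2_COUNTERS defined these update the
 * driver's struct evcnt event counters, which can be inspected at runtime
 * with "vmstat -e".
 */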
     69 
     70 #ifdef DWC2_DEBUG
     71 #define	DPRINTFN(n,fmt,...) do {			\
     72 	if (dwc2debug >= (n)) {			\
     73 		printf("%s: " fmt,			\
     74 		__FUNCTION__,## __VA_ARGS__);		\
     75 	}						\
     76 } while (0)
     77 #define	DPRINTF(...)	DPRINTFN(1, __VA_ARGS__)
     78 int dwc2debug = 0;
     79 
     80 SYSCTL_SETUP(sysctl_hw_dwc2_setup, "sysctl hw.dwc2 setup")
     81 {
     82 	int err;
     83 	const struct sysctlnode *rnode;
     84 	const struct sysctlnode *cnode;
     85 
     86 	err = sysctl_createv(clog, 0, NULL, &rnode,
     87 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "dwc2",
     88 	    SYSCTL_DESCR("dwc2 global controls"),
     89 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
     90 
     91 	if (err)
     92 		goto fail;
     93 
     94 	/* control debugging printfs */
     95 	err = sysctl_createv(clog, 0, &rnode, &cnode,
     96 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
     97 	    "debug", SYSCTL_DESCR("Enable debugging output"),
     98 	    NULL, 0, &dwc2debug, sizeof(dwc2debug), CTL_CREATE, CTL_EOL);
     99 	if (err)
    100 		goto fail;
    101 
    102 	return;
    103 fail:
    104 	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
    105 }
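/*
 * Usage sketch: on a kernel built with DWC2_DEBUG, the verbosity can be
 * raised at runtime with e.g. "sysctl -w hw.dwc2.debug=1".
 */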
    106 #else
    107 #define	DPRINTF(...) do { } while (0)
    108 #define	DPRINTFN(...) do { } while (0)
    109 #endif
    110 
    111 Static usbd_status	dwc2_open(struct usbd_pipe *);
    112 Static void		dwc2_poll(struct usbd_bus *);
    113 Static void		dwc2_softintr(void *);
    114 
    115 Static struct usbd_xfer *
    116 			dwc2_allocx(struct usbd_bus *, unsigned int);
    117 Static void		dwc2_freex(struct usbd_bus *, struct usbd_xfer *);
    118 Static void		dwc2_get_lock(struct usbd_bus *, kmutex_t **);
    119 Static bool		dwc2_dying(struct usbd_bus *);
    120 Static int		dwc2_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
    121 			    void *, int);
    122 
    123 Static usbd_status	dwc2_root_intr_transfer(struct usbd_xfer *);
    124 Static usbd_status	dwc2_root_intr_start(struct usbd_xfer *);
    125 Static void		dwc2_root_intr_abort(struct usbd_xfer *);
    126 Static void		dwc2_root_intr_close(struct usbd_pipe *);
    127 Static void		dwc2_root_intr_done(struct usbd_xfer *);
    128 
    129 Static usbd_status	dwc2_device_ctrl_transfer(struct usbd_xfer *);
    130 Static usbd_status	dwc2_device_ctrl_start(struct usbd_xfer *);
    131 Static void		dwc2_device_ctrl_abort(struct usbd_xfer *);
    132 Static void		dwc2_device_ctrl_close(struct usbd_pipe *);
    133 Static void		dwc2_device_ctrl_done(struct usbd_xfer *);
    134 
    135 Static usbd_status	dwc2_device_bulk_transfer(struct usbd_xfer *);
    136 Static void		dwc2_device_bulk_abort(struct usbd_xfer *);
    137 Static void		dwc2_device_bulk_close(struct usbd_pipe *);
    138 Static void		dwc2_device_bulk_done(struct usbd_xfer *);
    139 
    140 Static usbd_status	dwc2_device_intr_transfer(struct usbd_xfer *);
    141 Static usbd_status	dwc2_device_intr_start(struct usbd_xfer *);
    142 Static void		dwc2_device_intr_abort(struct usbd_xfer *);
    143 Static void		dwc2_device_intr_close(struct usbd_pipe *);
    144 Static void		dwc2_device_intr_done(struct usbd_xfer *);
    145 
    146 Static usbd_status	dwc2_device_isoc_transfer(struct usbd_xfer *);
    147 Static void		dwc2_device_isoc_abort(struct usbd_xfer *);
    148 Static void		dwc2_device_isoc_close(struct usbd_pipe *);
    149 Static void		dwc2_device_isoc_done(struct usbd_xfer *);
    150 
    151 Static usbd_status	dwc2_device_start(struct usbd_xfer *);
    152 
    153 Static void		dwc2_close_pipe(struct usbd_pipe *);
    154 Static void		dwc2_abortx(struct usbd_xfer *);
    155 
    156 Static void		dwc2_device_clear_toggle(struct usbd_pipe *);
    157 Static void		dwc2_noop(struct usbd_pipe *pipe);
    158 
    159 Static int		dwc2_interrupt(struct dwc2_softc *);
    160 Static void		dwc2_rhc(void *);
    161 
    162 
    163 static inline void
    164 dwc2_allocate_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
    165 			    struct usbd_xfer *xfer)
    166 {
    167 }
    168 
    169 static inline void
    170 dwc2_free_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
    171 			struct usbd_xfer *xfer)
    172 {
    173 }
    174 
    175 Static const struct usbd_bus_methods dwc2_bus_methods = {
    176 	.ubm_open =	dwc2_open,
    177 	.ubm_softint =	dwc2_softintr,
    178 	.ubm_dopoll =	dwc2_poll,
    179 	.ubm_allocx =	dwc2_allocx,
    180 	.ubm_freex =	dwc2_freex,
    181 	.ubm_abortx =	dwc2_abortx,
    182 	.ubm_dying =	dwc2_dying,
    183 	.ubm_getlock =	dwc2_get_lock,
    184 	.ubm_rhctrl =	dwc2_roothub_ctrl,
    185 };
    186 
    187 Static const struct usbd_pipe_methods dwc2_root_intr_methods = {
    188 	.upm_transfer =	dwc2_root_intr_transfer,
    189 	.upm_start =	dwc2_root_intr_start,
    190 	.upm_abort =	dwc2_root_intr_abort,
    191 	.upm_close =	dwc2_root_intr_close,
    192 	.upm_cleartoggle =	dwc2_noop,
    193 	.upm_done =	dwc2_root_intr_done,
    194 };
    195 
    196 Static const struct usbd_pipe_methods dwc2_device_ctrl_methods = {
    197 	.upm_transfer =	dwc2_device_ctrl_transfer,
    198 	.upm_start =	dwc2_device_ctrl_start,
    199 	.upm_abort =	dwc2_device_ctrl_abort,
    200 	.upm_close =	dwc2_device_ctrl_close,
    201 	.upm_cleartoggle =	dwc2_noop,
    202 	.upm_done =	dwc2_device_ctrl_done,
    203 };
    204 
    205 Static const struct usbd_pipe_methods dwc2_device_intr_methods = {
    206 	.upm_transfer =	dwc2_device_intr_transfer,
    207 	.upm_start =	dwc2_device_intr_start,
    208 	.upm_abort =	dwc2_device_intr_abort,
    209 	.upm_close =	dwc2_device_intr_close,
    210 	.upm_cleartoggle =	dwc2_device_clear_toggle,
    211 	.upm_done =	dwc2_device_intr_done,
    212 };
    213 
    214 Static const struct usbd_pipe_methods dwc2_device_bulk_methods = {
    215 	.upm_transfer =	dwc2_device_bulk_transfer,
    216 	.upm_abort =	dwc2_device_bulk_abort,
    217 	.upm_close =	dwc2_device_bulk_close,
    218 	.upm_cleartoggle =	dwc2_device_clear_toggle,
    219 	.upm_done =	dwc2_device_bulk_done,
    220 };
    221 
    222 Static const struct usbd_pipe_methods dwc2_device_isoc_methods = {
    223 	.upm_transfer =	dwc2_device_isoc_transfer,
    224 	.upm_abort =	dwc2_device_isoc_abort,
    225 	.upm_close =	dwc2_device_isoc_close,
    226 	.upm_cleartoggle =	dwc2_noop,
    227 	.upm_done =	dwc2_device_isoc_done,
    228 };
    229 
    230 struct usbd_xfer *
    231 dwc2_allocx(struct usbd_bus *bus, unsigned int nframes)
    232 {
    233 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    234 	struct dwc2_xfer *dxfer;
    235 
    236 	DPRINTFN(10, "\n");
    237 
    238 	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolget);
    239 	dxfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
    240 	if (dxfer != NULL) {
    241 		memset(dxfer, 0, sizeof(*dxfer));
    242 		dxfer->urb = dwc2_hcd_urb_alloc(sc->sc_hsotg,
    243 		    nframes, GFP_KERNEL);
    244 #ifdef DIAGNOSTIC
    245 		dxfer->xfer.ux_state = XFER_BUSY;
    246 #endif
    247 	}
    248 	return (struct usbd_xfer *)dxfer;
    249 }
    250 
    251 void
    252 dwc2_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
    253 {
    254 	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
    255 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    256 
    257 	DPRINTFN(10, "\n");
    258 
    259 #ifdef DIAGNOSTIC
    260 	if (xfer->ux_state != XFER_BUSY &&
    261 	    xfer->ux_status != USBD_NOT_STARTED) {
    262 		DPRINTF("xfer=%p not busy, 0x%08x\n", xfer, xfer->ux_state);
    263 	}
    264 	xfer->ux_state = XFER_FREE;
    265 #endif
    266 	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolput);
    267 	dwc2_hcd_urb_free(sc->sc_hsotg, dxfer->urb, dxfer->urb->packet_count);
    268 	pool_cache_put(sc->sc_xferpool, xfer);
    269 }
    270 
    271 Static bool
    272 dwc2_dying(struct usbd_bus *bus)
    273 {
    274 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    275 
    276 	return sc->sc_dying;
    277 }
    278 
    279 Static void
    280 dwc2_get_lock(struct usbd_bus *bus, kmutex_t **lock)
    281 {
    282 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    283 
    284 	*lock = &sc->sc_lock;
    285 }
    286 
    287 Static void
    288 dwc2_rhc(void *addr)
    289 {
    290 	struct dwc2_softc *sc = addr;
    291 	struct usbd_xfer *xfer;
    292 	u_char *p;
    293 
    294 	DPRINTF("\n");
    295 	mutex_enter(&sc->sc_lock);
    296 	xfer = sc->sc_intrxfer;
    297 
    298 	if (xfer == NULL) {
    299 		/* Just ignore the change. */
    300 		mutex_exit(&sc->sc_lock);
    301 		return;
    302 
    303 	}
    304 	KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
    305 
    306 	/* set port bit */
    307 	p = KERNADDR(&xfer->ux_dmabuf, 0);
    308 
    309 	p[0] = 0x02;	/* we only have one port (1 << 1) */
    310 
    311 	xfer->ux_actlen = xfer->ux_length;
    312 	xfer->ux_status = USBD_NORMAL_COMPLETION;
    313 
    314 	usb_transfer_complete(xfer);
    315 	mutex_exit(&sc->sc_lock);
    316 }
    317 
    318 Static void
    319 dwc2_softintr(void *v)
    320 {
    321 	struct usbd_bus *bus = v;
    322 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    323 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
    324 	struct dwc2_xfer *dxfer, *next;
    325 	TAILQ_HEAD(, dwc2_xfer) claimed = TAILQ_HEAD_INITIALIZER(claimed);
    326 
    327 	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
    328 
    329 	/*
    330 	 * Grab all the xfers that have not been aborted or timed out.
    331 	 * Do so under a single lock -- without dropping it to run
    332 	 * usb_transfer_complete as we go -- so that dwc2_abortx won't
    333 	 * remove next out from under us during iteration when we've
    334 	 * dropped the lock.
    335 	 */
    336 	mutex_spin_enter(&hsotg->lock);
    337 	TAILQ_FOREACH_SAFE(dxfer, &sc->sc_complete, xnext, next) {
    338 		if (!usbd_xfer_trycomplete(&dxfer->xfer))
    339 			/*
     340 			 * The hard interrupt handler decided to
    341 			 * complete the xfer, and put it on sc_complete
    342 			 * to pass it to us in the soft interrupt
    343 			 * handler, but in the time between hard
    344 			 * interrupt and soft interrupt, the xfer was
    345 			 * aborted or timed out and we lost the race.
    346 			 */
    347 			continue;
    348 		KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS);
    349 		KASSERT(dxfer->intr_status != USBD_CANCELLED);
    350 		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
    351 		TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
    352 		TAILQ_INSERT_TAIL(&claimed, dxfer, xnext);
    353 	}
    354 	mutex_spin_exit(&hsotg->lock);
    355 
    356 	/* Now complete them.  */
    357 	while (!TAILQ_EMPTY(&claimed)) {
    358 		dxfer = TAILQ_FIRST(&claimed);
    359 		KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS);
    360 		KASSERT(dxfer->intr_status != USBD_CANCELLED);
    361 		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
    362 		TAILQ_REMOVE(&claimed, dxfer, xnext);
    363 
    364 		dxfer->xfer.ux_status = dxfer->intr_status;
    365 		usb_transfer_complete(&dxfer->xfer);
    366 	}
    367 }
    368 
    369 usbd_status
    370 dwc2_open(struct usbd_pipe *pipe)
    371 {
    372 	struct usbd_device *dev = pipe->up_dev;
    373 	struct dwc2_softc *sc = DWC2_PIPE2SC(pipe);
    374 	struct dwc2_pipe *dpipe = DWC2_PIPE2DPIPE(pipe);
    375 	usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc;
    376 	uint8_t addr = dev->ud_addr;
    377 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
    378 
    379 	DPRINTF("pipe %p addr %d xfertype %d dir %s\n", pipe, addr, xfertype,
    380 	    UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN ? "in" : "out");
    381 
    382 	if (sc->sc_dying) {
    383 		return USBD_IOERROR;
    384 	}
    385 
    386 	if (addr == dev->ud_bus->ub_rhaddr) {
    387 		switch (ed->bEndpointAddress) {
    388 		case USB_CONTROL_ENDPOINT:
    389 			pipe->up_methods = &roothub_ctrl_methods;
    390 			break;
    391 		case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
    392 			pipe->up_methods = &dwc2_root_intr_methods;
    393 			break;
    394 		default:
    395 			DPRINTF("bad bEndpointAddress 0x%02x\n",
    396 			    ed->bEndpointAddress);
    397 			return USBD_INVAL;
    398 		}
    399 		DPRINTF("root hub pipe open\n");
    400 		return USBD_NORMAL_COMPLETION;
    401 	}
    402 
    403 	switch (xfertype) {
    404 	case UE_CONTROL:
    405 		pipe->up_methods = &dwc2_device_ctrl_methods;
    406 		int err = usb_allocmem(sc->sc_bus.ub_dmatag, sizeof(usb_device_request_t),
    407 		    0, USBMALLOC_COHERENT, &dpipe->req_dma);
    408 		if (err)
    409 			return USBD_NOMEM;
    410 		break;
    411 	case UE_INTERRUPT:
    412 		pipe->up_methods = &dwc2_device_intr_methods;
    413 		break;
    414 	case UE_ISOCHRONOUS:
    415 		pipe->up_serialise = false;
    416 		pipe->up_methods = &dwc2_device_isoc_methods;
    417 		break;
    418 	case UE_BULK:
    419 		pipe->up_serialise = false;
    420 		pipe->up_methods = &dwc2_device_bulk_methods;
    421 		break;
    422 	default:
    423 		DPRINTF("bad xfer type %d\n", xfertype);
    424 		return USBD_INVAL;
    425 	}
    426 
    427 	/* QH */
    428 	dpipe->priv = NULL;
    429 
    430 	return USBD_NORMAL_COMPLETION;
    431 }
    432 
    433 Static void
    434 dwc2_poll(struct usbd_bus *bus)
    435 {
    436 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    437 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
    438 
    439 	mutex_spin_enter(&hsotg->lock);
    440 	dwc2_interrupt(sc);
    441 	mutex_spin_exit(&hsotg->lock);
    442 }
    443 
    444 /*
     445  * Close a regular pipe.
    446  * Assumes that there are no pending transactions.
    447  */
    448 Static void
    449 dwc2_close_pipe(struct usbd_pipe *pipe)
    450 {
    451 	struct dwc2_softc *sc __diagused = pipe->up_dev->ud_bus->ub_hcpriv;
    452 
    453 	KASSERT(mutex_owned(&sc->sc_lock));
    454 }
    455 
    456 /*
    457  * Abort a device request.
    458  */
    459 Static void
    460 dwc2_abortx(struct usbd_xfer *xfer)
    461 {
    462 	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
    463 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    464 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
    465 	struct dwc2_xfer *d;
    466 	int err;
    467 
    468 	DPRINTF("xfer %p pipe %p status 0x%08x", xfer, xfer->ux_pipe,
    469 	    xfer->ux_status);
    470 
    471 	KASSERT(mutex_owned(&sc->sc_lock));
    472 	ASSERT_SLEEPABLE();
    473 
    474 	KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
    475 		xfer->ux_status == USBD_TIMEOUT),
    476 	    "bad abort status: %d", xfer->ux_status);
    477 
    478 	mutex_spin_enter(&hsotg->lock);
    479 
    480 	/*
    481 	 * Check whether we aborted or timed out after the hardware
    482 	 * completion interrupt determined that it's done but before
    483 	 * the soft interrupt could actually complete it.  If so, it's
    484 	 * too late for the soft interrupt -- at this point we've
    485 	 * already committed to abort it or time it out, so we need to
    486 	 * take it off the softint's list of work in case the caller,
    487 	 * say, frees the xfer before the softint runs.
    488 	 *
    489 	 * This logic is unusual among host controller drivers, and
    490 	 * happens because dwc2 decides to complete xfers in the hard
    491 	 * interrupt handler rather than in the soft interrupt handler,
    492 	 * but usb_transfer_complete must be deferred to softint -- and
    493 	 * we happened to swoop in between the hard interrupt and the
    494 	 * soft interrupt.  Other host controller drivers do almost all
    495 	 * processing in the softint so there's no intermediate stage.
    496 	 *
    497 	 * Fortunately, this linear search to discern the intermediate
     498 	 * stage is not likely to have a serious performance impact
    499 	 * because it happens only on abort or timeout.
    500 	 */
    501 	TAILQ_FOREACH(d, &sc->sc_complete, xnext) {
    502 		if (d == dxfer) {
    503 			TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
    504 			break;
    505 		}
    506 	}
    507 
    508 	/*
    509 	 * If we're dying, skip the hardware action and just notify the
    510 	 * software that we're done.
    511 	 */
    512 	if (sc->sc_dying) {
    513 		DPRINTFN(4, "xfer %p dying 0x%08x", xfer, xfer->ux_status);
    514 		goto dying;
    515 	}
    516 
    517 	/*
    518 	 * HC Step 1: Handle the hardware.
    519 	 */
    520 	err = dwc2_hcd_urb_dequeue(hsotg, dxfer->urb);
    521 	if (err) {
    522 		DPRINTF("dwc2_hcd_urb_dequeue failed\n");
    523 	}
    524 
    525 dying:
    526 	mutex_spin_exit(&hsotg->lock);
    527 
    528 	/*
    529 	 * Final Step: Notify completion to waiting xfers.
    530 	 */
    531 	usb_transfer_complete(xfer);
    532 	KASSERT(mutex_owned(&sc->sc_lock));
    533 }
    534 
    535 Static void
    536 dwc2_noop(struct usbd_pipe *pipe)
    537 {
    538 
    539 }
    540 
    541 Static void
    542 dwc2_device_clear_toggle(struct usbd_pipe *pipe)
    543 {
    544 
    545 	DPRINTF("toggle %d -> 0", pipe->up_endpoint->ue_toggle);
    546 }
    547 
    548 /***********************************************************************/
    549 
    550 Static int
    551 dwc2_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
    552     void *buf, int buflen)
    553 {
    554 	struct dwc2_softc *sc = bus->ub_hcpriv;
    555 	usbd_status err = USBD_IOERROR;
    556 	uint16_t len, value, index;
    557 	int totlen = 0;
    558 
    559 	if (sc->sc_dying)
    560 		return -1;
    561 
    562 	DPRINTFN(4, "type=0x%02x request=%02x\n",
    563 	    req->bmRequestType, req->bRequest);
    564 
    565 	len = UGETW(req->wLength);
    566 	value = UGETW(req->wValue);
    567 	index = UGETW(req->wIndex);
    568 
    569 #define C(x,y) ((x) | ((y) << 8))
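	/*
	 * C() packs bRequest into the low byte and bmRequestType into the
	 * high byte so that one switch can match on both at once, e.g.
	 * C(UR_GET_DESCRIPTOR, UT_READ_DEVICE) below.
	 */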
    570 	switch (C(req->bRequest, req->bmRequestType)) {
    571 	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
    572 		DPRINTFN(8, "wValue=0x%04x\n", value);
    573 
    574 		if (len == 0)
    575 			break;
    576 		switch (value) {
    577 #define sd ((usb_string_descriptor_t *)buf)
    578 		case C(2, UDESC_STRING):
    579 			/* Product */
    580 			totlen = usb_makestrdesc(sd, len, "DWC2 root hub");
    581 			break;
    582 #undef sd
    583 		default:
    584 			/* default from usbroothub */
    585 			return buflen;
    586 		}
    587 		break;
    588 
    589 	case C(UR_GET_CONFIG, UT_READ_DEVICE):
    590 	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
    591 	case C(UR_GET_STATUS, UT_READ_INTERFACE):
    592 	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
    593 	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
    594 	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
    595 		/* default from usbroothub */
    596 		DPRINTFN(4, "returning %d (usbroothub default)", buflen);
    597 
    598 		return buflen;
    599 
    600 	default:
    601 		/* Hub requests */
    602 		err = dwc2_hcd_hub_control(sc->sc_hsotg,
    603 		    C(req->bRequest, req->bmRequestType), value, index,
    604 		    buf, len);
    605 		if (err) {
    606 			return -1;
    607 		}
    608 		totlen = len;
    609 	}
    610 
    611 	return totlen;
    612 }
    613 
    614 Static usbd_status
    615 dwc2_root_intr_transfer(struct usbd_xfer *xfer)
    616 {
    617 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    618 	usbd_status err;
    619 
    620 	DPRINTF("\n");
    621 
    622 	/* Insert last in queue. */
    623 	mutex_enter(&sc->sc_lock);
    624 	err = usb_insert_transfer(xfer);
    625 	mutex_exit(&sc->sc_lock);
    626 	if (err)
    627 		return err;
    628 
    629 	/* Pipe isn't running, start first */
    630 	return dwc2_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
    631 }
    632 
    633 Static usbd_status
    634 dwc2_root_intr_start(struct usbd_xfer *xfer)
    635 {
    636 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    637 	const bool polling = sc->sc_bus.ub_usepolling;
    638 
    639 	DPRINTF("\n");
    640 
    641 	if (sc->sc_dying)
    642 		return USBD_IOERROR;
    643 
    644 	if (!polling)
    645 		mutex_enter(&sc->sc_lock);
    646 	KASSERT(sc->sc_intrxfer == NULL);
    647 	sc->sc_intrxfer = xfer;
    648 	xfer->ux_status = USBD_IN_PROGRESS;
    649 	if (!polling)
    650 		mutex_exit(&sc->sc_lock);
    651 
    652 	return USBD_IN_PROGRESS;
    653 }
    654 
    655 /* Abort a root interrupt request. */
    656 Static void
    657 dwc2_root_intr_abort(struct usbd_xfer *xfer)
    658 {
    659 	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
    660 
    661 	DPRINTF("xfer=%p\n", xfer);
    662 
    663 	KASSERT(mutex_owned(&sc->sc_lock));
    664 	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
    665 
    666 	/* If xfer has already completed, nothing to do here.  */
    667 	if (sc->sc_intrxfer == NULL)
    668 		return;
    669 
    670 	/*
    671 	 * Otherwise, sc->sc_intrxfer had better be this transfer.
    672 	 * Cancel it.
    673 	 */
    674 	KASSERT(sc->sc_intrxfer == xfer);
    675 	KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
    676 	xfer->ux_status = USBD_CANCELLED;
    677 	usb_transfer_complete(xfer);
    678 }
    679 
    680 Static void
    681 dwc2_root_intr_close(struct usbd_pipe *pipe)
    682 {
    683 	struct dwc2_softc *sc __diagused = DWC2_PIPE2SC(pipe);
    684 
    685 	DPRINTF("\n");
    686 
    687 	KASSERT(mutex_owned(&sc->sc_lock));
    688 
    689 	/*
    690 	 * Caller must guarantee the xfer has completed first, by
    691 	 * closing the pipe only after normal completion or an abort.
    692 	 */
    693 	KASSERT(sc->sc_intrxfer == NULL);
    694 }
    695 
    696 Static void
    697 dwc2_root_intr_done(struct usbd_xfer *xfer)
    698 {
    699 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    700 
    701 	DPRINTF("\n");
    702 
    703 	/* Claim the xfer so it doesn't get completed again.  */
    704 	KASSERT(sc->sc_intrxfer == xfer);
    705 	KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
    706 	sc->sc_intrxfer = NULL;
    707 }
    708 
    709 /***********************************************************************/
    710 
    711 Static usbd_status
    712 dwc2_device_ctrl_transfer(struct usbd_xfer *xfer)
    713 {
    714 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    715 	usbd_status err;
    716 
    717 	DPRINTF("\n");
    718 
    719 	/* Insert last in queue. */
    720 	mutex_enter(&sc->sc_lock);
    721 	err = usb_insert_transfer(xfer);
    722 	mutex_exit(&sc->sc_lock);
    723 	if (err)
    724 		return err;
    725 
    726 	/* Pipe isn't running, start first */
    727 	return dwc2_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
    728 }
    729 
    730 Static usbd_status
    731 dwc2_device_ctrl_start(struct usbd_xfer *xfer)
    732 {
    733 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    734 	usbd_status err;
    735 	const bool polling = sc->sc_bus.ub_usepolling;
    736 
    737 	DPRINTF("\n");
    738 
    739 	if (!polling)
    740 		mutex_enter(&sc->sc_lock);
    741 	xfer->ux_status = USBD_IN_PROGRESS;
    742 	err = dwc2_device_start(xfer);
    743 	if (!polling)
    744 		mutex_exit(&sc->sc_lock);
    745 
    746 	if (err)
    747 		return err;
    748 
    749 	return USBD_IN_PROGRESS;
    750 }
    751 
    752 Static void
    753 dwc2_device_ctrl_abort(struct usbd_xfer *xfer)
    754 {
    755 	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
    756 
    757 	KASSERT(mutex_owned(&sc->sc_lock));
    758 
    759 	DPRINTF("xfer=%p\n", xfer);
    760 	usbd_xfer_abort(xfer);
    761 }
    762 
    763 Static void
    764 dwc2_device_ctrl_close(struct usbd_pipe *pipe)
    765 {
    766 	struct dwc2_pipe * const dpipe = DWC2_PIPE2DPIPE(pipe);
    767 
    768 	DPRINTF("pipe=%p\n", pipe);
    769 	dwc2_close_pipe(pipe);
    770 
    771 	usb_freemem(&dpipe->req_dma);
    772 }
    773 
    774 Static void
    775 dwc2_device_ctrl_done(struct usbd_xfer *xfer)
    776 {
    777 
    778 	DPRINTF("xfer=%p\n", xfer);
    779 }
    780 
    781 /***********************************************************************/
    782 
    783 Static usbd_status
    784 dwc2_device_bulk_transfer(struct usbd_xfer *xfer)
    785 {
    786 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    787 	usbd_status err;
    788 
    789 	DPRINTF("xfer=%p\n", xfer);
    790 
    791 	/* Insert last in queue. */
    792 	mutex_enter(&sc->sc_lock);
    793 	err = usb_insert_transfer(xfer);
    794 
    795 	KASSERT(err == USBD_NORMAL_COMPLETION);
    796 
    797 	xfer->ux_status = USBD_IN_PROGRESS;
    798 	err = dwc2_device_start(xfer);
    799 	mutex_exit(&sc->sc_lock);
    800 
    801 	return err;
    802 }
    803 
    804 Static void
    805 dwc2_device_bulk_abort(struct usbd_xfer *xfer)
    806 {
    807 	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
    808 
    809 	KASSERT(mutex_owned(&sc->sc_lock));
    810 
    811 	DPRINTF("xfer=%p\n", xfer);
    812 	usbd_xfer_abort(xfer);
    813 }
    814 
    815 Static void
    816 dwc2_device_bulk_close(struct usbd_pipe *pipe)
    817 {
    818 
    819 	DPRINTF("pipe=%p\n", pipe);
    820 
    821 	dwc2_close_pipe(pipe);
    822 }
    823 
    824 Static void
    825 dwc2_device_bulk_done(struct usbd_xfer *xfer)
    826 {
    827 
    828 	DPRINTF("xfer=%p\n", xfer);
    829 }
    830 
    831 /***********************************************************************/
    832 
    833 Static usbd_status
    834 dwc2_device_intr_transfer(struct usbd_xfer *xfer)
    835 {
    836 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    837 	usbd_status err;
    838 
    839 	DPRINTF("xfer=%p\n", xfer);
    840 
    841 	/* Insert last in queue. */
    842 	mutex_enter(&sc->sc_lock);
    843 	err = usb_insert_transfer(xfer);
    844 	mutex_exit(&sc->sc_lock);
    845 	if (err)
    846 		return err;
    847 
    848 	/* Pipe isn't running, start first */
    849 	return dwc2_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
    850 }
    851 
    852 Static usbd_status
    853 dwc2_device_intr_start(struct usbd_xfer *xfer)
    854 {
     855 	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
    856 	struct usbd_device *dev = dpipe->pipe.up_dev;
    857 	struct dwc2_softc *sc = dev->ud_bus->ub_hcpriv;
    858 	usbd_status err;
    859 	const bool polling = sc->sc_bus.ub_usepolling;
    860 
    861 	if (!polling)
    862 		mutex_enter(&sc->sc_lock);
    863 	xfer->ux_status = USBD_IN_PROGRESS;
    864 	err = dwc2_device_start(xfer);
    865 	if (!polling)
    866 		mutex_exit(&sc->sc_lock);
    867 
    868 	if (err)
    869 		return err;
    870 
    871 	return USBD_IN_PROGRESS;
    872 }
    873 
    874 /* Abort a device interrupt request. */
    875 Static void
    876 dwc2_device_intr_abort(struct usbd_xfer *xfer)
    877 {
    878 	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
    879 
    880 	KASSERT(mutex_owned(&sc->sc_lock));
    881 
    882 	DPRINTF("xfer=%p\n", xfer);
    883 	usbd_xfer_abort(xfer);
    884 }
    885 
    886 Static void
    887 dwc2_device_intr_close(struct usbd_pipe *pipe)
    888 {
    889 
    890 	DPRINTF("pipe=%p\n", pipe);
    891 
    892 	dwc2_close_pipe(pipe);
    893 }
    894 
    895 Static void
    896 dwc2_device_intr_done(struct usbd_xfer *xfer)
    897 {
    898 
    899 	DPRINTF("\n");
    900 }
    901 
    902 /***********************************************************************/
    903 
    904 usbd_status
    905 dwc2_device_isoc_transfer(struct usbd_xfer *xfer)
    906 {
    907 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    908 	usbd_status err;
    909 
    910 	DPRINTF("xfer=%p\n", xfer);
    911 
    912 	/* Insert last in queue. */
    913 	mutex_enter(&sc->sc_lock);
    914 	err = usb_insert_transfer(xfer);
    915 
    916 	KASSERT(err == USBD_NORMAL_COMPLETION);
    917 
    918 	xfer->ux_status = USBD_IN_PROGRESS;
    919 	err = dwc2_device_start(xfer);
    920 	mutex_exit(&sc->sc_lock);
    921 
    922 	return err;
    923 }
    924 
    925 void
    926 dwc2_device_isoc_abort(struct usbd_xfer *xfer)
    927 {
    928 	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
    929 	KASSERT(mutex_owned(&sc->sc_lock));
    930 
    931 	DPRINTF("xfer=%p\n", xfer);
    932 	usbd_xfer_abort(xfer);
    933 }
    934 
    935 void
    936 dwc2_device_isoc_close(struct usbd_pipe *pipe)
    937 {
    938 	DPRINTF("\n");
    939 
    940 	dwc2_close_pipe(pipe);
    941 }
    942 
    943 void
    944 dwc2_device_isoc_done(struct usbd_xfer *xfer)
    945 {
    946 
    947 	DPRINTF("\n");
    948 }
    949 
    950 
    951 usbd_status
    952 dwc2_device_start(struct usbd_xfer *xfer)
    953 {
    954  	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
    955 	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
    956 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    957 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
    958 	struct dwc2_hcd_urb *dwc2_urb;
    959 
    960 	struct usbd_device *dev = xfer->ux_pipe->up_dev;
    961 	usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
    962 	uint8_t addr = dev->ud_addr;
    963 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
    964 	uint8_t epnum = UE_GET_ADDR(ed->bEndpointAddress);
    965 	uint8_t dir = UE_GET_DIR(ed->bEndpointAddress);
    966 	uint16_t mps = UE_GET_SIZE(UGETW(ed->wMaxPacketSize));
    967 	uint32_t len;
    968 
    969 	uint32_t flags = 0;
    970 	uint32_t off = 0;
    971 	int retval, err;
    972 	int alloc_bandwidth = 0;
    973 
    974 	DPRINTFN(1, "xfer=%p pipe=%p\n", xfer, xfer->ux_pipe);
    975 
    976 	if (xfertype == UE_ISOCHRONOUS ||
    977 	    xfertype == UE_INTERRUPT) {
    978 		mutex_spin_enter(&hsotg->lock);
    979 		if (!dwc2_hcd_is_bandwidth_allocated(hsotg, xfer))
    980 			alloc_bandwidth = 1;
    981 		mutex_spin_exit(&hsotg->lock);
    982 	}
    983 
    984 	/*
     985 	 * For control pipes the direction comes from the request; all other
     986 	 * transfer types were set correctly at pipe open time.
    987 	 */
    988 	if (xfertype == UE_CONTROL) {
    989 		usb_device_request_t *req = &xfer->ux_request;
    990 
    991 		DPRINTFN(3, "xfer=%p type=0x%02x request=0x%02x wValue=0x%04x "
    992 		    "wIndex=0x%04x len=%d addr=%d endpt=%d dir=%s speed=%d "
    993 		    "mps=%d\n",
    994 		    xfer, req->bmRequestType, req->bRequest, UGETW(req->wValue),
    995 		    UGETW(req->wIndex), UGETW(req->wLength), dev->ud_addr,
    996 		    epnum, dir == UT_READ ? "in" :"out", dev->ud_speed, mps);
    997 
    998 		/* Copy request packet to our DMA buffer */
    999 		memcpy(KERNADDR(&dpipe->req_dma, 0), req, sizeof(*req));
   1000 		usb_syncmem(&dpipe->req_dma, 0, sizeof(*req),
   1001 		    BUS_DMASYNC_PREWRITE);
   1002 		len = UGETW(req->wLength);
   1003 		if ((req->bmRequestType & UT_READ) == UT_READ) {
   1004 			dir = UE_DIR_IN;
   1005 		} else {
   1006 			dir = UE_DIR_OUT;
   1007 		}
   1008 
   1009 		DPRINTFN(3, "req = %p dma = %" PRIxBUSADDR " len %d dir %s\n",
   1010 		    KERNADDR(&dpipe->req_dma, 0), DMAADDR(&dpipe->req_dma, 0),
   1011 		    len, dir == UE_DIR_IN ? "in" : "out");
   1012 	} else if (xfertype == UE_ISOCHRONOUS) {
   1013 		DPRINTFN(3, "xfer=%p nframes=%d flags=%d addr=%d endpt=%d,"
   1014 		    " mps=%d dir %s\n", xfer, xfer->ux_nframes, xfer->ux_flags, addr,
   1015 		    epnum, mps, dir == UT_READ ? "in" :"out");
   1016 
   1017 #ifdef DIAGNOSTIC
   1018 		len = 0;
   1019 		for (size_t i = 0; i < xfer->ux_nframes; i++)
   1020 			len += xfer->ux_frlengths[i];
   1021 		if (len != xfer->ux_length)
   1022 			panic("len (%d) != xfer->ux_length (%d)", len,
   1023 			    xfer->ux_length);
   1024 #endif
   1025 		len = xfer->ux_length;
   1026 	} else {
   1027 		DPRINTFN(3, "xfer=%p len=%d flags=%d addr=%d endpt=%d,"
   1028 		    " mps=%d dir %s\n", xfer, xfer->ux_length, xfer->ux_flags, addr,
   1029 		    epnum, mps, dir == UT_READ ? "in" :"out");
   1030 
   1031 		len = xfer->ux_length;
   1032 	}
   1033 
   1034 	dwc2_urb = dxfer->urb;
   1035 	if (!dwc2_urb)
   1036 		return USBD_NOMEM;
   1037 
   1038 	KASSERT(dwc2_urb->packet_count == xfer->ux_nframes);
   1039 	memset(dwc2_urb, 0, sizeof(*dwc2_urb) +
   1040 	    sizeof(dwc2_urb->iso_descs[0]) * dwc2_urb->packet_count);
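	/*
	 * Note (sketch): dwc2_hcd_urb ends in a flexible array of isoc
	 * descriptors, so the clear above covers the header plus the
	 * packet_count descriptors allocated in dwc2_allocx().
	 */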
   1041 
   1042 	dwc2_urb->priv = xfer;
   1043 	dwc2_urb->packet_count = xfer->ux_nframes;
   1044 
   1045 	dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, addr, epnum, xfertype, dir,
   1046 	    mps);
   1047 
   1048 	if (xfertype == UE_CONTROL) {
   1049 		dwc2_urb->setup_usbdma = &dpipe->req_dma;
   1050 		dwc2_urb->setup_packet = KERNADDR(&dpipe->req_dma, 0);
   1051 		dwc2_urb->setup_dma = DMAADDR(&dpipe->req_dma, 0);
   1052 	} else {
   1053 		/* XXXNH - % mps required? */
   1054 		if ((xfer->ux_flags & USBD_FORCE_SHORT_XFER) && (len % mps) == 0)
   1055 		    flags |= URB_SEND_ZERO_PACKET;
   1056 	}
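	/*
	 * Example (sketch): a bulk OUT transfer of exactly 512 bytes on an
	 * endpoint with mps 512 ends on a packet boundary, so with
	 * USBD_FORCE_SHORT_XFER set the core is asked to append a
	 * zero-length packet via URB_SEND_ZERO_PACKET.
	 */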
   1057 	flags |= URB_GIVEBACK_ASAP;
   1058 
   1059 	/*
   1060 	 * control transfers with no data phase don't touch usbdma, but
   1061 	 * everything else does.
   1062 	 */
   1063 	if (!(xfertype == UE_CONTROL && len == 0)) {
   1064 		dwc2_urb->usbdma = &xfer->ux_dmabuf;
   1065 		dwc2_urb->buf = KERNADDR(dwc2_urb->usbdma, 0);
   1066 		dwc2_urb->dma = DMAADDR(dwc2_urb->usbdma, 0);
   1067 
   1068 		usb_syncmem(&xfer->ux_dmabuf, 0, len,
   1069 		    dir == UE_DIR_IN ?
   1070 			BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
   1071  	}
   1072 	dwc2_urb->length = len;
   1073  	dwc2_urb->flags = flags;
   1074 	dwc2_urb->status = -EINPROGRESS;
   1075 
   1076 	if (xfertype == UE_INTERRUPT ||
   1077 	    xfertype == UE_ISOCHRONOUS) {
   1078 		uint16_t ival;
   1079 
   1080 		if (xfertype == UE_INTERRUPT &&
   1081 		    dpipe->pipe.up_interval != USBD_DEFAULT_INTERVAL) {
   1082 			ival = dpipe->pipe.up_interval;
   1083 		} else {
   1084 			ival = ed->bInterval;
   1085 		}
   1086 
   1087 		if (ival < 1) {
   1088 			retval = -ENODEV;
   1089 			goto fail;
   1090 		}
   1091 		if (dev->ud_speed == USB_SPEED_HIGH ||
   1092 		   (dev->ud_speed == USB_SPEED_FULL && xfertype == UE_ISOCHRONOUS)) {
   1093 			if (ival > 16) {
   1094 				/*
   1095 				 * illegal with HS/FS, but there were
   1096 				 * documentation bugs in the spec
   1097 				 */
   1098 				ival = 256;
   1099 			} else {
   1100 				ival = (1 << (ival - 1));
   1101 			}
   1102 		} else {
   1103 			if (xfertype == UE_INTERRUPT && ival < 10)
   1104 				ival = 10;
   1105 		}
   1106 		dwc2_urb->interval = ival;
   1107 	}
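	/*
	 * Worked example (sketch): a high-speed interrupt endpoint with
	 * bInterval 4 polls every 2^(4-1) = 8 microframes, whereas a full-
	 * or low-speed interrupt endpoint uses bInterval directly, clamped
	 * to a minimum of 10 frames above.
	 */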
   1108 
   1109 	/* XXXNH bring down from callers?? */
   1110 // 	mutex_enter(&sc->sc_lock);
   1111 
   1112 	xfer->ux_actlen = 0;
   1113 
   1114 	KASSERT(xfertype != UE_ISOCHRONOUS ||
   1115 	    xfer->ux_nframes <= dwc2_urb->packet_count);
   1116 	KASSERTMSG(xfer->ux_nframes == 0 || xfertype == UE_ISOCHRONOUS,
   1117 	    "nframes %d xfertype %d\n", xfer->ux_nframes, xfertype);
   1118 
   1119 	off = 0;
   1120 	for (size_t i = 0; i < xfer->ux_nframes; ++i) {
   1121 		DPRINTFN(3, "xfer=%p frame=%zd offset=%d length=%d\n", xfer, i,
   1122 		    off, xfer->ux_frlengths[i]);
   1123 
   1124 		dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i, off,
   1125 		    xfer->ux_frlengths[i]);
   1126 		off += xfer->ux_frlengths[i];
   1127 	}
   1128 
   1129 	struct dwc2_qh *qh = dpipe->priv;
   1130 	struct dwc2_qtd *qtd;
   1131 	bool qh_allocated = false;
   1132 
   1133 	/* Create QH for the endpoint if it doesn't exist */
   1134 	if (!qh) {
   1135 		qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, GFP_ATOMIC);
   1136 		if (!qh) {
   1137 			retval = -ENOMEM;
   1138 			goto fail;
   1139 		}
   1140 		dpipe->priv = qh;
   1141 		qh_allocated = true;
   1142 	}
   1143 
   1144 	qtd = pool_cache_get(sc->sc_qtdpool, PR_NOWAIT);
   1145 	if (!qtd) {
   1146 		retval = -ENOMEM;
   1147 		goto fail1;
   1148 	}
   1149 	memset(qtd, 0, sizeof(*qtd));
   1150 
   1151 	/* might need to check cpu_intr_p */
   1152 	mutex_spin_enter(&hsotg->lock);
   1153 	retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
   1154 	if (retval)
   1155 		goto fail2;
   1156 	usbd_xfer_schedule_timeout(xfer);
   1157 	xfer->ux_status = USBD_IN_PROGRESS;
   1158 
   1159 	if (alloc_bandwidth) {
   1160 		dwc2_allocate_bus_bandwidth(hsotg,
   1161 				dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
   1162 				xfer);
   1163 	}
   1164 
   1165 	mutex_spin_exit(&hsotg->lock);
   1166 // 	mutex_exit(&sc->sc_lock);
   1167 
   1168 	return USBD_IN_PROGRESS;
   1169 
   1170 fail2:
   1171 	dwc2_urb->priv = NULL;
   1172 	mutex_spin_exit(&hsotg->lock);
   1173 	pool_cache_put(sc->sc_qtdpool, qtd);
   1174 
   1175 fail1:
   1176 	if (qh_allocated) {
   1177 		dpipe->priv = NULL;
   1178 		dwc2_hcd_qh_free(hsotg, qh);
   1179 	}
   1180 fail:
   1181 
   1182 	switch (retval) {
   1183 	case -EINVAL:
   1184 	case -ENODEV:
   1185 		err = USBD_INVAL;
   1186 		break;
   1187 	case -ENOMEM:
   1188 		err = USBD_NOMEM;
   1189 		break;
   1190 	default:
   1191 		err = USBD_IOERROR;
   1192 	}
   1193 
   1194 	return err;
   1195 
   1196 }
   1197 
   1198 int dwc2_intr(void *p)
   1199 {
   1200 	struct dwc2_softc *sc = p;
   1201 	struct dwc2_hsotg *hsotg;
   1202 	int ret = 0;
   1203 
   1204 	if (sc == NULL)
   1205 		return 0;
   1206 
   1207 	hsotg = sc->sc_hsotg;
   1208 	mutex_spin_enter(&hsotg->lock);
   1209 
   1210 	if (sc->sc_dying || !device_has_power(sc->sc_dev))
   1211 		goto done;
   1212 
   1213 	if (sc->sc_bus.ub_usepolling) {
   1214 		uint32_t intrs;
   1215 
   1216 		intrs = dwc2_read_core_intr(hsotg);
   1217 		DWC2_WRITE_4(hsotg, GINTSTS, intrs);
   1218 	} else {
   1219 		ret = dwc2_interrupt(sc);
   1220 	}
   1221 
   1222 done:
   1223 	mutex_spin_exit(&hsotg->lock);
   1224 
   1225 	return ret;
   1226 }
   1227 
   1228 int
   1229 dwc2_interrupt(struct dwc2_softc *sc)
   1230 {
   1231 	int ret = 0;
   1232 
   1233 	if (sc->sc_hcdenabled) {
   1234 		ret |= dwc2_handle_hcd_intr(sc->sc_hsotg);
   1235 	}
   1236 
   1237 	ret |= dwc2_handle_common_intr(sc->sc_hsotg);
   1238 
   1239 	return ret;
   1240 }
   1241 
   1242 /***********************************************************************/
   1243 
   1244 int
   1245 dwc2_detach(struct dwc2_softc *sc, int flags)
   1246 {
   1247 	int rv = 0;
   1248 
   1249 	if (sc->sc_child != NULL)
   1250 		rv = config_detach(sc->sc_child, flags);
   1251 
   1252 	return rv;
   1253 }
   1254 
   1255 bool
   1256 dwc2_shutdown(device_t self, int flags)
   1257 {
   1258 	struct dwc2_softc *sc = device_private(self);
   1259 
   1260 	sc = sc;
   1261 
   1262 	return true;
   1263 }
   1264 
   1265 void
   1266 dwc2_childdet(device_t self, device_t child)
   1267 {
   1268 	struct dwc2_softc *sc = device_private(self);
   1269 
   1270 	sc = sc;
   1271 }
   1272 
   1273 int
   1274 dwc2_activate(device_t self, enum devact act)
   1275 {
   1276 	struct dwc2_softc *sc = device_private(self);
   1277 
   1278 	sc = sc;
   1279 
   1280 	return 0;
   1281 }
   1282 
   1283 bool
   1284 dwc2_resume(device_t dv, const pmf_qual_t *qual)
   1285 {
   1286 	struct dwc2_softc *sc = device_private(dv);
   1287 
   1288 	sc = sc;
   1289 
   1290 	return true;
   1291 }
   1292 
   1293 bool
   1294 dwc2_suspend(device_t dv, const pmf_qual_t *qual)
   1295 {
   1296 	struct dwc2_softc *sc = device_private(dv);
   1297 
   1298 	sc = sc;
   1299 
   1300 	return true;
   1301 }
   1302 
   1303 /***********************************************************************/
   1304 int
   1305 dwc2_init(struct dwc2_softc *sc)
   1306 {
   1307 	int err = 0;
   1308 
   1309 	err = linux_workqueue_init();
   1310 	if (err)
   1311 		return err;
   1312 
   1313 	sc->sc_bus.ub_hcpriv = sc;
   1314 	sc->sc_bus.ub_revision = USBREV_2_0;
   1315 	sc->sc_bus.ub_methods = &dwc2_bus_methods;
   1316 	sc->sc_bus.ub_pipesize = sizeof(struct dwc2_pipe);
   1317 	sc->sc_bus.ub_usedma = true;
   1318 	sc->sc_hcdenabled = false;
   1319 
   1320 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
   1321 
   1322 	TAILQ_INIT(&sc->sc_complete);
   1323 
   1324 	sc->sc_rhc_si = softint_establish(SOFTINT_USB | SOFTINT_MPSAFE,
   1325 	    dwc2_rhc, sc);
   1326 
   1327 	sc->sc_xferpool = pool_cache_init(sizeof(struct dwc2_xfer), 0, 0, 0,
   1328 	    "dwc2xfer", NULL, IPL_USB, NULL, NULL, NULL);
   1329 	sc->sc_qhpool = pool_cache_init(sizeof(struct dwc2_qh), 0, 0, 0,
   1330 	    "dwc2qh", NULL, IPL_USB, NULL, NULL, NULL);
   1331 	sc->sc_qtdpool = pool_cache_init(sizeof(struct dwc2_qtd), 0, 0, 0,
   1332 	    "dwc2qtd", NULL, IPL_USB, NULL, NULL, NULL);
   1333 
   1334 	sc->sc_hsotg = kmem_zalloc(sizeof(struct dwc2_hsotg), KM_SLEEP);
   1335 	sc->sc_hsotg->hsotg_sc = sc;
   1336 	sc->sc_hsotg->dev = sc->sc_dev;
   1337 	sc->sc_hcdenabled = true;
   1338 
   1339 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
   1340 	struct dwc2_core_params defparams;
   1341 	int retval;
   1342 
   1343 	if (sc->sc_params == NULL) {
   1344 		/* Default all params to autodetect */
   1345 		dwc2_set_all_params(&defparams, -1);
   1346 		sc->sc_params = &defparams;
   1347 
   1348 		/*
   1349 		 * Disable descriptor dma mode by default as the HW can support
   1350 		 * it, but does not support it for SPLIT transactions.
   1351 		 */
   1352 		defparams.dma_desc_enable = 0;
   1353 	}
   1354 	hsotg->dr_mode = USB_DR_MODE_HOST;
   1355 
   1356 	/* Detect config values from hardware */
   1357 	retval = dwc2_get_hwparams(hsotg);
   1358 	if (retval) {
   1359 		goto fail2;
   1360 	}
   1361 
   1362 	hsotg->core_params = kmem_zalloc(sizeof(*hsotg->core_params), KM_SLEEP);
   1363 	dwc2_set_all_params(hsotg->core_params, -1);
   1364 
   1365 	/* Validate parameter values */
   1366 	dwc2_set_parameters(hsotg, sc->sc_params);
   1367 
   1368 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
   1369     IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
   1370 	if (hsotg->dr_mode != USB_DR_MODE_HOST) {
   1371 		retval = dwc2_gadget_init(hsotg);
   1372 		if (retval)
   1373 			goto fail2;
   1374 		hsotg->gadget_enabled = 1;
   1375 	}
   1376 #endif
   1377 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \
   1378     IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
   1379 	if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) {
   1380 		retval = dwc2_hcd_init(hsotg);
   1381 		if (retval) {
   1382 			if (hsotg->gadget_enabled)
   1383 				dwc2_hsotg_remove(hsotg);
   1384 			goto fail2;
   1385 		}
    1386 		hsotg->hcd_enabled = 1;
    1387 	}
   1388 #endif
   1389 
   1390 	uint32_t snpsid = hsotg->hw_params.snpsid;
   1391 	aprint_verbose_dev(sc->sc_dev, "Core Release: %x.%x%x%x (snpsid=%x)\n",
   1392 	    snpsid >> 12 & 0xf, snpsid >> 8 & 0xf,
   1393 	    snpsid >> 4 & 0xf, snpsid & 0xf, snpsid);
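	/*
	 * Example (sketch): a snpsid value of 0x4f54280a prints as
	 * "Core Release: 2.80a (snpsid=4f54280a)", i.e. the low four
	 * nibbles hold the release number one digit per nibble.
	 */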
   1394 
   1395 	return 0;
   1396 
   1397 fail2:
   1398 	err = -retval;
   1399 	kmem_free(sc->sc_hsotg, sizeof(struct dwc2_hsotg));
   1400 	softint_disestablish(sc->sc_rhc_si);
   1401 
   1402 	return err;
   1403 }
   1404 
   1405 #if 0
   1406 /*
   1407  * curmode is a mode indication bit 0 = device, 1 = host
   1408  */
   1409 static const char * const intnames[32] = {
   1410 	"curmode",	"modemis",	"otgint",	"sof",
   1411 	"rxflvl",	"nptxfemp",	"ginnakeff",	"goutnakeff",
   1412 	"ulpickint",	"i2cint",	"erlysusp",	"usbsusp",
   1413 	"usbrst",	"enumdone",	"isooutdrop",	"eopf",
   1414 	"restore_done",	"epmis",	"iepint",	"oepint",
   1415 	"incompisoin",	"incomplp",	"fetsusp",	"resetdet",
   1416 	"prtint",	"hchint",	"ptxfemp",	"lpm",
   1417 	"conidstschng",	"disconnint",	"sessreqint",	"wkupint"
   1418 };
   1419 
   1420 
   1421 /***********************************************************************/
   1422 
   1423 #endif
   1424 
   1425 void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
   1426 			int *hub_port)
   1427 {
   1428 	struct usbd_xfer *xfer = context;
   1429 	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
   1430 	struct usbd_device *dev = dpipe->pipe.up_dev;
   1431 
   1432 	*hub_addr = dev->ud_myhsport->up_parent->ud_addr;
   1433  	*hub_port = dev->ud_myhsport->up_portno;
   1434 }
   1435 
   1436 int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
   1437 {
   1438 	struct usbd_xfer *xfer = context;
   1439 	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
   1440 	struct usbd_device *dev = dpipe->pipe.up_dev;
   1441 
   1442 	return dev->ud_speed;
   1443 }
   1444 
   1445 /*
   1446  * Sets the final status of an URB and returns it to the upper layer. Any
   1447  * required cleanup of the URB is performed.
   1448  *
   1449  * Must be called with interrupt disabled and spinlock held
   1450  */
   1451 void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
   1452     int status)
   1453 {
   1454 	struct usbd_xfer *xfer;
   1455 	struct dwc2_xfer *dxfer;
   1456 	struct dwc2_softc *sc;
   1457 	usb_endpoint_descriptor_t *ed;
   1458 	uint8_t xfertype;
   1459 
   1460 	KASSERT(mutex_owned(&hsotg->lock));
   1461 
   1462 	if (!qtd) {
   1463 		dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
   1464 		return;
   1465 	}
   1466 
   1467 	if (!qtd->urb) {
   1468 		dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
   1469 		return;
   1470 	}
   1471 
   1472 	xfer = qtd->urb->priv;
   1473 	if (!xfer) {
   1474 		dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
   1475 		return;
   1476 	}
   1477 
   1478 	dxfer = DWC2_XFER2DXFER(xfer);
   1479 	sc = DWC2_XFER2SC(xfer);
   1480 	ed = xfer->ux_pipe->up_endpoint->ue_edesc;
   1481 	xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
   1482 
   1483 	struct dwc2_hcd_urb *urb = qtd->urb;
   1484 	xfer->ux_actlen = dwc2_hcd_urb_get_actual_length(urb);
   1485 
   1486 	DPRINTFN(3, "xfer=%p actlen=%d\n", xfer, xfer->ux_actlen);
   1487 
   1488 	if (xfertype == UE_ISOCHRONOUS) {
   1489 		xfer->ux_actlen = 0;
   1490 		for (size_t i = 0; i < xfer->ux_nframes; ++i) {
   1491 			xfer->ux_frlengths[i] =
   1492 				dwc2_hcd_urb_get_iso_desc_actual_length(
   1493 						urb, i);
   1494 			DPRINTFN(1, "xfer=%p frame=%zu length=%d\n", xfer, i,
   1495 			    xfer->ux_frlengths[i]);
   1496 			xfer->ux_actlen += xfer->ux_frlengths[i];
   1497 		}
   1498 		DPRINTFN(1, "xfer=%p actlen=%d (isoc)\n", xfer, xfer->ux_actlen);
   1499 	}
   1500 
   1501 	if (xfertype == UE_ISOCHRONOUS && dbg_perio()) {
   1502 		for (size_t i = 0; i < xfer->ux_nframes; i++)
   1503 			dev_vdbg(hsotg->dev, " ISO Desc %zu status %d\n",
   1504 				 i, urb->iso_descs[i].status);
   1505 	}
   1506 
   1507 	if (!status) {
   1508 		if (!(xfer->ux_flags & USBD_SHORT_XFER_OK) &&
   1509 		    xfer->ux_actlen < xfer->ux_length)
   1510 			status = -EIO;
   1511 	}
   1512 
   1513 	switch (status) {
   1514 	case 0:
   1515 		dxfer->intr_status = USBD_NORMAL_COMPLETION;
   1516 		break;
   1517 	case -EPIPE:
   1518 		dxfer->intr_status = USBD_STALLED;
   1519 		break;
   1520 	case -EPROTO:
   1521 		dxfer->intr_status = USBD_INVAL;
   1522 		break;
   1523 	case -EIO:
   1524 		dxfer->intr_status = USBD_IOERROR;
   1525 		break;
   1526 	case -EOVERFLOW:
   1527 		dxfer->intr_status = USBD_IOERROR;
   1528 		break;
   1529 	default:
   1530 		dxfer->intr_status = USBD_IOERROR;
   1531 		printf("%s: unknown error status %d\n", __func__, status);
   1532 	}
   1533 
   1534 	if (dxfer->intr_status == USBD_NORMAL_COMPLETION) {
   1535 		/*
   1536 		 * control transfers with no data phase don't touch dmabuf, but
   1537 		 * everything else does.
   1538 		 */
   1539 		if (!(xfertype == UE_CONTROL &&
   1540 		    UGETW(xfer->ux_request.wLength) == 0) &&
   1541 		    xfer->ux_actlen > 0	/* XXX PR/53503 */
   1542 		    ) {
   1543 			int rd = usbd_xfer_isread(xfer);
   1544 
   1545 			usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_actlen,
   1546 			    rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
   1547 		}
   1548 	}
   1549 
   1550 	if (xfertype == UE_ISOCHRONOUS ||
   1551 	    xfertype == UE_INTERRUPT) {
   1552 		struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
   1553 
   1554 		dwc2_free_bus_bandwidth(hsotg,
   1555 					dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
   1556 					xfer);
   1557 	}
   1558 
   1559 	qtd->urb = NULL;
   1560 	KASSERT(mutex_owned(&hsotg->lock));
   1561 
   1562 	TAILQ_INSERT_TAIL(&sc->sc_complete, dxfer, xnext);
   1563 
   1564 	mutex_spin_exit(&hsotg->lock);
   1565 	usb_schedsoftintr(&sc->sc_bus);
   1566 	mutex_spin_enter(&hsotg->lock);
   1567 }
   1568 
   1569 
   1570 int
   1571 _dwc2_hcd_start(struct dwc2_hsotg *hsotg)
   1572 {
   1573 	dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
   1574 
   1575 	mutex_spin_enter(&hsotg->lock);
   1576 
   1577 	hsotg->lx_state = DWC2_L0;
   1578 
   1579 	if (dwc2_is_device_mode(hsotg)) {
   1580 		mutex_spin_exit(&hsotg->lock);
   1581 		return 0;	/* why 0 ?? */
   1582 	}
   1583 
   1584 	dwc2_hcd_reinit(hsotg);
   1585 
   1586 	mutex_spin_exit(&hsotg->lock);
   1587 	return 0;
   1588 }
   1589 
   1590 int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
   1591 {
   1592 
   1593 	return false;
   1594 }
   1595