      1 /*	$NetBSD: dwc2.c,v 1.81 2024/04/05 18:57:10 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Nick Hudson
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: dwc2.c,v 1.81 2024/04/05 18:57:10 riastradh Exp $");
     34 
     35 #include "opt_usb.h"
     36 
     37 #include <sys/param.h>
     38 
     39 #include <sys/cpu.h>
     40 #include <sys/device.h>
     41 #include <sys/kernel.h>
     42 #include <sys/kmem.h>
     43 #include <sys/proc.h>
     44 #include <sys/queue.h>
     45 #include <sys/select.h>
     46 #include <sys/sysctl.h>
     47 #include <sys/systm.h>
     48 
     49 #include <machine/endian.h>
     50 
     51 #include <dev/usb/usb.h>
     52 #include <dev/usb/usbdi.h>
     53 #include <dev/usb/usbdivar.h>
     54 #include <dev/usb/usb_mem.h>
     55 #include <dev/usb/usbroothub.h>
     56 
     57 #include <dwc2/dwc2.h>
     58 #include <dwc2/dwc2var.h>
     59 
     60 #include "dwc2_core.h"
     61 #include "dwc2_hcd.h"
     62 
     63 #ifdef DWC2_COUNTERS
     64 #define	DWC2_EVCNT_ADD(a,b)	((void)((a).ev_count += (b)))
     65 #else
     66 #define	DWC2_EVCNT_ADD(a,b)	do { } while (/*CONSTCOND*/0)
     67 #endif
     68 #define	DWC2_EVCNT_INCR(a)	DWC2_EVCNT_ADD((a), 1)
     69 
     70 #ifdef DWC2_DEBUG
     71 #define	DPRINTFN(n,fmt,...) do {			\
     72 	if (dwc2debug >= (n)) {			\
     73 		printf("%s: " fmt,			\
     74 		__FUNCTION__,## __VA_ARGS__);		\
     75 	}						\
     76 } while (0)
     77 #define	DPRINTF(...)	DPRINTFN(1, __VA_ARGS__)
     78 int dwc2debug = 0;
     79 
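         /*
          * The hw.dwc2.debug sysctl created below adjusts dwc2debug at run
          * time, e.g. "sysctl -w hw.dwc2.debug=1".
          */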
     80 SYSCTL_SETUP(sysctl_hw_dwc2_setup, "sysctl hw.dwc2 setup")
     81 {
     82 	int err;
     83 	const struct sysctlnode *rnode;
     84 	const struct sysctlnode *cnode;
     85 
     86 	err = sysctl_createv(clog, 0, NULL, &rnode,
     87 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "dwc2",
     88 	    SYSCTL_DESCR("dwc2 global controls"),
     89 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
     90 
     91 	if (err)
     92 		goto fail;
     93 
     94 	/* control debugging printfs */
     95 	err = sysctl_createv(clog, 0, &rnode, &cnode,
     96 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
     97 	    "debug", SYSCTL_DESCR("Enable debugging output"),
     98 	    NULL, 0, &dwc2debug, sizeof(dwc2debug), CTL_CREATE, CTL_EOL);
     99 	if (err)
    100 		goto fail;
    101 
    102 	return;
    103 fail:
    104 	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
    105 }
    106 #else
    107 #define	DPRINTF(...) do { } while (0)
    108 #define	DPRINTFN(...) do { } while (0)
    109 #endif
    110 
    111 Static usbd_status	dwc2_open(struct usbd_pipe *);
    112 Static void		dwc2_poll(struct usbd_bus *);
    113 Static void		dwc2_softintr(void *);
    114 
    115 Static struct usbd_xfer *
    116 			dwc2_allocx(struct usbd_bus *, unsigned int);
    117 Static void		dwc2_freex(struct usbd_bus *, struct usbd_xfer *);
    118 Static void		dwc2_get_lock(struct usbd_bus *, kmutex_t **);
    119 Static bool		dwc2_dying(struct usbd_bus *);
    120 Static int		dwc2_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
    121 			    void *, int);
    122 
    123 Static usbd_status	dwc2_root_intr_transfer(struct usbd_xfer *);
    124 Static usbd_status	dwc2_root_intr_start(struct usbd_xfer *);
    125 Static void		dwc2_root_intr_abort(struct usbd_xfer *);
    126 Static void		dwc2_root_intr_close(struct usbd_pipe *);
    127 Static void		dwc2_root_intr_done(struct usbd_xfer *);
    128 
    129 Static usbd_status	dwc2_device_ctrl_transfer(struct usbd_xfer *);
    130 Static usbd_status	dwc2_device_ctrl_start(struct usbd_xfer *);
    131 Static void		dwc2_device_ctrl_abort(struct usbd_xfer *);
    132 Static void		dwc2_device_ctrl_close(struct usbd_pipe *);
    133 Static void		dwc2_device_ctrl_done(struct usbd_xfer *);
    134 
    135 Static usbd_status	dwc2_device_bulk_transfer(struct usbd_xfer *);
    136 Static void		dwc2_device_bulk_abort(struct usbd_xfer *);
    137 Static void		dwc2_device_bulk_close(struct usbd_pipe *);
    138 Static void		dwc2_device_bulk_done(struct usbd_xfer *);
    139 
    140 Static usbd_status	dwc2_device_intr_transfer(struct usbd_xfer *);
    141 Static usbd_status	dwc2_device_intr_start(struct usbd_xfer *);
    142 Static void		dwc2_device_intr_abort(struct usbd_xfer *);
    143 Static void		dwc2_device_intr_close(struct usbd_pipe *);
    144 Static void		dwc2_device_intr_done(struct usbd_xfer *);
    145 
    146 Static usbd_status	dwc2_device_isoc_transfer(struct usbd_xfer *);
    147 Static void		dwc2_device_isoc_abort(struct usbd_xfer *);
    148 Static void		dwc2_device_isoc_close(struct usbd_pipe *);
    149 Static void		dwc2_device_isoc_done(struct usbd_xfer *);
    150 
    151 Static usbd_status	dwc2_device_start(struct usbd_xfer *);
    152 
    153 Static void		dwc2_close_pipe(struct usbd_pipe *);
    154 Static void		dwc2_abortx(struct usbd_xfer *);
    155 
    156 Static void		dwc2_device_clear_toggle(struct usbd_pipe *);
    157 Static void		dwc2_noop(struct usbd_pipe *pipe);
    158 
    159 Static int		dwc2_interrupt(struct dwc2_softc *);
    160 Static void		dwc2_rhc(void *);
    161 
    162 
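         /*
          * Periodic-bandwidth bookkeeping hooks, called around interrupt and
          * isochronous transfer start and completion.  Intentionally empty:
          * this port does not track the allocation.
          */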
    163 static inline void
    164 dwc2_allocate_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
    165 			    struct usbd_xfer *xfer)
    166 {
    167 }
    168 
    169 static inline void
    170 dwc2_free_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
    171 			struct usbd_xfer *xfer)
    172 {
    173 }
    174 
    175 Static const struct usbd_bus_methods dwc2_bus_methods = {
    176 	.ubm_open =	dwc2_open,
    177 	.ubm_softint =	dwc2_softintr,
    178 	.ubm_dopoll =	dwc2_poll,
    179 	.ubm_allocx =	dwc2_allocx,
    180 	.ubm_freex =	dwc2_freex,
    181 	.ubm_abortx =	dwc2_abortx,
    182 	.ubm_dying =	dwc2_dying,
    183 	.ubm_getlock =	dwc2_get_lock,
    184 	.ubm_rhctrl =	dwc2_roothub_ctrl,
    185 };
    186 
    187 Static const struct usbd_pipe_methods dwc2_root_intr_methods = {
    188 	.upm_transfer =	dwc2_root_intr_transfer,
    189 	.upm_start =	dwc2_root_intr_start,
    190 	.upm_abort =	dwc2_root_intr_abort,
    191 	.upm_close =	dwc2_root_intr_close,
    192 	.upm_cleartoggle =	dwc2_noop,
    193 	.upm_done =	dwc2_root_intr_done,
    194 };
    195 
    196 Static const struct usbd_pipe_methods dwc2_device_ctrl_methods = {
    197 	.upm_transfer =	dwc2_device_ctrl_transfer,
    198 	.upm_start =	dwc2_device_ctrl_start,
    199 	.upm_abort =	dwc2_device_ctrl_abort,
    200 	.upm_close =	dwc2_device_ctrl_close,
    201 	.upm_cleartoggle =	dwc2_noop,
    202 	.upm_done =	dwc2_device_ctrl_done,
    203 };
    204 
    205 Static const struct usbd_pipe_methods dwc2_device_intr_methods = {
    206 	.upm_transfer =	dwc2_device_intr_transfer,
    207 	.upm_start =	dwc2_device_intr_start,
    208 	.upm_abort =	dwc2_device_intr_abort,
    209 	.upm_close =	dwc2_device_intr_close,
    210 	.upm_cleartoggle =	dwc2_device_clear_toggle,
    211 	.upm_done =	dwc2_device_intr_done,
    212 };
    213 
    214 Static const struct usbd_pipe_methods dwc2_device_bulk_methods = {
    215 	.upm_transfer =	dwc2_device_bulk_transfer,
    216 	.upm_abort =	dwc2_device_bulk_abort,
    217 	.upm_close =	dwc2_device_bulk_close,
    218 	.upm_cleartoggle =	dwc2_device_clear_toggle,
    219 	.upm_done =	dwc2_device_bulk_done,
    220 };
    221 
    222 Static const struct usbd_pipe_methods dwc2_device_isoc_methods = {
    223 	.upm_transfer =	dwc2_device_isoc_transfer,
    224 	.upm_abort =	dwc2_device_isoc_abort,
    225 	.upm_close =	dwc2_device_isoc_close,
    226 	.upm_cleartoggle =	dwc2_noop,
    227 	.upm_done =	dwc2_device_isoc_done,
    228 };
    229 
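         /*
          * Allocate a transfer from the per-controller pool and pair it with
          * a dwc2_hcd_urb sized for "nframes" isochronous descriptors.
          */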
    230 struct usbd_xfer *
    231 dwc2_allocx(struct usbd_bus *bus, unsigned int nframes)
    232 {
    233 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    234 	struct dwc2_xfer *dxfer;
    235 
    236 	DPRINTFN(10, "\n");
    237 
    238 	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolget);
    239 	dxfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
    240 	if (dxfer != NULL) {
    241 		memset(dxfer, 0, sizeof(*dxfer));
    242 		dxfer->urb = dwc2_hcd_urb_alloc(sc->sc_hsotg,
    243 		    nframes, GFP_KERNEL);
    244 #ifdef DIAGNOSTIC
    245 		dxfer->xfer.ux_state = XFER_BUSY;
    246 #endif
    247 	}
    248 	return (struct usbd_xfer *)dxfer;
    249 }
    250 
    251 void
    252 dwc2_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
    253 {
    254 	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
    255 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    256 
    257 	DPRINTFN(10, "\n");
    258 
    259 #ifdef DIAGNOSTIC
    260 	if (xfer->ux_state != XFER_BUSY &&
    261 	    xfer->ux_status != USBD_NOT_STARTED) {
    262 		DPRINTF("xfer=%p not busy, 0x%08x\n", xfer, xfer->ux_state);
    263 	}
    264 	xfer->ux_state = XFER_FREE;
    265 #endif
    266 	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolput);
    267 	dwc2_hcd_urb_free(sc->sc_hsotg, dxfer->urb, dxfer->urb->packet_count);
    268 	pool_cache_put(sc->sc_xferpool, xfer);
    269 }
    270 
    271 Static bool
    272 dwc2_dying(struct usbd_bus *bus)
    273 {
    274 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    275 
    276 	return sc->sc_dying;
    277 }
    278 
    279 Static void
    280 dwc2_get_lock(struct usbd_bus *bus, kmutex_t **lock)
    281 {
    282 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    283 
    284 	*lock = &sc->sc_lock;
    285 }
    286 
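         /*
          * Root hub change soft interrupt: complete the pending root hub
          * interrupt transfer with the status bit for our single port set.
          */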
    287 Static void
    288 dwc2_rhc(void *addr)
    289 {
    290 	struct dwc2_softc *sc = addr;
    291 	struct usbd_xfer *xfer;
    292 	u_char *p;
    293 
    294 	DPRINTF("\n");
    295 	mutex_enter(&sc->sc_lock);
    296 	xfer = sc->sc_intrxfer;
    297 
    298 	if (xfer == NULL) {
    299 		/* Just ignore the change. */
    300 		mutex_exit(&sc->sc_lock);
    301 		return;
    302 
    303 	}
    304 	KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
    305 
    306 	/* set port bit */
    307 	p = KERNADDR(&xfer->ux_dmabuf, 0);
    308 
    309 	p[0] = 0x02;	/* we only have one port (1 << 1) */
    310 
    311 	xfer->ux_actlen = xfer->ux_length;
    312 	xfer->ux_status = USBD_NORMAL_COMPLETION;
    313 
    314 	usb_transfer_complete(xfer);
    315 	mutex_exit(&sc->sc_lock);
    316 }
    317 
    318 Static void
    319 dwc2_softintr(void *v)
    320 {
    321 	struct usbd_bus *bus = v;
    322 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    323 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
    324 	struct dwc2_xfer *dxfer, *next;
    325 	TAILQ_HEAD(, dwc2_xfer) claimed = TAILQ_HEAD_INITIALIZER(claimed);
    326 
    327 	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
    328 
    329 	/*
    330 	 * Grab all the xfers that have not been aborted or timed out.
    331 	 * Do so under a single lock -- without dropping it to run
    332 	 * usb_transfer_complete as we go -- so that dwc2_abortx won't
    333 	 * remove next out from under us during iteration when we've
    334 	 * dropped the lock.
    335 	 */
    336 	mutex_spin_enter(&hsotg->lock);
    337 	TAILQ_FOREACH_SAFE(dxfer, &sc->sc_complete, xnext, next) {
    338 		if (!usbd_xfer_trycomplete(&dxfer->xfer))
    339 			/*
     340 			 * The hard interrupt handler decided to
    341 			 * complete the xfer, and put it on sc_complete
    342 			 * to pass it to us in the soft interrupt
    343 			 * handler, but in the time between hard
    344 			 * interrupt and soft interrupt, the xfer was
    345 			 * aborted or timed out and we lost the race.
    346 			 */
    347 			continue;
    348 		KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS);
    349 		KASSERT(dxfer->intr_status != USBD_CANCELLED);
    350 		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
    351 		TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
    352 		TAILQ_INSERT_TAIL(&claimed, dxfer, xnext);
    353 	}
    354 	mutex_spin_exit(&hsotg->lock);
    355 
    356 	/* Now complete them.  */
    357 	while (!TAILQ_EMPTY(&claimed)) {
    358 		dxfer = TAILQ_FIRST(&claimed);
    359 		KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS);
    360 		KASSERT(dxfer->intr_status != USBD_CANCELLED);
    361 		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
    362 		TAILQ_REMOVE(&claimed, dxfer, xnext);
    363 
    364 		dxfer->xfer.ux_status = dxfer->intr_status;
    365 		usb_transfer_complete(&dxfer->xfer);
    366 	}
    367 }
    368 
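         /*
          * Open a pipe: pick the root hub or device methods matching the
          * endpoint's transfer type, and for control pipes allocate DMA
          * memory to hold the setup request.
          */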
    369 usbd_status
    370 dwc2_open(struct usbd_pipe *pipe)
    371 {
    372 	struct usbd_device *dev = pipe->up_dev;
    373 	struct dwc2_softc *sc = DWC2_PIPE2SC(pipe);
    374 	struct dwc2_pipe *dpipe = DWC2_PIPE2DPIPE(pipe);
    375 	usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc;
    376 	uint8_t addr = dev->ud_addr;
    377 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
    378 
    379 	DPRINTF("pipe %p addr %d xfertype %d dir %s\n", pipe, addr, xfertype,
    380 	    UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN ? "in" : "out");
    381 
    382 	if (sc->sc_dying) {
    383 		return USBD_IOERROR;
    384 	}
    385 
    386 	if (addr == dev->ud_bus->ub_rhaddr) {
    387 		switch (ed->bEndpointAddress) {
    388 		case USB_CONTROL_ENDPOINT:
    389 			pipe->up_methods = &roothub_ctrl_methods;
    390 			break;
    391 		case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
    392 			pipe->up_methods = &dwc2_root_intr_methods;
    393 			break;
    394 		default:
    395 			DPRINTF("bad bEndpointAddress 0x%02x\n",
    396 			    ed->bEndpointAddress);
    397 			return USBD_INVAL;
    398 		}
    399 		DPRINTF("root hub pipe open\n");
    400 		return USBD_NORMAL_COMPLETION;
    401 	}
    402 
    403 	switch (xfertype) {
    404 	case UE_CONTROL:
    405 		pipe->up_methods = &dwc2_device_ctrl_methods;
    406 		int err = usb_allocmem(sc->sc_bus.ub_dmatag, sizeof(usb_device_request_t),
    407 		    0, USBMALLOC_COHERENT, &dpipe->req_dma);
    408 		if (err)
    409 			return USBD_NOMEM;
    410 		break;
    411 	case UE_INTERRUPT:
    412 		pipe->up_methods = &dwc2_device_intr_methods;
    413 		break;
    414 	case UE_ISOCHRONOUS:
    415 		pipe->up_serialise = false;
    416 		pipe->up_methods = &dwc2_device_isoc_methods;
    417 		break;
    418 	case UE_BULK:
    419 		pipe->up_serialise = false;
    420 		pipe->up_methods = &dwc2_device_bulk_methods;
    421 		break;
    422 	default:
    423 		DPRINTF("bad xfer type %d\n", xfertype);
    424 		return USBD_INVAL;
    425 	}
    426 
    427 	/* QH */
    428 	dpipe->priv = NULL;
    429 
    430 	return USBD_NORMAL_COMPLETION;
    431 }
    432 
    433 Static void
    434 dwc2_poll(struct usbd_bus *bus)
    435 {
    436 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    437 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
    438 
    439 	mutex_spin_enter(&hsotg->lock);
    440 	dwc2_interrupt(sc);
    441 	mutex_spin_exit(&hsotg->lock);
    442 }
    443 
    444 /*
     445  * Close a regular pipe.
    446  * Assumes that there are no pending transactions.
    447  */
    448 Static void
    449 dwc2_close_pipe(struct usbd_pipe *pipe)
    450 {
    451 	struct dwc2_softc *sc __diagused = pipe->up_dev->ud_bus->ub_hcpriv;
    452 
    453 	KASSERT(mutex_owned(&sc->sc_lock));
    454 }
    455 
    456 /*
    457  * Abort a device request.
    458  */
    459 Static void
    460 dwc2_abortx(struct usbd_xfer *xfer)
    461 {
    462 	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
    463 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    464 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
    465 	struct dwc2_xfer *d;
    466 	int err;
    467 
    468 	DPRINTF("xfer %p pipe %p status 0x%08x", xfer, xfer->ux_pipe,
    469 	    xfer->ux_status);
    470 
    471 	KASSERT(mutex_owned(&sc->sc_lock));
    472 	ASSERT_SLEEPABLE();
    473 
    474 	KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
    475 		xfer->ux_status == USBD_TIMEOUT),
    476 	    "bad abort status: %d", xfer->ux_status);
    477 
    478 	mutex_spin_enter(&hsotg->lock);
    479 
    480 	/*
    481 	 * Check whether we aborted or timed out after the hardware
    482 	 * completion interrupt determined that it's done but before
    483 	 * the soft interrupt could actually complete it.  If so, it's
    484 	 * too late for the soft interrupt -- at this point we've
    485 	 * already committed to abort it or time it out, so we need to
    486 	 * take it off the softint's list of work in case the caller,
    487 	 * say, frees the xfer before the softint runs.
    488 	 *
    489 	 * This logic is unusual among host controller drivers, and
    490 	 * happens because dwc2 decides to complete xfers in the hard
    491 	 * interrupt handler rather than in the soft interrupt handler,
    492 	 * but usb_transfer_complete must be deferred to softint -- and
    493 	 * we happened to swoop in between the hard interrupt and the
    494 	 * soft interrupt.  Other host controller drivers do almost all
    495 	 * processing in the softint so there's no intermediate stage.
    496 	 *
    497 	 * Fortunately, this linear search to discern the intermediate
    498 	 * stage is not likely to be a serious performance impact
    499 	 * because it happens only on abort or timeout.
    500 	 */
    501 	TAILQ_FOREACH(d, &sc->sc_complete, xnext) {
    502 		if (d == dxfer) {
    503 			TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
    504 			break;
    505 		}
    506 	}
    507 
    508 	/*
    509 	 * If we're dying, skip the hardware action and just notify the
    510 	 * software that we're done.
    511 	 */
    512 	if (sc->sc_dying) {
    513 		DPRINTFN(4, "xfer %p dying 0x%08x", xfer, xfer->ux_status);
    514 		goto dying;
    515 	}
    516 
    517 	/*
    518 	 * Handle the hardware.
    519 	 */
    520 	err = dwc2_hcd_urb_dequeue(hsotg, dxfer->urb);
    521 	if (err) {
    522 		DPRINTF("dwc2_hcd_urb_dequeue failed\n");
    523 	}
    524 
    525 dying:
    526 	mutex_spin_exit(&hsotg->lock);
    527 	KASSERT(mutex_owned(&sc->sc_lock));
    528 }
    529 
    530 Static void
    531 dwc2_noop(struct usbd_pipe *pipe)
    532 {
    533 
    534 }
    535 
    536 Static void
    537 dwc2_device_clear_toggle(struct usbd_pipe *pipe)
    538 {
    539 
    540 	DPRINTF("toggle %d -> 0", pipe->up_endpoint->ue_toggle);
    541 }
    542 
    543 /***********************************************************************/
    544 
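         /*
          * Root hub control requests: string and standard device requests are
          * handled here or deferred to the usbroothub defaults (by returning
          * buflen); hub class requests go to dwc2_hcd_hub_control().
          */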
    545 Static int
    546 dwc2_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
    547     void *buf, int buflen)
    548 {
    549 	struct dwc2_softc *sc = bus->ub_hcpriv;
    550 	usbd_status err = USBD_IOERROR;
    551 	uint16_t len, value, index;
    552 	int totlen = 0;
    553 
    554 	if (sc->sc_dying)
    555 		return -1;
    556 
    557 	DPRINTFN(4, "type=0x%02x request=%02x\n",
    558 	    req->bmRequestType, req->bRequest);
    559 
    560 	len = UGETW(req->wLength);
    561 	value = UGETW(req->wValue);
    562 	index = UGETW(req->wIndex);
    563 
    564 #define C(x,y) ((x) | ((y) << 8))
    565 	switch (C(req->bRequest, req->bmRequestType)) {
    566 	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
    567 		DPRINTFN(8, "wValue=0x%04x\n", value);
    568 
    569 		if (len == 0)
    570 			break;
    571 		switch (value) {
    572 #define sd ((usb_string_descriptor_t *)buf)
    573 		case C(2, UDESC_STRING):
    574 			/* Product */
    575 			totlen = usb_makestrdesc(sd, len, "DWC2 root hub");
    576 			break;
    577 #undef sd
    578 		default:
    579 			/* default from usbroothub */
    580 			return buflen;
    581 		}
    582 		break;
    583 
    584 	case C(UR_GET_CONFIG, UT_READ_DEVICE):
    585 	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
    586 	case C(UR_GET_STATUS, UT_READ_INTERFACE):
    587 	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
    588 	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
    589 	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
    590 		/* default from usbroothub */
    591 		DPRINTFN(4, "returning %d (usbroothub default)", buflen);
    592 
    593 		return buflen;
    594 
    595 	default:
    596 		/* Hub requests */
    597 		err = dwc2_hcd_hub_control(sc->sc_hsotg,
    598 		    C(req->bRequest, req->bmRequestType), value, index,
    599 		    buf, len);
    600 		if (err) {
    601 			return -1;
    602 		}
    603 		totlen = len;
    604 	}
    605 
    606 	return totlen;
    607 }
    608 
    609 Static usbd_status
    610 dwc2_root_intr_transfer(struct usbd_xfer *xfer)
    611 {
    612 
    613 	DPRINTF("\n");
    614 
    615 	/* Pipe isn't running, start first */
    616 	return dwc2_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
    617 }
    618 
    619 Static usbd_status
    620 dwc2_root_intr_start(struct usbd_xfer *xfer)
    621 {
    622 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    623 
    624 	DPRINTF("\n");
    625 
    626 	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
    627 
    628 	if (sc->sc_dying)
    629 		return USBD_IOERROR;
    630 
    631 	KASSERT(sc->sc_intrxfer == NULL);
    632 	sc->sc_intrxfer = xfer;
    633 	xfer->ux_status = USBD_IN_PROGRESS;
    634 
    635 	return USBD_IN_PROGRESS;
    636 }
    637 
    638 /* Abort a root interrupt request. */
    639 Static void
    640 dwc2_root_intr_abort(struct usbd_xfer *xfer)
    641 {
    642 	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
    643 
    644 	DPRINTF("xfer=%p\n", xfer);
    645 
    646 	KASSERT(mutex_owned(&sc->sc_lock));
    647 	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
    648 
    649 	/* If xfer has already completed, nothing to do here.  */
    650 	if (sc->sc_intrxfer == NULL)
    651 		return;
    652 
    653 	/*
    654 	 * Otherwise, sc->sc_intrxfer had better be this transfer.
    655 	 * Cancel it.
    656 	 */
    657 	KASSERT(sc->sc_intrxfer == xfer);
    658 	KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
    659 	xfer->ux_status = USBD_CANCELLED;
    660 	usb_transfer_complete(xfer);
    661 }
    662 
    663 Static void
    664 dwc2_root_intr_close(struct usbd_pipe *pipe)
    665 {
    666 	struct dwc2_softc *sc __diagused = DWC2_PIPE2SC(pipe);
    667 
    668 	DPRINTF("\n");
    669 
    670 	KASSERT(mutex_owned(&sc->sc_lock));
    671 
    672 	/*
    673 	 * Caller must guarantee the xfer has completed first, by
    674 	 * closing the pipe only after normal completion or an abort.
    675 	 */
    676 	KASSERT(sc->sc_intrxfer == NULL);
    677 }
    678 
    679 Static void
    680 dwc2_root_intr_done(struct usbd_xfer *xfer)
    681 {
    682 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    683 
    684 	DPRINTF("\n");
    685 
    686 	/* Claim the xfer so it doesn't get completed again.  */
    687 	KASSERT(sc->sc_intrxfer == xfer);
    688 	KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
    689 	sc->sc_intrxfer = NULL;
    690 }
    691 
    692 /***********************************************************************/
    693 
    694 Static usbd_status
    695 dwc2_device_ctrl_transfer(struct usbd_xfer *xfer)
    696 {
    697 
    698 	DPRINTF("\n");
    699 
    700 	/* Pipe isn't running, start first */
    701 	return dwc2_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
    702 }
    703 
    704 Static usbd_status
    705 dwc2_device_ctrl_start(struct usbd_xfer *xfer)
    706 {
    707 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    708 	usbd_status err;
    709 
    710 	DPRINTF("\n");
    711 
    712 	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
    713 
    714 	xfer->ux_status = USBD_IN_PROGRESS;
    715 	err = dwc2_device_start(xfer);
    716 	if (err)
    717 		return err;
    718 
    719 	return USBD_IN_PROGRESS;
    720 }
    721 
    722 Static void
    723 dwc2_device_ctrl_abort(struct usbd_xfer *xfer)
    724 {
    725 	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
    726 
    727 	KASSERT(mutex_owned(&sc->sc_lock));
    728 
    729 	DPRINTF("xfer=%p\n", xfer);
    730 	usbd_xfer_abort(xfer);
    731 }
    732 
    733 Static void
    734 dwc2_device_ctrl_close(struct usbd_pipe *pipe)
    735 {
    736 	struct dwc2_pipe * const dpipe = DWC2_PIPE2DPIPE(pipe);
    737 
    738 	DPRINTF("pipe=%p\n", pipe);
    739 	dwc2_close_pipe(pipe);
    740 
    741 	usb_freemem(&dpipe->req_dma);
    742 }
    743 
    744 Static void
    745 dwc2_device_ctrl_done(struct usbd_xfer *xfer)
    746 {
    747 
    748 	DPRINTF("xfer=%p\n", xfer);
    749 }
    750 
    751 /***********************************************************************/
    752 
    753 Static usbd_status
    754 dwc2_device_bulk_transfer(struct usbd_xfer *xfer)
    755 {
    756 
    757 	DPRINTF("xfer=%p\n", xfer);
    758 
    759 	KASSERT(xfer->ux_status == USBD_NOT_STARTED);
    760 	xfer->ux_status = USBD_IN_PROGRESS;
    761 	return dwc2_device_start(xfer);
    762 }
    763 
    764 Static void
    765 dwc2_device_bulk_abort(struct usbd_xfer *xfer)
    766 {
    767 	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
    768 
    769 	KASSERT(mutex_owned(&sc->sc_lock));
    770 
    771 	DPRINTF("xfer=%p\n", xfer);
    772 	usbd_xfer_abort(xfer);
    773 }
    774 
    775 Static void
    776 dwc2_device_bulk_close(struct usbd_pipe *pipe)
    777 {
    778 
    779 	DPRINTF("pipe=%p\n", pipe);
    780 
    781 	dwc2_close_pipe(pipe);
    782 }
    783 
    784 Static void
    785 dwc2_device_bulk_done(struct usbd_xfer *xfer)
    786 {
    787 
    788 	DPRINTF("xfer=%p\n", xfer);
    789 }
    790 
    791 /***********************************************************************/
    792 
    793 Static usbd_status
    794 dwc2_device_intr_transfer(struct usbd_xfer *xfer)
    795 {
    796 
    797 	DPRINTF("xfer=%p\n", xfer);
    798 
    799 	/* Pipe isn't running, start first */
    800 	return dwc2_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
    801 }
    802 
    803 Static usbd_status
    804 dwc2_device_intr_start(struct usbd_xfer *xfer)
    805 {
     806 	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
    807 	struct usbd_device *dev = dpipe->pipe.up_dev;
    808 	struct dwc2_softc *sc = dev->ud_bus->ub_hcpriv;
    809 	usbd_status err;
    810 
    811 	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
    812 
    813 	xfer->ux_status = USBD_IN_PROGRESS;
    814 	err = dwc2_device_start(xfer);
    815 	if (err)
    816 		return err;
    817 
    818 	return USBD_IN_PROGRESS;
    819 }
    820 
    821 /* Abort a device interrupt request. */
    822 Static void
    823 dwc2_device_intr_abort(struct usbd_xfer *xfer)
    824 {
    825 	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
    826 
    827 	KASSERT(mutex_owned(&sc->sc_lock));
    828 
    829 	DPRINTF("xfer=%p\n", xfer);
    830 	usbd_xfer_abort(xfer);
    831 }
    832 
    833 Static void
    834 dwc2_device_intr_close(struct usbd_pipe *pipe)
    835 {
    836 
    837 	DPRINTF("pipe=%p\n", pipe);
    838 
    839 	dwc2_close_pipe(pipe);
    840 }
    841 
    842 Static void
    843 dwc2_device_intr_done(struct usbd_xfer *xfer)
    844 {
    845 
    846 	DPRINTF("\n");
    847 }
    848 
    849 /***********************************************************************/
    850 
    851 usbd_status
    852 dwc2_device_isoc_transfer(struct usbd_xfer *xfer)
    853 {
    854 
    855 	DPRINTF("xfer=%p\n", xfer);
    856 
    857 	KASSERT(xfer->ux_status == USBD_NOT_STARTED);
    858 	xfer->ux_status = USBD_IN_PROGRESS;
    859 	return dwc2_device_start(xfer);
    860 }
    861 
    862 void
    863 dwc2_device_isoc_abort(struct usbd_xfer *xfer)
    864 {
    865 	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
    866 	KASSERT(mutex_owned(&sc->sc_lock));
    867 
    868 	DPRINTF("xfer=%p\n", xfer);
    869 	usbd_xfer_abort(xfer);
    870 }
    871 
    872 void
    873 dwc2_device_isoc_close(struct usbd_pipe *pipe)
    874 {
    875 	DPRINTF("\n");
    876 
    877 	dwc2_close_pipe(pipe);
    878 }
    879 
    880 void
    881 dwc2_device_isoc_done(struct usbd_xfer *xfer)
    882 {
    883 
    884 	DPRINTF("\n");
    885 }
    886 
    887 
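         /*
          * Common start path for all device transfers: fill in the
          * dwc2_hcd_urb, create a QH for the endpoint on first use, allocate
          * a QTD and hand everything to the HCD core for scheduling.
          */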
    888 usbd_status
    889 dwc2_device_start(struct usbd_xfer *xfer)
    890 {
    891  	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
    892 	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
    893 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    894 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
    895 	struct dwc2_hcd_urb *dwc2_urb;
    896 
    897 	struct usbd_device *dev = xfer->ux_pipe->up_dev;
    898 	usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
    899 	uint8_t addr = dev->ud_addr;
    900 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
    901 	uint8_t epnum = UE_GET_ADDR(ed->bEndpointAddress);
    902 	uint8_t dir = UE_GET_DIR(ed->bEndpointAddress);
    903 	uint16_t mps = UE_GET_SIZE(UGETW(ed->wMaxPacketSize));
    904 	uint32_t len;
    905 
    906 	uint32_t flags = 0;
    907 	uint32_t off = 0;
    908 	int retval, err;
    909 	int alloc_bandwidth = 0;
    910 
    911 	DPRINTFN(1, "xfer=%p pipe=%p\n", xfer, xfer->ux_pipe);
    912 
    913 	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
    914 
    915 	if (xfertype == UE_ISOCHRONOUS ||
    916 	    xfertype == UE_INTERRUPT) {
    917 		mutex_spin_enter(&hsotg->lock);
    918 		if (!dwc2_hcd_is_bandwidth_allocated(hsotg, xfer))
    919 			alloc_bandwidth = 1;
    920 		mutex_spin_exit(&hsotg->lock);
    921 	}
    922 
    923 	/*
     924 	 * For control pipes the direction comes from the request; all
     925 	 * other transfer types were set correctly at pipe open time.
    926 	 */
    927 	if (xfertype == UE_CONTROL) {
    928 		usb_device_request_t *req = &xfer->ux_request;
    929 
    930 		DPRINTFN(3, "xfer=%p type=0x%02x request=0x%02x wValue=0x%04x "
    931 		    "wIndex=0x%04x len=%d addr=%d endpt=%d dir=%s speed=%d "
    932 		    "mps=%d\n",
    933 		    xfer, req->bmRequestType, req->bRequest, UGETW(req->wValue),
    934 		    UGETW(req->wIndex), UGETW(req->wLength), dev->ud_addr,
    935 		    epnum, dir == UT_READ ? "in" :"out", dev->ud_speed, mps);
    936 
    937 		/* Copy request packet to our DMA buffer */
    938 		memcpy(KERNADDR(&dpipe->req_dma, 0), req, sizeof(*req));
    939 		usb_syncmem(&dpipe->req_dma, 0, sizeof(*req),
    940 		    BUS_DMASYNC_PREWRITE);
    941 		len = UGETW(req->wLength);
    942 		if ((req->bmRequestType & UT_READ) == UT_READ) {
    943 			dir = UE_DIR_IN;
    944 		} else {
    945 			dir = UE_DIR_OUT;
    946 		}
    947 
    948 		DPRINTFN(3, "req = %p dma = %" PRIxBUSADDR " len %d dir %s\n",
    949 		    KERNADDR(&dpipe->req_dma, 0), DMAADDR(&dpipe->req_dma, 0),
    950 		    len, dir == UE_DIR_IN ? "in" : "out");
    951 	} else if (xfertype == UE_ISOCHRONOUS) {
    952 		DPRINTFN(3, "xfer=%p nframes=%d flags=%d addr=%d endpt=%d,"
    953 		    " mps=%d dir %s\n", xfer, xfer->ux_nframes, xfer->ux_flags, addr,
    954 		    epnum, mps, dir == UT_READ ? "in" :"out");
    955 
    956 #ifdef DIAGNOSTIC
    957 		len = 0;
    958 		for (size_t i = 0; i < xfer->ux_nframes; i++)
    959 			len += xfer->ux_frlengths[i];
    960 		if (len != xfer->ux_length)
    961 			panic("len (%d) != xfer->ux_length (%d)", len,
    962 			    xfer->ux_length);
    963 #endif
    964 		len = xfer->ux_length;
    965 	} else {
    966 		DPRINTFN(3, "xfer=%p len=%d flags=%d addr=%d endpt=%d,"
    967 		    " mps=%d dir %s\n", xfer, xfer->ux_length, xfer->ux_flags, addr,
    968 		    epnum, mps, dir == UT_READ ? "in" :"out");
    969 
    970 		len = xfer->ux_length;
    971 	}
    972 
    973 	dwc2_urb = dxfer->urb;
    974 	if (!dwc2_urb)
    975 		return USBD_NOMEM;
    976 
    977 	KASSERT(dwc2_urb->packet_count == xfer->ux_nframes);
    978 	memset(dwc2_urb, 0, sizeof(*dwc2_urb) +
    979 	    sizeof(dwc2_urb->iso_descs[0]) * dwc2_urb->packet_count);
    980 
    981 	dwc2_urb->priv = xfer;
    982 	dwc2_urb->packet_count = xfer->ux_nframes;
    983 
    984 	dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, addr, epnum, xfertype, dir,
    985 	    mps);
    986 
    987 	if (xfertype == UE_CONTROL) {
    988 		dwc2_urb->setup_usbdma = &dpipe->req_dma;
    989 		dwc2_urb->setup_packet = KERNADDR(&dpipe->req_dma, 0);
    990 		dwc2_urb->setup_dma = DMAADDR(&dpipe->req_dma, 0);
    991 	} else {
    992 		/* XXXNH - % mps required? */
    993 		if ((xfer->ux_flags & USBD_FORCE_SHORT_XFER) && (len % mps) == 0)
    994 		    flags |= URB_SEND_ZERO_PACKET;
    995 	}
    996 	flags |= URB_GIVEBACK_ASAP;
    997 
    998 	/*
    999 	 * control transfers with no data phase don't touch usbdma, but
   1000 	 * everything else does.
   1001 	 */
   1002 	if (!(xfertype == UE_CONTROL && len == 0)) {
   1003 		dwc2_urb->usbdma = &xfer->ux_dmabuf;
   1004 		dwc2_urb->buf = KERNADDR(dwc2_urb->usbdma, 0);
   1005 		dwc2_urb->dma = DMAADDR(dwc2_urb->usbdma, 0);
   1006 
   1007 		usb_syncmem(&xfer->ux_dmabuf, 0, len,
   1008 		    dir == UE_DIR_IN ?
   1009 			BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
   1010  	}
   1011 	dwc2_urb->length = len;
   1012  	dwc2_urb->flags = flags;
   1013 	dwc2_urb->status = -EINPROGRESS;
   1014 
   1015 	if (xfertype == UE_INTERRUPT ||
   1016 	    xfertype == UE_ISOCHRONOUS) {
   1017 		uint16_t ival;
   1018 
   1019 		if (xfertype == UE_INTERRUPT &&
   1020 		    dpipe->pipe.up_interval != USBD_DEFAULT_INTERVAL) {
   1021 			ival = dpipe->pipe.up_interval;
   1022 		} else {
   1023 			ival = ed->bInterval;
   1024 		}
   1025 
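         		/*
         		 * bInterval is an exponent for high-speed endpoints and
         		 * full-speed isochronous endpoints (period is
         		 * 2^(bInterval-1) (micro)frames); for other full/low-speed
         		 * interrupt endpoints it is the period in frames, clamped
         		 * to at least 10 below.
         		 */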
   1026 		if (ival < 1) {
   1027 			retval = -ENODEV;
   1028 			goto fail;
   1029 		}
   1030 		if (dev->ud_speed == USB_SPEED_HIGH ||
   1031 		   (dev->ud_speed == USB_SPEED_FULL && xfertype == UE_ISOCHRONOUS)) {
   1032 			if (ival > 16) {
   1033 				/*
   1034 				 * illegal with HS/FS, but there were
   1035 				 * documentation bugs in the spec
   1036 				 */
   1037 				ival = 256;
   1038 			} else {
   1039 				ival = (1 << (ival - 1));
   1040 			}
   1041 		} else {
   1042 			if (xfertype == UE_INTERRUPT && ival < 10)
   1043 				ival = 10;
   1044 		}
   1045 		dwc2_urb->interval = ival;
   1046 	}
   1047 
   1048 	/* XXXNH bring down from callers?? */
   1049 // 	mutex_enter(&sc->sc_lock);
   1050 
   1051 	xfer->ux_actlen = 0;
   1052 
   1053 	KASSERT(xfertype != UE_ISOCHRONOUS ||
   1054 	    xfer->ux_nframes <= dwc2_urb->packet_count);
   1055 	KASSERTMSG(xfer->ux_nframes == 0 || xfertype == UE_ISOCHRONOUS,
   1056 	    "nframes %d xfertype %d\n", xfer->ux_nframes, xfertype);
   1057 
   1058 	off = 0;
   1059 	for (size_t i = 0; i < xfer->ux_nframes; ++i) {
   1060 		DPRINTFN(3, "xfer=%p frame=%zd offset=%d length=%d\n", xfer, i,
   1061 		    off, xfer->ux_frlengths[i]);
   1062 
   1063 		dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i, off,
   1064 		    xfer->ux_frlengths[i]);
   1065 		off += xfer->ux_frlengths[i];
   1066 	}
   1067 
   1068 	struct dwc2_qh *qh = dpipe->priv;
   1069 	struct dwc2_qtd *qtd;
   1070 	bool qh_allocated = false;
   1071 
   1072 	/* Create QH for the endpoint if it doesn't exist */
   1073 	if (!qh) {
   1074 		qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, GFP_ATOMIC);
   1075 		if (!qh) {
   1076 			retval = -ENOMEM;
   1077 			goto fail;
   1078 		}
   1079 		dpipe->priv = qh;
   1080 		qh_allocated = true;
   1081 	}
   1082 
   1083 	qtd = pool_cache_get(sc->sc_qtdpool, PR_NOWAIT);
   1084 	if (!qtd) {
   1085 		retval = -ENOMEM;
   1086 		goto fail1;
   1087 	}
   1088 	memset(qtd, 0, sizeof(*qtd));
   1089 
   1090 	/* might need to check cpu_intr_p */
   1091 	mutex_spin_enter(&hsotg->lock);
   1092 	retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
   1093 	if (retval)
   1094 		goto fail2;
   1095 	xfer->ux_status = USBD_IN_PROGRESS;
   1096 	usbd_xfer_schedule_timeout(xfer);
   1097 
   1098 	if (alloc_bandwidth) {
   1099 		dwc2_allocate_bus_bandwidth(hsotg,
   1100 				dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
   1101 				xfer);
   1102 	}
   1103 	mutex_spin_exit(&hsotg->lock);
   1104 
   1105 	return USBD_IN_PROGRESS;
   1106 
   1107 fail2:
   1108 	dwc2_urb->priv = NULL;
   1109 	mutex_spin_exit(&hsotg->lock);
   1110 	pool_cache_put(sc->sc_qtdpool, qtd);
   1111 
   1112 fail1:
   1113 	if (qh_allocated) {
   1114 		dpipe->priv = NULL;
   1115 		dwc2_hcd_qh_free(hsotg, qh);
   1116 	}
   1117 fail:
   1118 
   1119 	switch (retval) {
   1120 	case -EINVAL:
   1121 	case -ENODEV:
   1122 		err = USBD_INVAL;
   1123 		break;
   1124 	case -ENOMEM:
   1125 		err = USBD_NOMEM;
   1126 		break;
   1127 	default:
   1128 		err = USBD_IOERROR;
   1129 	}
   1130 
   1131 	return err;
   1132 
   1133 }
   1134 
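         /*
          * Hardware interrupt entry point, hooked up by the bus-specific
          * attachment code.  When polling, just acknowledge the core
          * interrupts; otherwise dispatch to the common/HCD handlers.
          */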
   1135 int dwc2_intr(void *p)
   1136 {
   1137 	struct dwc2_softc *sc = p;
   1138 	struct dwc2_hsotg *hsotg;
   1139 	int ret = 0;
   1140 
   1141 	if (sc == NULL)
   1142 		return 0;
   1143 
   1144 	hsotg = sc->sc_hsotg;
   1145 	mutex_spin_enter(&hsotg->lock);
   1146 
   1147 	if (sc->sc_dying || !device_has_power(sc->sc_dev))
   1148 		goto done;
   1149 
   1150 	if (sc->sc_bus.ub_usepolling) {
   1151 		uint32_t intrs;
   1152 
   1153 		intrs = dwc2_read_core_intr(hsotg);
   1154 		DWC2_WRITE_4(hsotg, GINTSTS, intrs);
   1155 	} else {
   1156 		ret = dwc2_interrupt(sc);
   1157 	}
   1158 
   1159 done:
   1160 	mutex_spin_exit(&hsotg->lock);
   1161 
   1162 	return ret;
   1163 }
   1164 
   1165 int
   1166 dwc2_interrupt(struct dwc2_softc *sc)
   1167 {
   1168 	int ret = 0;
   1169 
   1170 	if (sc->sc_hcdenabled) {
   1171 		ret |= dwc2_handle_hcd_intr(sc->sc_hsotg);
   1172 	}
   1173 
   1174 	ret |= dwc2_handle_common_intr(sc->sc_hsotg);
   1175 
   1176 	return ret;
   1177 }
   1178 
   1179 /***********************************************************************/
   1180 
   1181 int
   1182 dwc2_detach(struct dwc2_softc *sc, int flags)
   1183 {
   1184 	int rv = 0;
   1185 
   1186 	if (sc->sc_child != NULL)
   1187 		rv = config_detach(sc->sc_child, flags);
   1188 
   1189 	return rv;
   1190 }
   1191 
   1192 bool
   1193 dwc2_shutdown(device_t self, int flags)
   1194 {
   1195 	struct dwc2_softc *sc = device_private(self);
   1196 
   1197 	sc = sc;
   1198 
   1199 	return true;
   1200 }
   1201 
   1202 void
   1203 dwc2_childdet(device_t self, device_t child)
   1204 {
   1205 	struct dwc2_softc *sc = device_private(self);
   1206 
   1207 	sc = sc;
   1208 }
   1209 
   1210 int
   1211 dwc2_activate(device_t self, enum devact act)
   1212 {
   1213 	struct dwc2_softc *sc = device_private(self);
   1214 
   1215 	sc = sc;
   1216 
   1217 	return 0;
   1218 }
   1219 
   1220 bool
   1221 dwc2_resume(device_t dv, const pmf_qual_t *qual)
   1222 {
   1223 	struct dwc2_softc *sc = device_private(dv);
   1224 
   1225 	sc = sc;
   1226 
   1227 	return true;
   1228 }
   1229 
   1230 bool
   1231 dwc2_suspend(device_t dv, const pmf_qual_t *qual)
   1232 {
   1233 	struct dwc2_softc *sc = device_private(dv);
   1234 
   1235 	sc = sc;
   1236 
   1237 	return true;
   1238 }
   1239 
   1240 /***********************************************************************/
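         /*
          * One-time controller setup: initialise the usbd_bus methods, lock,
          * soft interrupt and pools, allocate the dwc2_hsotg core state, read
          * the hardware parameters and bring up the HCD (and gadget side,
          * where configured).
          */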
   1241 int
   1242 dwc2_init(struct dwc2_softc *sc)
   1243 {
   1244 	int err = 0;
   1245 
   1246 	err = linux_workqueue_init();
   1247 	if (err)
   1248 		return err;
   1249 
   1250 	sc->sc_bus.ub_hcpriv = sc;
   1251 	sc->sc_bus.ub_revision = USBREV_2_0;
   1252 	sc->sc_bus.ub_methods = &dwc2_bus_methods;
   1253 	sc->sc_bus.ub_pipesize = sizeof(struct dwc2_pipe);
   1254 	sc->sc_bus.ub_usedma = true;
   1255 	sc->sc_hcdenabled = false;
   1256 
   1257 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
   1258 
   1259 	TAILQ_INIT(&sc->sc_complete);
   1260 
   1261 	sc->sc_rhc_si = softint_establish(SOFTINT_USB | SOFTINT_MPSAFE,
   1262 	    dwc2_rhc, sc);
   1263 
   1264 	sc->sc_xferpool = pool_cache_init(sizeof(struct dwc2_xfer), 0, 0, 0,
   1265 	    "dwc2xfer", NULL, IPL_USB, NULL, NULL, NULL);
   1266 	sc->sc_qhpool = pool_cache_init(sizeof(struct dwc2_qh), 0, 0, 0,
   1267 	    "dwc2qh", NULL, IPL_USB, NULL, NULL, NULL);
   1268 	sc->sc_qtdpool = pool_cache_init(sizeof(struct dwc2_qtd), 0, 0, 0,
   1269 	    "dwc2qtd", NULL, IPL_USB, NULL, NULL, NULL);
   1270 
   1271 	sc->sc_hsotg = kmem_zalloc(sizeof(struct dwc2_hsotg), KM_SLEEP);
   1272 	sc->sc_hsotg->hsotg_sc = sc;
   1273 	sc->sc_hsotg->dev = sc->sc_dev;
   1274 	sc->sc_hcdenabled = true;
   1275 
   1276 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
   1277 	struct dwc2_core_params defparams;
   1278 	int retval;
   1279 
   1280 	if (sc->sc_params == NULL) {
   1281 		/* Default all params to autodetect */
   1282 		dwc2_set_all_params(&defparams, -1);
   1283 		sc->sc_params = &defparams;
   1284 
   1285 		/*
   1286 		 * Disable descriptor dma mode by default as the HW can support
   1287 		 * it, but does not support it for SPLIT transactions.
   1288 		 */
   1289 		defparams.dma_desc_enable = 0;
   1290 	}
   1291 	hsotg->dr_mode = USB_DR_MODE_HOST;
   1292 
   1293 	/* Detect config values from hardware */
   1294 	retval = dwc2_get_hwparams(hsotg);
   1295 	if (retval) {
   1296 		goto fail2;
   1297 	}
   1298 
   1299 	hsotg->core_params = kmem_zalloc(sizeof(*hsotg->core_params), KM_SLEEP);
   1300 	dwc2_set_all_params(hsotg->core_params, -1);
   1301 
   1302 	/* Validate parameter values */
   1303 	dwc2_set_parameters(hsotg, sc->sc_params);
   1304 
   1305 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
   1306     IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
   1307 	if (hsotg->dr_mode != USB_DR_MODE_HOST) {
   1308 		retval = dwc2_gadget_init(hsotg);
   1309 		if (retval)
   1310 			goto fail2;
   1311 		hsotg->gadget_enabled = 1;
   1312 	}
   1313 #endif
   1314 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \
   1315     IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
   1316 	if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) {
   1317 		retval = dwc2_hcd_init(hsotg);
   1318 		if (retval) {
   1319 			if (hsotg->gadget_enabled)
   1320 				dwc2_hsotg_remove(hsotg);
   1321 			goto fail2;
   1322 		}
    1323 		hsotg->hcd_enabled = 1;
    1324 	}
   1325 #endif
   1326 
   1327 	uint32_t snpsid = hsotg->hw_params.snpsid;
   1328 	aprint_verbose_dev(sc->sc_dev, "Core Release: %x.%x%x%x (snpsid=%x)\n",
   1329 	    snpsid >> 12 & 0xf, snpsid >> 8 & 0xf,
   1330 	    snpsid >> 4 & 0xf, snpsid & 0xf, snpsid);
   1331 
   1332 	return 0;
   1333 
   1334 fail2:
   1335 	err = -retval;
   1336 	kmem_free(sc->sc_hsotg, sizeof(struct dwc2_hsotg));
   1337 	softint_disestablish(sc->sc_rhc_si);
   1338 
   1339 	return err;
   1340 }
   1341 
   1342 #if 0
   1343 /*
   1344  * curmode is a mode indication bit 0 = device, 1 = host
   1345  */
   1346 static const char * const intnames[32] = {
   1347 	"curmode",	"modemis",	"otgint",	"sof",
   1348 	"rxflvl",	"nptxfemp",	"ginnakeff",	"goutnakeff",
   1349 	"ulpickint",	"i2cint",	"erlysusp",	"usbsusp",
   1350 	"usbrst",	"enumdone",	"isooutdrop",	"eopf",
   1351 	"restore_done",	"epmis",	"iepint",	"oepint",
   1352 	"incompisoin",	"incomplp",	"fetsusp",	"resetdet",
   1353 	"prtint",	"hchint",	"ptxfemp",	"lpm",
   1354 	"conidstschng",	"disconnint",	"sessreqint",	"wkupint"
   1355 };
   1356 
   1357 
   1358 /***********************************************************************/
   1359 
   1360 #endif
   1361 
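         /*
          * Callbacks used by the shared HCD core to look up the upstream hub
          * address/port and the device speed (needed, among other things, for
          * split transactions).
          */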
   1362 void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
   1363 			int *hub_port)
   1364 {
   1365 	struct usbd_xfer *xfer = context;
   1366 	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
   1367 	struct usbd_device *dev = dpipe->pipe.up_dev;
   1368 
   1369 	*hub_addr = dev->ud_myhsport->up_parent->ud_addr;
   1370  	*hub_port = dev->ud_myhsport->up_portno;
   1371 }
   1372 
   1373 int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
   1374 {
   1375 	struct usbd_xfer *xfer = context;
   1376 	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
   1377 	struct usbd_device *dev = dpipe->pipe.up_dev;
   1378 
   1379 	return dev->ud_speed;
   1380 }
   1381 
   1382 /*
   1383  * Sets the final status of an URB and returns it to the upper layer. Any
   1384  * required cleanup of the URB is performed.
   1385  *
   1386  * Must be called with interrupt disabled and spinlock held
   1387  */
   1388 void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
   1389     int status)
   1390 {
   1391 	struct usbd_xfer *xfer;
   1392 	struct dwc2_xfer *dxfer;
   1393 	struct dwc2_softc *sc;
   1394 	usb_endpoint_descriptor_t *ed;
   1395 	uint8_t xfertype;
   1396 
   1397 	KASSERT(mutex_owned(&hsotg->lock));
   1398 
   1399 	if (!qtd) {
   1400 		dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
   1401 		return;
   1402 	}
   1403 
   1404 	if (!qtd->urb) {
   1405 		dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
   1406 		return;
   1407 	}
   1408 
   1409 	xfer = qtd->urb->priv;
   1410 	if (!xfer) {
   1411 		dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
   1412 		return;
   1413 	}
   1414 
   1415 	dxfer = DWC2_XFER2DXFER(xfer);
   1416 	sc = DWC2_XFER2SC(xfer);
   1417 	ed = xfer->ux_pipe->up_endpoint->ue_edesc;
   1418 	xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
   1419 
   1420 	struct dwc2_hcd_urb *urb = qtd->urb;
   1421 	xfer->ux_actlen = dwc2_hcd_urb_get_actual_length(urb);
   1422 
   1423 	DPRINTFN(3, "xfer=%p actlen=%d\n", xfer, xfer->ux_actlen);
   1424 
   1425 	if (xfertype == UE_ISOCHRONOUS) {
   1426 		xfer->ux_actlen = 0;
   1427 		for (size_t i = 0; i < xfer->ux_nframes; ++i) {
   1428 			xfer->ux_frlengths[i] =
   1429 				dwc2_hcd_urb_get_iso_desc_actual_length(
   1430 						urb, i);
   1431 			DPRINTFN(1, "xfer=%p frame=%zu length=%d\n", xfer, i,
   1432 			    xfer->ux_frlengths[i]);
   1433 			xfer->ux_actlen += xfer->ux_frlengths[i];
   1434 		}
   1435 		DPRINTFN(1, "xfer=%p actlen=%d (isoc)\n", xfer, xfer->ux_actlen);
   1436 	}
   1437 
   1438 	if (xfertype == UE_ISOCHRONOUS && dbg_perio()) {
   1439 		for (size_t i = 0; i < xfer->ux_nframes; i++)
   1440 			dev_vdbg(hsotg->dev, " ISO Desc %zu status %d\n",
   1441 				 i, urb->iso_descs[i].status);
   1442 	}
   1443 
   1444 	if (!status) {
   1445 		if (!(xfer->ux_flags & USBD_SHORT_XFER_OK) &&
   1446 		    xfer->ux_actlen < xfer->ux_length)
   1447 			status = -EIO;
   1448 	}
   1449 
   1450 	switch (status) {
   1451 	case 0:
   1452 		dxfer->intr_status = USBD_NORMAL_COMPLETION;
   1453 		break;
   1454 	case -EPIPE:
   1455 		dxfer->intr_status = USBD_STALLED;
   1456 		break;
   1457 	case -EPROTO:
   1458 		dxfer->intr_status = USBD_INVAL;
   1459 		break;
   1460 	case -EIO:
   1461 		dxfer->intr_status = USBD_IOERROR;
   1462 		break;
   1463 	case -EOVERFLOW:
   1464 		dxfer->intr_status = USBD_IOERROR;
   1465 		break;
   1466 	default:
   1467 		dxfer->intr_status = USBD_IOERROR;
   1468 		printf("%s: unknown error status %d\n", __func__, status);
   1469 	}
   1470 
   1471 	if (dxfer->intr_status == USBD_NORMAL_COMPLETION) {
   1472 		/*
   1473 		 * control transfers with no data phase don't touch dmabuf, but
   1474 		 * everything else does.
   1475 		 */
   1476 		if (!(xfertype == UE_CONTROL &&
   1477 		    UGETW(xfer->ux_request.wLength) == 0) &&
   1478 		    xfer->ux_actlen > 0	/* XXX PR/53503 */
   1479 		    ) {
   1480 			int rd = usbd_xfer_isread(xfer);
   1481 
   1482 			usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_actlen,
   1483 			    rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
   1484 		}
   1485 	}
   1486 
   1487 	if (xfertype == UE_ISOCHRONOUS ||
   1488 	    xfertype == UE_INTERRUPT) {
   1489 		struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
   1490 
   1491 		dwc2_free_bus_bandwidth(hsotg,
   1492 					dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
   1493 					xfer);
   1494 	}
   1495 
   1496 	qtd->urb = NULL;
   1497 	KASSERT(mutex_owned(&hsotg->lock));
   1498 
   1499 	TAILQ_INSERT_TAIL(&sc->sc_complete, dxfer, xnext);
   1500 
   1501 	mutex_spin_exit(&hsotg->lock);
   1502 	usb_schedsoftintr(&sc->sc_bus);
   1503 	mutex_spin_enter(&hsotg->lock);
   1504 }
   1505 
   1506 
   1507 int
   1508 _dwc2_hcd_start(struct dwc2_hsotg *hsotg)
   1509 {
   1510 	dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
   1511 
   1512 	mutex_spin_enter(&hsotg->lock);
   1513 
   1514 	hsotg->lx_state = DWC2_L0;
   1515 
   1516 	if (dwc2_is_device_mode(hsotg)) {
   1517 		mutex_spin_exit(&hsotg->lock);
   1518 		return 0;	/* why 0 ?? */
   1519 	}
   1520 
   1521 	dwc2_hcd_reinit(hsotg);
   1522 
   1523 	mutex_spin_exit(&hsotg->lock);
   1524 	return 0;
   1525 }
   1526 
   1527 int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
   1528 {
   1529 
   1530 	return false;
   1531 }
   1532