/*	$NetBSD: dwc2.c,v 1.68 2020/02/12 16:02:01 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2.c,v 1.68 2020/02/12 16:02:01 riastradh Exp $");

#include "opt_usb.h"

#include <sys/param.h>

#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/select.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/endian.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>
#include <dev/usb/usbroothub.h>

#include <dwc2/dwc2.h>
#include <dwc2/dwc2var.h>

#include "dwc2_core.h"
#include "dwc2_hcd.h"

#ifdef DWC2_COUNTERS
#define	DWC2_EVCNT_ADD(a,b)	((void)((a).ev_count += (b)))
#else
#define	DWC2_EVCNT_ADD(a,b)	do { } while (/*CONSTCOND*/0)
#endif
#define	DWC2_EVCNT_INCR(a)	DWC2_EVCNT_ADD((a), 1)

#ifdef DWC2_DEBUG
#define	DPRINTFN(n,fmt,...) do {			\
	if (dwc2debug >= (n)) {			\
		printf("%s: " fmt,			\
		__FUNCTION__,## __VA_ARGS__);		\
	}						\
} while (0)
#define	DPRINTF(...)	DPRINTFN(1, __VA_ARGS__)
int dwc2debug = 0;

SYSCTL_SETUP(sysctl_hw_dwc2_setup, "sysctl hw.dwc2 setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "dwc2",
	    SYSCTL_DESCR("dwc2 global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

	/* control debugging printfs */
	err = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &dwc2debug, sizeof(dwc2debug), CTL_CREATE, CTL_EOL);
	if (err)
		goto fail;

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}
#else
#define	DPRINTF(...) do { } while (0)
#define	DPRINTFN(...) do { } while (0)
#endif

Static usbd_status	dwc2_open(struct usbd_pipe *);
Static void		dwc2_poll(struct usbd_bus *);
Static void		dwc2_softintr(void *);

Static struct usbd_xfer *
			dwc2_allocx(struct usbd_bus *, unsigned int);
Static void		dwc2_freex(struct usbd_bus *, struct usbd_xfer *);
Static void		dwc2_get_lock(struct usbd_bus *, kmutex_t **);
Static bool		dwc2_dying(struct usbd_bus *);
Static int		dwc2_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
			    void *, int);

Static usbd_status	dwc2_root_intr_transfer(struct usbd_xfer *);
Static usbd_status	dwc2_root_intr_start(struct usbd_xfer *);
Static void		dwc2_root_intr_abort(struct usbd_xfer *);
Static void		dwc2_root_intr_close(struct usbd_pipe *);
Static void		dwc2_root_intr_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_ctrl_transfer(struct usbd_xfer *);
Static usbd_status	dwc2_device_ctrl_start(struct usbd_xfer *);
Static void		dwc2_device_ctrl_abort(struct usbd_xfer *);
Static void		dwc2_device_ctrl_close(struct usbd_pipe *);
Static void		dwc2_device_ctrl_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_bulk_transfer(struct usbd_xfer *);
Static void		dwc2_device_bulk_abort(struct usbd_xfer *);
Static void		dwc2_device_bulk_close(struct usbd_pipe *);
Static void		dwc2_device_bulk_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_intr_transfer(struct usbd_xfer *);
Static usbd_status	dwc2_device_intr_start(struct usbd_xfer *);
Static void		dwc2_device_intr_abort(struct usbd_xfer *);
Static void		dwc2_device_intr_close(struct usbd_pipe *);
Static void		dwc2_device_intr_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_isoc_transfer(struct usbd_xfer *);
Static void		dwc2_device_isoc_abort(struct usbd_xfer *);
Static void		dwc2_device_isoc_close(struct usbd_pipe *);
Static void		dwc2_device_isoc_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_start(struct usbd_xfer *);

Static void		dwc2_close_pipe(struct usbd_pipe *);
Static void		dwc2_abortx(struct usbd_xfer *);

Static void		dwc2_device_clear_toggle(struct usbd_pipe *);
Static void		dwc2_noop(struct usbd_pipe *pipe);

Static int		dwc2_interrupt(struct dwc2_softc *);
Static void		dwc2_rhc(void *);

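/*
 * Bus bandwidth bookkeeping hooks called by the shared HCD code for
 * periodic transfers; this port keeps no separate accounting, so they
 * are intentionally empty.
 */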
static inline void
dwc2_allocate_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
			    struct usbd_xfer *xfer)
{
}

static inline void
dwc2_free_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
			struct usbd_xfer *xfer)
{
}

Static const struct usbd_bus_methods dwc2_bus_methods = {
	.ubm_open =	dwc2_open,
	.ubm_softint =	dwc2_softintr,
	.ubm_dopoll =	dwc2_poll,
	.ubm_allocx =	dwc2_allocx,
	.ubm_freex =	dwc2_freex,
	.ubm_abortx =	dwc2_abortx,
	.ubm_dying =	dwc2_dying,
	.ubm_getlock =	dwc2_get_lock,
	.ubm_rhctrl =	dwc2_roothub_ctrl,
};

Static const struct usbd_pipe_methods dwc2_root_intr_methods = {
	.upm_transfer =	dwc2_root_intr_transfer,
	.upm_start =	dwc2_root_intr_start,
	.upm_abort =	dwc2_root_intr_abort,
	.upm_close =	dwc2_root_intr_close,
	.upm_cleartoggle =	dwc2_noop,
	.upm_done =	dwc2_root_intr_done,
};

Static const struct usbd_pipe_methods dwc2_device_ctrl_methods = {
	.upm_transfer =	dwc2_device_ctrl_transfer,
	.upm_start =	dwc2_device_ctrl_start,
	.upm_abort =	dwc2_device_ctrl_abort,
	.upm_close =	dwc2_device_ctrl_close,
	.upm_cleartoggle =	dwc2_noop,
	.upm_done =	dwc2_device_ctrl_done,
};

Static const struct usbd_pipe_methods dwc2_device_intr_methods = {
	.upm_transfer =	dwc2_device_intr_transfer,
	.upm_start =	dwc2_device_intr_start,
	.upm_abort =	dwc2_device_intr_abort,
	.upm_close =	dwc2_device_intr_close,
	.upm_cleartoggle =	dwc2_device_clear_toggle,
	.upm_done =	dwc2_device_intr_done,
};

Static const struct usbd_pipe_methods dwc2_device_bulk_methods = {
	.upm_transfer =	dwc2_device_bulk_transfer,
	.upm_abort =	dwc2_device_bulk_abort,
	.upm_close =	dwc2_device_bulk_close,
	.upm_cleartoggle =	dwc2_device_clear_toggle,
	.upm_done =	dwc2_device_bulk_done,
};

Static const struct usbd_pipe_methods dwc2_device_isoc_methods = {
	.upm_transfer =	dwc2_device_isoc_transfer,
	.upm_abort =	dwc2_device_isoc_abort,
	.upm_close =	dwc2_device_isoc_close,
	.upm_cleartoggle =	dwc2_noop,
	.upm_done =	dwc2_device_isoc_done,
};

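/*
 * Allocate an xfer from the per-controller pool and attach a dwc2 urb
 * with room for nframes isochronous descriptors.
 */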
struct usbd_xfer *
dwc2_allocx(struct usbd_bus *bus, unsigned int nframes)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_xfer *dxfer;

	DPRINTFN(10, "\n");

	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolget);
	dxfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
	if (dxfer != NULL) {
		memset(dxfer, 0, sizeof(*dxfer));
		dxfer->urb = dwc2_hcd_urb_alloc(sc->sc_hsotg,
		    nframes, GFP_KERNEL);
#ifdef DIAGNOSTIC
		dxfer->xfer.ux_state = XFER_BUSY;
#endif
	}
	return (struct usbd_xfer *)dxfer;
}

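/*
 * Release an xfer: free its dwc2 urb and return the wrapper to the pool.
 */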
void
dwc2_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);

	DPRINTFN(10, "\n");

#ifdef DIAGNOSTIC
	if (xfer->ux_state != XFER_BUSY &&
	    xfer->ux_status != USBD_NOT_STARTED) {
		DPRINTF("xfer=%p not busy, 0x%08x\n", xfer, xfer->ux_state);
	}
	xfer->ux_state = XFER_FREE;
#endif
	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolput);
	dwc2_hcd_urb_free(sc->sc_hsotg, dxfer->urb, dxfer->urb->packet_count);
	pool_cache_put(sc->sc_xferpool, xfer);
}

Static bool
dwc2_dying(struct usbd_bus *bus)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);

	return sc->sc_dying;
}

Static void
dwc2_get_lock(struct usbd_bus *bus, kmutex_t **lock)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);

	*lock = &sc->sc_lock;
}

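/*
 * Root hub change soft interrupt: report a change on our single port by
 * completing the pending root hub interrupt transfer, if any.
 */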
Static void
dwc2_rhc(void *addr)
{
	struct dwc2_softc *sc = addr;
	struct usbd_xfer *xfer;
	u_char *p;

	DPRINTF("\n");
	mutex_enter(&sc->sc_lock);
	xfer = sc->sc_intrxfer;

	if (xfer == NULL) {
		/* Just ignore the change. */
		mutex_exit(&sc->sc_lock);
		return;

	}
	/* set port bit */
	p = KERNADDR(&xfer->ux_dmabuf, 0);

	p[0] = 0x02;	/* we only have one port (1 << 1) */

	xfer->ux_actlen = xfer->ux_length;
	xfer->ux_status = USBD_NORMAL_COMPLETION;

	usb_transfer_complete(xfer);
	mutex_exit(&sc->sc_lock);
}

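/*
 * USB soft interrupt: hand completed transfers collected by the hard
 * interrupt handler (on sc_complete) back to the USB stack.
 */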
Static void
dwc2_softintr(void *v)
{
	struct usbd_bus *bus = v;
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_xfer *dxfer, *next;
	TAILQ_HEAD(, dwc2_xfer) claimed = TAILQ_HEAD_INITIALIZER(claimed);

	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));

	/*
	 * Grab all the xfers that have not been aborted or timed out.
	 * Do so under a single lock -- without dropping it to run
	 * usb_transfer_complete as we go -- so that dwc2_abortx won't
	 * remove next out from under us during iteration when we've
	 * dropped the lock.
	 */
	mutex_spin_enter(&hsotg->lock);
	TAILQ_FOREACH_SAFE(dxfer, &sc->sc_complete, xnext, next) {
		if (!usbd_xfer_trycomplete(&dxfer->xfer))
			/*
			 * The hard interrupt handler decided to
			 * complete the xfer, and put it on sc_complete
			 * to pass it to us in the soft interrupt
			 * handler, but in the time between hard
			 * interrupt and soft interrupt, the xfer was
			 * aborted or timed out and we lost the race.
			 */
			continue;
		KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS);
		KASSERT(dxfer->intr_status != USBD_CANCELLED);
		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
		TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
		TAILQ_INSERT_TAIL(&claimed, dxfer, xnext);
	}
	mutex_spin_exit(&hsotg->lock);

	/* Now complete them.  */
	while (!TAILQ_EMPTY(&claimed)) {
		dxfer = TAILQ_FIRST(&claimed);
		KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS);
		KASSERT(dxfer->intr_status != USBD_CANCELLED);
		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
		TAILQ_REMOVE(&claimed, dxfer, xnext);

		dxfer->xfer.ux_status = dxfer->intr_status;
		usb_transfer_complete(&dxfer->xfer);
	}
}

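/*
 * Open a pipe: pick the pipe methods for the endpoint type (or the root
 * hub methods) and, for control pipes, allocate DMA memory for the
 * request packet.
 */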
usbd_status
dwc2_open(struct usbd_pipe *pipe)
{
	struct usbd_device *dev = pipe->up_dev;
	struct dwc2_softc *sc = DWC2_PIPE2SC(pipe);
	struct dwc2_pipe *dpipe = DWC2_PIPE2DPIPE(pipe);
	usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc;
	uint8_t addr = dev->ud_addr;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	usbd_status err;

	DPRINTF("pipe %p addr %d xfertype %d dir %s\n", pipe, addr, xfertype,
	    UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN ? "in" : "out");

	if (sc->sc_dying) {
		return USBD_IOERROR;
	}

	if (addr == dev->ud_bus->ub_rhaddr) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->up_methods = &roothub_ctrl_methods;
			break;
		case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
			pipe->up_methods = &dwc2_root_intr_methods;
			break;
		default:
			DPRINTF("bad bEndpointAddress 0x%02x\n",
			    ed->bEndpointAddress);
			return USBD_INVAL;
		}
		DPRINTF("root hub pipe open\n");
		return USBD_NORMAL_COMPLETION;
	}

	switch (xfertype) {
	case UE_CONTROL:
		pipe->up_methods = &dwc2_device_ctrl_methods;
		err = usb_allocmem(&sc->sc_bus, sizeof(usb_device_request_t),
		    0, &dpipe->req_dma);
		if (err)
			return err;
		break;
	case UE_INTERRUPT:
		pipe->up_methods = &dwc2_device_intr_methods;
		break;
	case UE_ISOCHRONOUS:
		pipe->up_serialise = false;
		pipe->up_methods = &dwc2_device_isoc_methods;
		break;
	case UE_BULK:
		pipe->up_serialise = false;
		pipe->up_methods = &dwc2_device_bulk_methods;
		break;
	default:
		DPRINTF("bad xfer type %d\n", xfertype);
		return USBD_INVAL;
	}

	/* QH */
	dpipe->priv = NULL;

	return USBD_NORMAL_COMPLETION;
}

Static void
dwc2_poll(struct usbd_bus *bus)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;

	mutex_spin_enter(&hsotg->lock);
	dwc2_interrupt(sc);
	mutex_spin_exit(&hsotg->lock);
}

/*
 * Close a regular pipe.
 * Assumes that there are no pending transactions.
 */
Static void
dwc2_close_pipe(struct usbd_pipe *pipe)
{
#ifdef DIAGNOSTIC
	struct dwc2_softc *sc = pipe->up_dev->ud_bus->ub_hcpriv;
#endif

	KASSERT(mutex_owned(&sc->sc_lock));
}

/*
 * Abort a device request.
 */
Static void
dwc2_abortx(struct usbd_xfer *xfer)
{
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_xfer *d;
	int err;

	DPRINTF("xfer %p pipe %p status 0x%08x", xfer, xfer->ux_pipe,
	    xfer->ux_status);

	KASSERT(mutex_owned(&sc->sc_lock));
	ASSERT_SLEEPABLE();

	KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
		xfer->ux_status == USBD_TIMEOUT),
	    "bad abort status: %d", xfer->ux_status);

	mutex_spin_enter(&hsotg->lock);

	/*
	 * Check whether we aborted or timed out after the hardware
	 * completion interrupt determined that it's done but before
	 * the soft interrupt could actually complete it.  If so, it's
	 * too late for the soft interrupt -- at this point we've
	 * already committed to abort it or time it out, so we need to
	 * take it off the softint's list of work in case the caller,
	 * say, frees the xfer before the softint runs.
	 *
	 * This logic is unusual among host controller drivers, and
	 * happens because dwc2 decides to complete xfers in the hard
	 * interrupt handler rather than in the soft interrupt handler,
	 * but usb_transfer_complete must be deferred to softint -- and
	 * we happened to swoop in between the hard interrupt and the
	 * soft interrupt.  Other host controller drivers do almost all
	 * processing in the softint so there's no intermediate stage.
	 *
	 * Fortunately, this linear search to discern the intermediate
	 * stage is not likely to be a serious performance impact
	 * because it happens only on abort or timeout.
	 */
	TAILQ_FOREACH(d, &sc->sc_complete, xnext) {
		if (d == dxfer) {
			TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
			break;
		}
	}

	/*
	 * If we're dying, skip the hardware action and just notify the
	 * software that we're done.
	 */
	if (sc->sc_dying) {
		DPRINTFN(4, "xfer %p dying 0x%08x", xfer, xfer->ux_status);
		goto dying;
	}

	/*
	 * HC Step 1: Handle the hardware.
	 */
	err = dwc2_hcd_urb_dequeue(hsotg, dxfer->urb);
	if (err) {
		DPRINTF("dwc2_hcd_urb_dequeue failed\n");
	}

dying:
	mutex_spin_exit(&hsotg->lock);

	/*
	 * Final Step: Notify completion to waiting xfers.
	 */
	usb_transfer_complete(xfer);
	KASSERT(mutex_owned(&sc->sc_lock));
}

Static void
dwc2_noop(struct usbd_pipe *pipe)
{

}

Static void
dwc2_device_clear_toggle(struct usbd_pipe *pipe)
{

	DPRINTF("toggle %d -> 0", pipe->up_endpoint->ue_toggle);
}

/***********************************************************************/

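/*
 * Root hub control requests: handle the few descriptor and hub-class
 * requests here (the latter via dwc2_hcd_hub_control()); anything else
 * falls through to the generic usbroothub code by returning buflen.
 */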
Static int
dwc2_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
    void *buf, int buflen)
{
	struct dwc2_softc *sc = bus->ub_hcpriv;
	usbd_status err = USBD_IOERROR;
	uint16_t len, value, index;
	int totlen = 0;

	if (sc->sc_dying)
		return -1;

	DPRINTFN(4, "type=0x%02x request=%02x\n",
	    req->bmRequestType, req->bRequest);

	len = UGETW(req->wLength);
	value = UGETW(req->wValue);
	index = UGETW(req->wIndex);

#define C(x,y) ((x) | ((y) << 8))
	switch (C(req->bRequest, req->bmRequestType)) {
	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
		DPRINTFN(8, "wValue=0x%04x\n", value);

		if (len == 0)
			break;
		switch (value) {
#define sd ((usb_string_descriptor_t *)buf)
		case C(2, UDESC_STRING):
			/* Product */
			totlen = usb_makestrdesc(sd, len, "DWC2 root hub");
			break;
#undef sd
		default:
			/* default from usbroothub */
			return buflen;
		}
		break;

	case C(UR_GET_CONFIG, UT_READ_DEVICE):
	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
	case C(UR_GET_STATUS, UT_READ_INTERFACE):
	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
		/* default from usbroothub */
		DPRINTFN(4, "returning %d (usbroothub default)", buflen);

		return buflen;

	default:
		/* Hub requests */
		err = dwc2_hcd_hub_control(sc->sc_hsotg,
		    C(req->bRequest, req->bmRequestType), value, index,
		    buf, len);
		if (err) {
			return -1;
		}
		totlen = len;
	}

	return totlen;
}

Static usbd_status
dwc2_root_intr_transfer(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("\n");

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return dwc2_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

Static usbd_status
dwc2_root_intr_start(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	const bool polling = sc->sc_bus.ub_usepolling;

	DPRINTF("\n");

	if (sc->sc_dying)
		return USBD_IOERROR;

	if (!polling)
		mutex_enter(&sc->sc_lock);
	KASSERT(sc->sc_intrxfer == NULL);
	sc->sc_intrxfer = xfer;
	if (!polling)
		mutex_exit(&sc->sc_lock);

	xfer->ux_status = USBD_IN_PROGRESS;
	return xfer->ux_status;
}

/* Abort a root interrupt request. */
Static void
dwc2_root_intr_abort(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);

	DPRINTF("xfer=%p\n", xfer);

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);

	/* If xfer has already completed, nothing to do here.  */
	if (sc->sc_intrxfer == NULL)
		return;

	/*
	 * Otherwise, sc->sc_intrxfer had better be this transfer.
	 * Cancel it.
	 */
	KASSERT(sc->sc_intrxfer == xfer);
	KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
	xfer->ux_status = USBD_CANCELLED;
	usb_transfer_complete(xfer);
}

Static void
dwc2_root_intr_close(struct usbd_pipe *pipe)
{
	struct dwc2_softc *sc = DWC2_PIPE2SC(pipe);

	DPRINTF("\n");

	KASSERT(mutex_owned(&sc->sc_lock));

	/*
	 * Caller must guarantee the xfer has completed first, by
	 * closing the pipe only after normal completion or an abort.
	 */
	KASSERT(sc->sc_intrxfer == NULL);
}

Static void
dwc2_root_intr_done(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);

	DPRINTF("\n");

	/* Claim the xfer so it doesn't get completed again.  */
	KASSERT(sc->sc_intrxfer == xfer);
	KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
	sc->sc_intrxfer = NULL;
}

/***********************************************************************/

Static usbd_status
dwc2_device_ctrl_transfer(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("\n");

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return dwc2_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

Static usbd_status
dwc2_device_ctrl_start(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;
	const bool polling = sc->sc_bus.ub_usepolling;

	DPRINTF("\n");

	if (!polling)
		mutex_enter(&sc->sc_lock);
	xfer->ux_status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);
	if (!polling)
		mutex_exit(&sc->sc_lock);

	if (err)
		return err;

	return USBD_IN_PROGRESS;
}

Static void
dwc2_device_ctrl_abort(struct usbd_xfer *xfer)
{
#ifdef DIAGNOSTIC
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
#endif
	KASSERT(mutex_owned(&sc->sc_lock));

	DPRINTF("xfer=%p\n", xfer);
	usbd_xfer_abort(xfer);
}

Static void
dwc2_device_ctrl_close(struct usbd_pipe *pipe)
{

	DPRINTF("pipe=%p\n", pipe);
	dwc2_close_pipe(pipe);
}

Static void
dwc2_device_ctrl_done(struct usbd_xfer *xfer)
{

	DPRINTF("xfer=%p\n", xfer);
}

/***********************************************************************/

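/*
 * Bulk pipes are not serialised (up_serialise is false), so the transfer
 * is queued and started immediately rather than waiting its turn.
 */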
Static usbd_status
dwc2_device_bulk_transfer(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("xfer=%p\n", xfer);

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);

	KASSERT(err == USBD_NORMAL_COMPLETION);

	xfer->ux_status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);
	mutex_exit(&sc->sc_lock);

	return err;
}

Static void
dwc2_device_bulk_abort(struct usbd_xfer *xfer)
{
#ifdef DIAGNOSTIC
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
#endif
	KASSERT(mutex_owned(&sc->sc_lock));

	DPRINTF("xfer=%p\n", xfer);
	usbd_xfer_abort(xfer);
}

Static void
dwc2_device_bulk_close(struct usbd_pipe *pipe)
{

	DPRINTF("pipe=%p\n", pipe);

	dwc2_close_pipe(pipe);
}

Static void
dwc2_device_bulk_done(struct usbd_xfer *xfer)
{

	DPRINTF("xfer=%p\n", xfer);
}

/***********************************************************************/

Static usbd_status
dwc2_device_intr_transfer(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("xfer=%p\n", xfer);

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return dwc2_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

Static usbd_status
dwc2_device_intr_start(struct usbd_xfer *xfer)
{
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct usbd_device *dev = dpipe->pipe.up_dev;
	struct dwc2_softc *sc = dev->ud_bus->ub_hcpriv;
	usbd_status err;
	const bool polling = sc->sc_bus.ub_usepolling;

	if (!polling)
		mutex_enter(&sc->sc_lock);
	xfer->ux_status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);
	if (!polling)
		mutex_exit(&sc->sc_lock);

	if (err)
		return err;

	return USBD_IN_PROGRESS;
}

/* Abort a device interrupt request. */
Static void
dwc2_device_intr_abort(struct usbd_xfer *xfer)
{
#ifdef DIAGNOSTIC
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
#endif

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);

	DPRINTF("xfer=%p\n", xfer);
	usbd_xfer_abort(xfer);
}

Static void
dwc2_device_intr_close(struct usbd_pipe *pipe)
{

	DPRINTF("pipe=%p\n", pipe);

	dwc2_close_pipe(pipe);
}

Static void
dwc2_device_intr_done(struct usbd_xfer *xfer)
{

	DPRINTF("\n");
}

/***********************************************************************/

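/*
 * Isochronous pipes are likewise not serialised; each transfer is queued
 * and started immediately.
 */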
usbd_status
dwc2_device_isoc_transfer(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("xfer=%p\n", xfer);

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);

	KASSERT(err == USBD_NORMAL_COMPLETION);

	xfer->ux_status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);
	mutex_exit(&sc->sc_lock);

	return err;
}

void
dwc2_device_isoc_abort(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
	KASSERT(mutex_owned(&sc->sc_lock));

	DPRINTF("xfer=%p\n", xfer);
	usbd_xfer_abort(xfer);
}

void
dwc2_device_isoc_close(struct usbd_pipe *pipe)
{
	DPRINTF("\n");

	dwc2_close_pipe(pipe);
}

void
dwc2_device_isoc_done(struct usbd_xfer *xfer)
{

	DPRINTF("\n");
}

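/*
 * Build a dwc2_hcd_urb for the xfer and hand it to the shared HCD code:
 * fill in pipe info and DMA addresses (including the setup packet for
 * control transfers), compute the polling interval for periodic
 * endpoints, describe any isochronous frames, then enqueue the urb with
 * a fresh QTD, creating the endpoint's QH on first use.
 */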
usbd_status
dwc2_device_start(struct usbd_xfer *xfer)
{
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_hcd_urb *dwc2_urb;

	struct usbd_device *dev = xfer->ux_pipe->up_dev;
	usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
	uint8_t addr = dev->ud_addr;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint8_t epnum = UE_GET_ADDR(ed->bEndpointAddress);
	uint8_t dir = UE_GET_DIR(ed->bEndpointAddress);
	uint16_t mps = UE_GET_SIZE(UGETW(ed->wMaxPacketSize));
	uint32_t len;

	uint32_t flags = 0;
	uint32_t off = 0;
	int retval, err;
	int alloc_bandwidth = 0;

	DPRINTFN(1, "xfer=%p pipe=%p\n", xfer, xfer->ux_pipe);

	if (xfertype == UE_ISOCHRONOUS ||
	    xfertype == UE_INTERRUPT) {
		mutex_spin_enter(&hsotg->lock);
		if (!dwc2_hcd_is_bandwidth_allocated(hsotg, xfer))
			alloc_bandwidth = 1;
		mutex_spin_exit(&hsotg->lock);
	}

	/*
	 * For control pipes the direction comes from the request; all other
	 * transfer types had their direction set correctly at pipe open time.
	 */
	if (xfertype == UE_CONTROL) {
		usb_device_request_t *req = &xfer->ux_request;

		DPRINTFN(3, "xfer=%p type=0x%02x request=0x%02x wValue=0x%04x "
		    "wIndex=0x%04x len=%d addr=%d endpt=%d dir=%s speed=%d "
		    "mps=%d\n",
		    xfer, req->bmRequestType, req->bRequest, UGETW(req->wValue),
		    UGETW(req->wIndex), UGETW(req->wLength), dev->ud_addr,
		    epnum, dir == UT_READ ? "in" :"out", dev->ud_speed, mps);

		/* Copy request packet to our DMA buffer */
		memcpy(KERNADDR(&dpipe->req_dma, 0), req, sizeof(*req));
		usb_syncmem(&dpipe->req_dma, 0, sizeof(*req),
		    BUS_DMASYNC_PREWRITE);
		len = UGETW(req->wLength);
		if ((req->bmRequestType & UT_READ) == UT_READ) {
			dir = UE_DIR_IN;
		} else {
			dir = UE_DIR_OUT;
		}

		DPRINTFN(3, "req = %p dma = %" PRIxBUSADDR " len %d dir %s\n",
		    KERNADDR(&dpipe->req_dma, 0), DMAADDR(&dpipe->req_dma, 0),
		    len, dir == UE_DIR_IN ? "in" : "out");
	} else if (xfertype == UE_ISOCHRONOUS) {
		DPRINTFN(3, "xfer=%p nframes=%d flags=%d addr=%d endpt=%d,"
		    " mps=%d dir %s\n", xfer, xfer->ux_nframes, xfer->ux_flags, addr,
		    epnum, mps, dir == UT_READ ? "in" :"out");

		len = 0;
		for (size_t i = 0; i < xfer->ux_nframes; i++)
			len += xfer->ux_frlengths[i];
	} else {
		DPRINTFN(3, "xfer=%p len=%d flags=%d addr=%d endpt=%d,"
		    " mps=%d dir %s\n", xfer, xfer->ux_length, xfer->ux_flags, addr,
		    epnum, mps, dir == UT_READ ? "in" :"out");

		len = xfer->ux_length;
	}

	dwc2_urb = dxfer->urb;
	if (!dwc2_urb)
		return USBD_NOMEM;

	KASSERT(dwc2_urb->packet_count == xfer->ux_nframes);
	memset(dwc2_urb, 0, sizeof(*dwc2_urb) +
	    sizeof(dwc2_urb->iso_descs[0]) * dwc2_urb->packet_count);

	dwc2_urb->priv = xfer;
	dwc2_urb->packet_count = xfer->ux_nframes;

	dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, addr, epnum, xfertype, dir,
	    mps);

	if (xfertype == UE_CONTROL) {
		dwc2_urb->setup_usbdma = &dpipe->req_dma;
		dwc2_urb->setup_packet = KERNADDR(&dpipe->req_dma, 0);
		dwc2_urb->setup_dma = DMAADDR(&dpipe->req_dma, 0);
	} else {
		/* XXXNH - % mps required? */
		if ((xfer->ux_flags & USBD_FORCE_SHORT_XFER) && (len % mps) == 0)
		    flags |= URB_SEND_ZERO_PACKET;
	}
	flags |= URB_GIVEBACK_ASAP;

	/*
	 * control transfers with no data phase don't touch usbdma, but
	 * everything else does.
	 */
	if (!(xfertype == UE_CONTROL && len == 0)) {
		dwc2_urb->usbdma = &xfer->ux_dmabuf;
		dwc2_urb->buf = KERNADDR(dwc2_urb->usbdma, 0);
		dwc2_urb->dma = DMAADDR(dwc2_urb->usbdma, 0);

		usb_syncmem(&xfer->ux_dmabuf, 0, len,
		    dir == UE_DIR_IN ?
			BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}
	dwc2_urb->length = len;
	dwc2_urb->flags = flags;
	dwc2_urb->status = -EINPROGRESS;

	if (xfertype == UE_INTERRUPT ||
	    xfertype == UE_ISOCHRONOUS) {
		uint16_t ival;

		if (xfertype == UE_INTERRUPT &&
		    dpipe->pipe.up_interval != USBD_DEFAULT_INTERVAL) {
			ival = dpipe->pipe.up_interval;
		} else {
			ival = ed->bInterval;
		}

		if (ival < 1) {
			retval = -ENODEV;
			goto fail;
		}
		if (dev->ud_speed == USB_SPEED_HIGH ||
		   (dev->ud_speed == USB_SPEED_FULL && xfertype == UE_ISOCHRONOUS)) {
			if (ival > 16) {
				/*
				 * illegal with HS/FS, but there were
				 * documentation bugs in the spec
				 */
				ival = 256;
			} else {
				ival = (1 << (ival - 1));
			}
		} else {
			if (xfertype == UE_INTERRUPT && ival < 10)
				ival = 10;
		}
		dwc2_urb->interval = ival;
	}

	/* XXXNH bring down from callers?? */
// 	mutex_enter(&sc->sc_lock);

	xfer->ux_actlen = 0;

	KASSERT(xfertype != UE_ISOCHRONOUS ||
	    xfer->ux_nframes <= dwc2_urb->packet_count);
	KASSERTMSG(xfer->ux_nframes == 0 || xfertype == UE_ISOCHRONOUS,
	    "nframes %d xfertype %d\n", xfer->ux_nframes, xfertype);

	off = 0;
	for (size_t i = 0; i < xfer->ux_nframes; ++i) {
		DPRINTFN(3, "xfer=%p frame=%zd offset=%d length=%d\n", xfer, i,
		    off, xfer->ux_frlengths[i]);

		dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i, off,
		    xfer->ux_frlengths[i]);
		off += xfer->ux_frlengths[i];
	}

	struct dwc2_qh *qh = dpipe->priv;
	struct dwc2_qtd *qtd;
	bool qh_allocated = false;

	/* Create QH for the endpoint if it doesn't exist */
	if (!qh) {
		qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, GFP_ATOMIC);
		if (!qh) {
			retval = -ENOMEM;
			goto fail;
		}
		dpipe->priv = qh;
		qh_allocated = true;
	}

	qtd = pool_cache_get(sc->sc_qtdpool, PR_NOWAIT);
	if (!qtd) {
		retval = -ENOMEM;
		goto fail1;
	}
	memset(qtd, 0, sizeof(*qtd));

	/* might need to check cpu_intr_p */
	mutex_spin_enter(&hsotg->lock);
	retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
	if (retval)
		goto fail2;
	usbd_xfer_schedule_timeout(xfer);
	xfer->ux_status = USBD_IN_PROGRESS;

	if (alloc_bandwidth) {
		dwc2_allocate_bus_bandwidth(hsotg,
				dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
				xfer);
	}

	mutex_spin_exit(&hsotg->lock);
// 	mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;

fail2:
	dwc2_urb->priv = NULL;
	mutex_spin_exit(&hsotg->lock);
	pool_cache_put(sc->sc_qtdpool, qtd);

fail1:
	if (qh_allocated) {
		dpipe->priv = NULL;
		dwc2_hcd_qh_free(hsotg, qh);
	}
fail:

	switch (retval) {
	case -EINVAL:
	case -ENODEV:
		err = USBD_INVAL;
		break;
	case -ENOMEM:
		err = USBD_NOMEM;
		break;
	default:
		err = USBD_IOERROR;
	}

	return err;

}

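/*
 * Hardware interrupt handler, called from the bus glue.  When polling,
 * just acknowledge the core interrupts; otherwise dispatch to the HCD
 * and common interrupt handlers.
 */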
int dwc2_intr(void *p)
{
	struct dwc2_softc *sc = p;
	struct dwc2_hsotg *hsotg;
	int ret = 0;

	if (sc == NULL)
		return 0;

	hsotg = sc->sc_hsotg;
	mutex_spin_enter(&hsotg->lock);

	if (sc->sc_dying || !device_has_power(sc->sc_dev))
		goto done;

	if (sc->sc_bus.ub_usepolling) {
		uint32_t intrs;

		intrs = dwc2_read_core_intr(hsotg);
		DWC2_WRITE_4(hsotg, GINTSTS, intrs);
	} else {
		ret = dwc2_interrupt(sc);
	}

done:
	mutex_spin_exit(&hsotg->lock);

	return ret;
}

int
dwc2_interrupt(struct dwc2_softc *sc)
{
	int ret = 0;

	if (sc->sc_hcdenabled) {
		ret |= dwc2_handle_hcd_intr(sc->sc_hsotg);
	}

	ret |= dwc2_handle_common_intr(sc->sc_hsotg);

	return ret;
}

/***********************************************************************/

int
dwc2_detach(struct dwc2_softc *sc, int flags)
{
	int rv = 0;

	if (sc->sc_child != NULL)
		rv = config_detach(sc->sc_child, flags);

	return rv;
}

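/*
 * Autoconf and pmf hooks.  The core itself has nothing to do for these
 * events yet, so they are no-op stubs.
 */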
bool
dwc2_shutdown(device_t self, int flags)
{
	struct dwc2_softc *sc = device_private(self);

	sc = sc;

	return true;
}

void
dwc2_childdet(device_t self, device_t child)
{
	struct dwc2_softc *sc = device_private(self);

	sc = sc;
}

int
dwc2_activate(device_t self, enum devact act)
{
	struct dwc2_softc *sc = device_private(self);

	sc = sc;

	return 0;
}

bool
dwc2_resume(device_t dv, const pmf_qual_t *qual)
{
	struct dwc2_softc *sc = device_private(dv);

	sc = sc;

	return true;
}

bool
dwc2_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct dwc2_softc *sc = device_private(dv);

	sc = sc;

	return true;
}

/***********************************************************************/
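/*
 * One-time controller setup: register bus methods, create locks, the root
 * hub soft interrupt and the xfer/QH/QTD pools, allocate the dwc2_hsotg
 * core state, detect the hardware parameters and initialise host (and, if
 * configured, gadget) mode.
 */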
int
dwc2_init(struct dwc2_softc *sc)
{
	int err = 0;

	err = linux_workqueue_init();
	if (err)
		return err;

	sc->sc_bus.ub_hcpriv = sc;
	sc->sc_bus.ub_revision = USBREV_2_0;
	sc->sc_bus.ub_methods = &dwc2_bus_methods;
	sc->sc_bus.ub_pipesize = sizeof(struct dwc2_pipe);
	sc->sc_bus.ub_usedma = true;
	sc->sc_hcdenabled = false;

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);

	TAILQ_INIT(&sc->sc_complete);

	sc->sc_rhc_si = softint_establish(SOFTINT_USB | SOFTINT_MPSAFE,
	    dwc2_rhc, sc);

	sc->sc_xferpool = pool_cache_init(sizeof(struct dwc2_xfer), 0, 0, 0,
	    "dwc2xfer", NULL, IPL_USB, NULL, NULL, NULL);
	sc->sc_qhpool = pool_cache_init(sizeof(struct dwc2_qh), 0, 0, 0,
	    "dwc2qh", NULL, IPL_USB, NULL, NULL, NULL);
	sc->sc_qtdpool = pool_cache_init(sizeof(struct dwc2_qtd), 0, 0, 0,
	    "dwc2qtd", NULL, IPL_USB, NULL, NULL, NULL);

	sc->sc_hsotg = kmem_zalloc(sizeof(struct dwc2_hsotg), KM_SLEEP);
	sc->sc_hsotg->hsotg_sc = sc;
	sc->sc_hsotg->dev = sc->sc_dev;
	sc->sc_hcdenabled = true;

	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_core_params defparams;
	int retval;

	if (sc->sc_params == NULL) {
		/* Default all params to autodetect */
		dwc2_set_all_params(&defparams, -1);
		sc->sc_params = &defparams;

		/*
		 * Disable descriptor dma mode by default as the HW can support
		 * it, but does not support it for SPLIT transactions.
		 */
		defparams.dma_desc_enable = 0;
	}
	hsotg->dr_mode = USB_DR_MODE_HOST;

	/* Detect config values from hardware */
	retval = dwc2_get_hwparams(hsotg);
	if (retval) {
		goto fail2;
	}

	hsotg->core_params = kmem_zalloc(sizeof(*hsotg->core_params), KM_SLEEP);
	dwc2_set_all_params(hsotg->core_params, -1);

	/* Validate parameter values */
	dwc2_set_parameters(hsotg, sc->sc_params);

#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
    IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
	if (hsotg->dr_mode != USB_DR_MODE_HOST) {
		retval = dwc2_gadget_init(hsotg);
		if (retval)
			goto fail2;
		hsotg->gadget_enabled = 1;
	}
#endif
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \
    IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
	if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) {
		retval = dwc2_hcd_init(hsotg);
		if (retval) {
			if (hsotg->gadget_enabled)
				dwc2_hsotg_remove(hsotg);
			goto fail2;
		}
		hsotg->hcd_enabled = 1;
	}
#endif

	uint32_t snpsid = hsotg->hw_params.snpsid;
	aprint_verbose_dev(sc->sc_dev, "Core Release: %x.%x%x%x (snpsid=%x)\n",
	    snpsid >> 12 & 0xf, snpsid >> 8 & 0xf,
	    snpsid >> 4 & 0xf, snpsid & 0xf, snpsid);

	return 0;

fail2:
	err = -retval;
	kmem_free(sc->sc_hsotg, sizeof(struct dwc2_hsotg));
	softint_disestablish(sc->sc_rhc_si);

	return err;
}

#if 0
/*
 * curmode is a mode indication bit 0 = device, 1 = host
 */
static const char * const intnames[32] = {
	"curmode",	"modemis",	"otgint",	"sof",
	"rxflvl",	"nptxfemp",	"ginnakeff",	"goutnakeff",
	"ulpickint",	"i2cint",	"erlysusp",	"usbsusp",
	"usbrst",	"enumdone",	"isooutdrop",	"eopf",
	"restore_done",	"epmis",	"iepint",	"oepint",
	"incompisoin",	"incomplp",	"fetsusp",	"resetdet",
	"prtint",	"hchint",	"ptxfemp",	"lpm",
	"conidstschng",	"disconnint",	"sessreqint",	"wkupint"
};


/***********************************************************************/

#endif

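/*
 * Called by the shared HCD code to learn the address and port number of
 * the device's high-speed parent hub, needed for split transactions.
 */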
void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
			int *hub_port)
{
	struct usbd_xfer *xfer = context;
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct usbd_device *dev = dpipe->pipe.up_dev;

	*hub_addr = dev->ud_myhsport->up_parent->ud_addr;
	*hub_port = dev->ud_myhsport->up_portno;
}

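/* Report the speed of the device that owns this transfer to the HCD. */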
int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
{
	struct usbd_xfer *xfer = context;
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct usbd_device *dev = dpipe->pipe.up_dev;

	return dev->ud_speed;
}

/*
 * Sets the final status of an URB and returns it to the upper layer. Any
 * required cleanup of the URB is performed.
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
    int status)
{
	struct usbd_xfer *xfer;
	struct dwc2_xfer *dxfer;
	struct dwc2_softc *sc;
	usb_endpoint_descriptor_t *ed;
	uint8_t xfertype;

	KASSERT(mutex_owned(&hsotg->lock));

	if (!qtd) {
		dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
		return;
	}

	if (!qtd->urb) {
		dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
		return;
	}

	xfer = qtd->urb->priv;
	if (!xfer) {
		dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
		return;
	}

	dxfer = DWC2_XFER2DXFER(xfer);
	sc = DWC2_XFER2SC(xfer);
	ed = xfer->ux_pipe->up_endpoint->ue_edesc;
	xfertype = UE_GET_XFERTYPE(ed->bmAttributes);

	struct dwc2_hcd_urb *urb = qtd->urb;
	xfer->ux_actlen = dwc2_hcd_urb_get_actual_length(urb);

	DPRINTFN(3, "xfer=%p actlen=%d\n", xfer, xfer->ux_actlen);

	if (xfertype == UE_ISOCHRONOUS) {
		xfer->ux_actlen = 0;
		for (size_t i = 0; i < xfer->ux_nframes; ++i) {
			xfer->ux_frlengths[i] =
				dwc2_hcd_urb_get_iso_desc_actual_length(
						urb, i);
			DPRINTFN(1, "xfer=%p frame=%zu length=%d\n", xfer, i,
			    xfer->ux_frlengths[i]);
			xfer->ux_actlen += xfer->ux_frlengths[i];
		}
		DPRINTFN(1, "xfer=%p actlen=%d (isoc)\n", xfer, xfer->ux_actlen);
	}

	if (xfertype == UE_ISOCHRONOUS && dbg_perio()) {
		for (size_t i = 0; i < xfer->ux_nframes; i++)
			dev_vdbg(hsotg->dev, " ISO Desc %zu status %d\n",
				 i, urb->iso_descs[i].status);
	}

	if (!status) {
		if (!(xfer->ux_flags & USBD_SHORT_XFER_OK) &&
		    xfer->ux_actlen < xfer->ux_length)
			status = -EIO;
	}

	switch (status) {
	case 0:
		dxfer->intr_status = USBD_NORMAL_COMPLETION;
		break;
	case -EPIPE:
		dxfer->intr_status = USBD_STALLED;
		break;
	case -EPROTO:
		dxfer->intr_status = USBD_INVAL;
		break;
	case -EIO:
		dxfer->intr_status = USBD_IOERROR;
		break;
	case -EOVERFLOW:
		dxfer->intr_status = USBD_IOERROR;
		break;
	default:
		dxfer->intr_status = USBD_IOERROR;
		printf("%s: unknown error status %d\n", __func__, status);
	}

	if (dxfer->intr_status == USBD_NORMAL_COMPLETION) {
		/*
		 * control transfers with no data phase don't touch dmabuf, but
		 * everything else does.
		 */
		if (!(xfertype == UE_CONTROL &&
		    UGETW(xfer->ux_request.wLength) == 0) &&
		    xfer->ux_actlen > 0	/* XXX PR/53503 */
		    ) {
			int rd = usbd_xfer_isread(xfer);

			usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_actlen,
			    rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		}
	}

	if (xfertype == UE_ISOCHRONOUS ||
	    xfertype == UE_INTERRUPT) {
		struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);

		dwc2_free_bus_bandwidth(hsotg,
					dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
					xfer);
	}

	qtd->urb = NULL;
	KASSERT(mutex_owned(&hsotg->lock));

	TAILQ_INSERT_TAIL(&sc->sc_complete, dxfer, xnext);

	mutex_spin_exit(&hsotg->lock);
	usb_schedsoftintr(&sc->sc_bus);
	mutex_spin_enter(&hsotg->lock);
}

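/*
 * Start the HCD: mark the core fully powered (L0) and, when operating as
 * a host, reinitialise the host hardware.
 */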
int
_dwc2_hcd_start(struct dwc2_hsotg *hsotg)
{
	dev_dbg(hsotg->dev, "DWC OTG HCD START\n");

	mutex_spin_enter(&hsotg->lock);

	hsotg->lx_state = DWC2_L0;

	if (dwc2_is_device_mode(hsotg)) {
		mutex_spin_exit(&hsotg->lock);
		return 0;	/* why 0 ?? */
	}

	dwc2_hcd_reinit(hsotg);

	mutex_spin_exit(&hsotg->lock);
	return 0;
}

int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
{

	return false;
}