      1 /*	$NetBSD: dwc2.c,v 1.61 2019/12/03 11:25:43 skrll Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Nick Hudson
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: dwc2.c,v 1.61 2019/12/03 11:25:43 skrll Exp $");
     34 
     35 #include "opt_usb.h"
     36 
     37 #include <sys/param.h>
     38 
     39 #include <sys/cpu.h>
     40 #include <sys/device.h>
     41 #include <sys/kernel.h>
     42 #include <sys/kmem.h>
     43 #include <sys/proc.h>
     44 #include <sys/queue.h>
     45 #include <sys/select.h>
     46 #include <sys/systm.h>
     47 
     48 #include <machine/endian.h>
     49 
     50 #include <dev/usb/usb.h>
     51 #include <dev/usb/usbdi.h>
     52 #include <dev/usb/usbdivar.h>
     53 #include <dev/usb/usb_mem.h>
     54 #include <dev/usb/usbroothub.h>
     55 
     56 #include <dwc2/dwc2.h>
     57 #include <dwc2/dwc2var.h>
     58 
     59 #include "dwc2_core.h"
     60 #include "dwc2_hcd.h"
     61 
     62 #ifdef DWC2_COUNTERS
     63 #define	DWC2_EVCNT_ADD(a,b)	((void)((a).ev_count += (b)))
     64 #else
     65 #define	DWC2_EVCNT_ADD(a,b)	do { } while (/*CONSTCOND*/0)
     66 #endif
     67 #define	DWC2_EVCNT_INCR(a)	DWC2_EVCNT_ADD((a), 1)
     68 
     69 #ifdef DWC2_DEBUG
     70 #define	DPRINTFN(n,fmt,...) do {			\
     71 	if (dwc2debug >= (n)) {			\
     72 		printf("%s: " fmt,			\
     73 		__FUNCTION__,## __VA_ARGS__);		\
     74 	}						\
     75 } while (0)
     76 #define	DPRINTF(...)	DPRINTFN(1, __VA_ARGS__)
     77 int dwc2debug = 0;
     78 
     79 SYSCTL_SETUP(sysctl_hw_dwc2_setup, "sysctl hw.dwc2 setup")
     80 {
     81 	int err;
     82 	const struct sysctlnode *rnode;
     83 	const struct sysctlnode *cnode;
     84 
     85 	err = sysctl_createv(clog, 0, NULL, &rnode,
     86 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "dwc2",
     87 	    SYSCTL_DESCR("dwc2 global controls"),
     88 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
     89 
     90 	if (err)
     91 		goto fail;
     92 
     93 	/* control debugging printfs */
     94 	err = sysctl_createv(clog, 0, &rnode, &cnode,
     95 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
     96 	    "debug", SYSCTL_DESCR("Enable debugging output"),
     97 	    NULL, 0, &dwc2debug, sizeof(dwc2debug), CTL_CREATE, CTL_EOL);
     98 	if (err)
     99 		goto fail;
    100 
    101 	return;
    102 fail:
    103 	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
    104 }
    105 #else
    106 #define	DPRINTF(...) do { } while (0)
    107 #define	DPRINTFN(...) do { } while (0)
    108 #endif
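
/*
 * When built with DWC2_DEBUG, the verbosity is tunable at run time via
 * the sysctl node created above, e.g.
 *
 *	sysctl -w hw.dwc2.debug=10
 *
 * DPRINTFN(n, ...) output appears once dwc2debug >= n.
 */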
    109 
    110 Static usbd_status	dwc2_open(struct usbd_pipe *);
    111 Static void		dwc2_poll(struct usbd_bus *);
    112 Static void		dwc2_softintr(void *);
    113 
    114 Static struct usbd_xfer *
    115 			dwc2_allocx(struct usbd_bus *, unsigned int);
    116 Static void		dwc2_freex(struct usbd_bus *, struct usbd_xfer *);
    117 Static void		dwc2_get_lock(struct usbd_bus *, kmutex_t **);
    118 Static int		dwc2_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
    119 			    void *, int);
    120 
    121 Static usbd_status	dwc2_root_intr_transfer(struct usbd_xfer *);
    122 Static usbd_status	dwc2_root_intr_start(struct usbd_xfer *);
    123 Static void		dwc2_root_intr_abort(struct usbd_xfer *);
    124 Static void		dwc2_root_intr_close(struct usbd_pipe *);
    125 Static void		dwc2_root_intr_done(struct usbd_xfer *);
    126 
    127 Static usbd_status	dwc2_device_ctrl_transfer(struct usbd_xfer *);
    128 Static usbd_status	dwc2_device_ctrl_start(struct usbd_xfer *);
    129 Static void		dwc2_device_ctrl_abort(struct usbd_xfer *);
    130 Static void		dwc2_device_ctrl_close(struct usbd_pipe *);
    131 Static void		dwc2_device_ctrl_done(struct usbd_xfer *);
    132 
    133 Static usbd_status	dwc2_device_bulk_transfer(struct usbd_xfer *);
    134 Static void		dwc2_device_bulk_abort(struct usbd_xfer *);
    135 Static void		dwc2_device_bulk_close(struct usbd_pipe *);
    136 Static void		dwc2_device_bulk_done(struct usbd_xfer *);
    137 
    138 Static usbd_status	dwc2_device_intr_transfer(struct usbd_xfer *);
    139 Static usbd_status	dwc2_device_intr_start(struct usbd_xfer *);
    140 Static void		dwc2_device_intr_abort(struct usbd_xfer *);
    141 Static void		dwc2_device_intr_close(struct usbd_pipe *);
    142 Static void		dwc2_device_intr_done(struct usbd_xfer *);
    143 
    144 Static usbd_status	dwc2_device_isoc_transfer(struct usbd_xfer *);
    145 Static void		dwc2_device_isoc_abort(struct usbd_xfer *);
    146 Static void		dwc2_device_isoc_close(struct usbd_pipe *);
    147 Static void		dwc2_device_isoc_done(struct usbd_xfer *);
    148 
    149 Static usbd_status	dwc2_device_start(struct usbd_xfer *);
    150 
    151 Static void		dwc2_close_pipe(struct usbd_pipe *);
    152 Static void		dwc2_abort_xfer(struct usbd_xfer *, usbd_status);
    153 
    154 Static void		dwc2_device_clear_toggle(struct usbd_pipe *);
    155 Static void		dwc2_noop(struct usbd_pipe *pipe);
    156 
    157 Static int		dwc2_interrupt(struct dwc2_softc *);
    158 Static void		dwc2_rhc(void *);
    159 
    160 Static void		dwc2_timeout(void *);
    161 Static void		dwc2_timeout_task(void *);
    162 
    163 
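/*
 * Bus bandwidth accounting is a no-op in this port.  The empty hooks
 * below keep the periodic-transfer call sites in dwc2_device_start()
 * and dwc2_host_complete() symmetric.
 */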
    164 static inline void
    165 dwc2_allocate_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
    166 			    struct usbd_xfer *xfer)
    167 {
    168 }
    169 
    170 static inline void
    171 dwc2_free_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
    172 			struct usbd_xfer *xfer)
    173 {
    174 }
    175 
    176 Static const struct usbd_bus_methods dwc2_bus_methods = {
    177 	.ubm_open =	dwc2_open,
    178 	.ubm_softint =	dwc2_softintr,
    179 	.ubm_dopoll =	dwc2_poll,
    180 	.ubm_allocx =	dwc2_allocx,
    181 	.ubm_freex =	dwc2_freex,
    182 	.ubm_getlock =	dwc2_get_lock,
    183 	.ubm_rhctrl =	dwc2_roothub_ctrl,
    184 };
    185 
    186 Static const struct usbd_pipe_methods dwc2_root_intr_methods = {
    187 	.upm_transfer =	dwc2_root_intr_transfer,
    188 	.upm_start =	dwc2_root_intr_start,
    189 	.upm_abort =	dwc2_root_intr_abort,
    190 	.upm_close =	dwc2_root_intr_close,
    191 	.upm_cleartoggle =	dwc2_noop,
    192 	.upm_done =	dwc2_root_intr_done,
    193 };
    194 
    195 Static const struct usbd_pipe_methods dwc2_device_ctrl_methods = {
    196 	.upm_transfer =	dwc2_device_ctrl_transfer,
    197 	.upm_start =	dwc2_device_ctrl_start,
    198 	.upm_abort =	dwc2_device_ctrl_abort,
    199 	.upm_close =	dwc2_device_ctrl_close,
    200 	.upm_cleartoggle =	dwc2_noop,
    201 	.upm_done =	dwc2_device_ctrl_done,
    202 };
    203 
    204 Static const struct usbd_pipe_methods dwc2_device_intr_methods = {
    205 	.upm_transfer =	dwc2_device_intr_transfer,
    206 	.upm_start =	dwc2_device_intr_start,
    207 	.upm_abort =	dwc2_device_intr_abort,
    208 	.upm_close =	dwc2_device_intr_close,
    209 	.upm_cleartoggle =	dwc2_device_clear_toggle,
    210 	.upm_done =	dwc2_device_intr_done,
    211 };
    212 
    213 Static const struct usbd_pipe_methods dwc2_device_bulk_methods = {
    214 	.upm_transfer =	dwc2_device_bulk_transfer,
    215 	.upm_abort =	dwc2_device_bulk_abort,
    216 	.upm_close =	dwc2_device_bulk_close,
    217 	.upm_cleartoggle =	dwc2_device_clear_toggle,
    218 	.upm_done =	dwc2_device_bulk_done,
    219 };
    220 
    221 Static const struct usbd_pipe_methods dwc2_device_isoc_methods = {
    222 	.upm_transfer =	dwc2_device_isoc_transfer,
    223 	.upm_abort =	dwc2_device_isoc_abort,
    224 	.upm_close =	dwc2_device_isoc_close,
    225 	.upm_cleartoggle =	dwc2_noop,
    226 	.upm_done =	dwc2_device_isoc_done,
    227 };
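
/*
 * Bulk and isochronous pipes are opened with up_serialise = false in
 * dwc2_open(), so their method tables have no upm_start hook: the
 * upm_transfer functions queue the xfer and hand it to the hardware
 * immediately instead of waiting for the pipe queue to drain.
 */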
    228 
    229 struct usbd_xfer *
    230 dwc2_allocx(struct usbd_bus *bus, unsigned int nframes)
    231 {
    232 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    233 	struct dwc2_xfer *dxfer;
    234 	struct usbd_xfer *xfer;
    235 
    236 	DPRINTFN(10, "\n");
    237 
    238 	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolget);
    239 	dxfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
    240 	xfer = (struct usbd_xfer *)dxfer;
    241 	if (dxfer != NULL) {
    242 		memset(dxfer, 0, sizeof(*dxfer));
    243 
    244 		dxfer->urb = dwc2_hcd_urb_alloc(sc->sc_hsotg,
    245 		    nframes, GFP_KERNEL);
    246 
    247 		/* Initialise this always so we can call remove on it. */
    248 		usb_init_task(&xfer->ux_aborttask, dwc2_timeout_task, xfer,
    249 		    USB_TASKQ_MPSAFE);
    250 #ifdef DIAGNOSTIC
    251 		dxfer->xfer.ux_state = XFER_BUSY;
    252 #endif
    253 	}
    254 	return (struct usbd_xfer *)dxfer;
    255 }
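
/*
 * Each xfer from the pool carries a dwc2_hcd_urb sized for 'nframes'
 * isochronous descriptors; dwc2_freex() releases that urb before the
 * xfer goes back to the pool, so the two must stay paired.
 */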
    256 
    257 void
    258 dwc2_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
    259 {
    260 	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
    261 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    262 
    263 	DPRINTFN(10, "\n");
    264 
    265 #ifdef DIAGNOSTIC
    266 	if (xfer->ux_state != XFER_BUSY &&
    267 	    xfer->ux_status != USBD_NOT_STARTED) {
    268 		DPRINTF("xfer=%p not busy, 0x%08x\n", xfer, xfer->ux_state);
    269 	}
    270 	xfer->ux_state = XFER_FREE;
    271 #endif
    272 	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolput);
    273 	dwc2_hcd_urb_free(sc->sc_hsotg, dxfer->urb, dxfer->urb->packet_count);
    274 	pool_cache_put(sc->sc_xferpool, xfer);
    275 }
    276 
    277 Static void
    278 dwc2_get_lock(struct usbd_bus *bus, kmutex_t **lock)
    279 {
    280 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    281 
    282 	*lock = &sc->sc_lock;
    283 }
    284 
    285 Static void
    286 dwc2_rhc(void *addr)
    287 {
    288 	struct dwc2_softc *sc = addr;
    289 	struct usbd_xfer *xfer;
    290 	u_char *p;
    291 
    292 	DPRINTF("\n");
    293 	mutex_enter(&sc->sc_lock);
    294 	xfer = sc->sc_intrxfer;
    295 
    296 	if (xfer == NULL) {
    297 		/* Just ignore the change. */
    298 		mutex_exit(&sc->sc_lock);
    299 		return;
    300 
    301 	}
    302 	/* set port bit */
    303 	p = KERNADDR(&xfer->ux_dmabuf, 0);
    304 
    305 	p[0] = 0x02;	/* we only have one port (1 << 1) */
    306 
    307 	xfer->ux_actlen = xfer->ux_length;
    308 	xfer->ux_status = USBD_NORMAL_COMPLETION;
    309 
    310 	usb_transfer_complete(xfer);
    311 	mutex_exit(&sc->sc_lock);
    312 }
    313 
    314 Static void
    315 dwc2_softintr(void *v)
    316 {
    317 	struct usbd_bus *bus = v;
    318 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    319 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
    320 	struct dwc2_xfer *dxfer;
    321 
    322 	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
    323 
    324 	mutex_spin_enter(&hsotg->lock);
    325 	while ((dxfer = TAILQ_FIRST(&sc->sc_complete)) != NULL) {
    326 
    327 		KASSERTMSG(!callout_pending(&dxfer->xfer.ux_callout),
    328 		    "xfer %p pipe %p\n", dxfer, dxfer->xfer.ux_pipe);
    329 
    330 		/*
    331 		 * dwc2_abort_xfer will remove this transfer from the
    332 		 * sc_complete queue
    333 		 */
    334 		/*XXXNH not tested */
    335 		if (dxfer->xfer.ux_status == USBD_CANCELLED ||
    336 		    dxfer->xfer.ux_status == USBD_TIMEOUT) {
    337 			continue;
    338 		}
    339 
    340 		TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
    341 
    342 		mutex_spin_exit(&hsotg->lock);
    343 		usb_transfer_complete(&dxfer->xfer);
    344 		mutex_spin_enter(&hsotg->lock);
    345 	}
    346 	mutex_spin_exit(&hsotg->lock);
    347 }
    348 
    349 Static void
    350 dwc2_timeout(void *addr)
    351 {
    352 	struct usbd_xfer *xfer = addr;
    353  	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    354 	struct usbd_device *dev = xfer->ux_pipe->up_dev;
    355 
    356 	DPRINTF("xfer=%p\n", xfer);
    357 
    358 	mutex_enter(&sc->sc_lock);
    359 	if (!sc->sc_dying && xfer->ux_status == USBD_IN_PROGRESS)
    360 		usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC);
    361 	mutex_exit(&sc->sc_lock);
    362 }
    363 
    364 Static void
    365 dwc2_timeout_task(void *addr)
    366 {
    367 	struct usbd_xfer *xfer = addr;
    368  	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    369 
    370 	DPRINTF("xfer=%p\n", xfer);
    371 
    372 	mutex_enter(&sc->sc_lock);
    373 	dwc2_abort_xfer(xfer, USBD_TIMEOUT);
    374 	mutex_exit(&sc->sc_lock);
    375 }
    376 
    377 usbd_status
    378 dwc2_open(struct usbd_pipe *pipe)
    379 {
    380 	struct usbd_device *dev = pipe->up_dev;
    381 	struct dwc2_softc *sc = DWC2_PIPE2SC(pipe);
    382 	struct dwc2_pipe *dpipe = DWC2_PIPE2DPIPE(pipe);
    383 	usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc;
    384 	uint8_t addr = dev->ud_addr;
    385 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
    386 	usbd_status err;
    387 
    388 	DPRINTF("pipe %p addr %d xfertype %d dir %s\n", pipe, addr, xfertype,
    389 	    UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN ? "in" : "out");
    390 
    391 	if (sc->sc_dying) {
    392 		return USBD_IOERROR;
    393 	}
    394 
    395 	if (addr == dev->ud_bus->ub_rhaddr) {
    396 		switch (ed->bEndpointAddress) {
    397 		case USB_CONTROL_ENDPOINT:
    398 			pipe->up_methods = &roothub_ctrl_methods;
    399 			break;
    400 		case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
    401 			pipe->up_methods = &dwc2_root_intr_methods;
    402 			break;
    403 		default:
    404 			DPRINTF("bad bEndpointAddress 0x%02x\n",
    405 			    ed->bEndpointAddress);
    406 			return USBD_INVAL;
    407 		}
    408 		DPRINTF("root hub pipe open\n");
    409 		return USBD_NORMAL_COMPLETION;
    410 	}
    411 
    412 	switch (xfertype) {
    413 	case UE_CONTROL:
    414 		pipe->up_methods = &dwc2_device_ctrl_methods;
    415 		err = usb_allocmem(&sc->sc_bus, sizeof(usb_device_request_t),
    416 		    0, &dpipe->req_dma);
    417 		if (err)
    418 			return err;
    419 		break;
    420 	case UE_INTERRUPT:
    421 		pipe->up_methods = &dwc2_device_intr_methods;
    422 		break;
    423 	case UE_ISOCHRONOUS:
    424 		pipe->up_serialise = false;
    425 		pipe->up_methods = &dwc2_device_isoc_methods;
    426 		break;
    427 	case UE_BULK:
    428 		pipe->up_serialise = false;
    429 		pipe->up_methods = &dwc2_device_bulk_methods;
    430 		break;
    431 	default:
    432 		DPRINTF("bad xfer type %d\n", xfertype);
    433 		return USBD_INVAL;
    434 	}
    435 
    436 	/* QH */
    437 	dpipe->priv = NULL;
    438 
    439 	return USBD_NORMAL_COMPLETION;
    440 }
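
/*
 * dpipe->priv caches the endpoint's QH.  It starts out NULL here and is
 * populated lazily by dwc2_device_start() the first time a transfer is
 * queued on the pipe.
 */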
    441 
    442 Static void
    443 dwc2_poll(struct usbd_bus *bus)
    444 {
    445 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
    446 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
    447 
    448 	mutex_spin_enter(&hsotg->lock);
    449 	dwc2_interrupt(sc);
    450 	mutex_spin_exit(&hsotg->lock);
    451 }
    452 
    453 /*
     454  * Close a regular pipe.
    455  * Assumes that there are no pending transactions.
    456  */
    457 Static void
    458 dwc2_close_pipe(struct usbd_pipe *pipe)
    459 {
    460 #ifdef DIAGNOSTIC
    461 	struct dwc2_softc *sc = pipe->up_dev->ud_bus->ub_hcpriv;
    462 #endif
    463 
    464 	KASSERT(mutex_owned(&sc->sc_lock));
    465 }
    466 
    467 /*
    468  * Abort a device request.
    469  */
    470 Static void
    471 dwc2_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
    472 {
    473 	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
    474 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    475 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
    476 	struct dwc2_xfer *d, *tmp;
    477 	int err;
    478 
    479 	KASSERTMSG((status == USBD_CANCELLED || status == USBD_TIMEOUT),
    480 	    "invalid status for abort: %d", (int)status);
    481 
    482 	DPRINTF("xfer %p pipe %p status 0x%08x", xfer, xfer->ux_pipe, status);
    483 
    484 	KASSERT(mutex_owned(&sc->sc_lock));
    485 	ASSERT_SLEEPABLE();
    486 
    487 	if (status == USBD_CANCELLED) {
    488 		/*
    489 		 * We are synchronously aborting.  Try to stop the
    490 		 * callout and task, but if we can't, wait for them to
    491 		 * complete.
    492 		 */
    493 		callout_halt(&xfer->ux_callout, &sc->sc_lock);
    494 		usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask,
    495 		    USB_TASKQ_HC, &sc->sc_lock);
    496 	} else {
    497 		/* Otherwise, we are timing out.  */
    498 		KASSERT(status == USBD_TIMEOUT);
    499 	}
    500 
    501 	/*
    502 	 * The xfer cannot have been cancelled already.  It is the
    503 	 * responsibility of the caller of usbd_abort_pipe not to try
    504 	 * to abort a pipe multiple times, whether concurrently or
    505 	 * sequentially.
    506 	 */
    507 	KASSERT(xfer->ux_status != USBD_CANCELLED);
    508 
    509 	/* Only the timeout, which runs only once, can time it out.  */
    510 	KASSERT(xfer->ux_status != USBD_TIMEOUT);
    511 
    512 	/* If anyone else beat us, we're done.  */
    513 	if (xfer->ux_status != USBD_IN_PROGRESS)
    514 		return;
    515 
    516 	/* We beat everyone else.  Claim the status.  */
    517 	xfer->ux_status = status;
    518 
    519 	/*
    520 	 * If we're dying, skip the hardware action and just notify the
    521 	 * software that we're done.
    522 	 */
    523 	if (sc->sc_dying) {
    524 		DPRINTFN(4, "xfer %p dying 0x%08x", xfer, xfer->ux_status);
    525 		goto dying;
    526 	}
    527 
    528 	/*
    529 	 * HC Step 1: Handle the hardware.
    530 	 */
    531 	mutex_spin_enter(&hsotg->lock);
    532 	/* XXXNH suboptimal */
    533 	TAILQ_FOREACH_SAFE(d, &sc->sc_complete, xnext, tmp) {
    534 		if (d == dxfer) {
    535 			TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
    536 			break;
    537 		}
    538 	}
    539 
    540 	err = dwc2_hcd_urb_dequeue(hsotg, dxfer->urb);
    541 	if (err) {
    542 		DPRINTF("dwc2_hcd_urb_dequeue failed\n");
    543 	}
    544 
    545 	mutex_spin_exit(&hsotg->lock);
    546 
    547 	/*
    548 	 * Final Step: Notify completion to waiting xfers.
    549 	 */
    550 dying:
    551 	usb_transfer_complete(xfer);
    552 	KASSERT(mutex_owned(&sc->sc_lock));
    553 }
    554 
    555 Static void
    556 dwc2_noop(struct usbd_pipe *pipe)
    557 {
    558 
    559 }
    560 
    561 Static void
    562 dwc2_device_clear_toggle(struct usbd_pipe *pipe)
    563 {
    564 
    565 	DPRINTF("toggle %d -> 0", pipe->up_endpoint->ue_toggle);
    566 }
    567 
    568 /***********************************************************************/
    569 
    570 Static int
    571 dwc2_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
    572     void *buf, int buflen)
    573 {
    574 	struct dwc2_softc *sc = bus->ub_hcpriv;
    575 	usbd_status err = USBD_IOERROR;
    576 	uint16_t len, value, index;
    577 	int totlen = 0;
    578 
    579 	if (sc->sc_dying)
    580 		return -1;
    581 
    582 	DPRINTFN(4, "type=0x%02x request=%02x\n",
    583 	    req->bmRequestType, req->bRequest);
    584 
    585 	len = UGETW(req->wLength);
    586 	value = UGETW(req->wValue);
    587 	index = UGETW(req->wIndex);
    588 
    589 #define C(x,y) ((x) | ((y) << 8))
    590 	switch (C(req->bRequest, req->bmRequestType)) {
    591 	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
    592 		DPRINTFN(8, "wValue=0x%04x\n", value);
    593 
    594 		if (len == 0)
    595 			break;
    596 		switch (value) {
    597 #define sd ((usb_string_descriptor_t *)buf)
    598 		case C(2, UDESC_STRING):
    599 			/* Product */
    600 			totlen = usb_makestrdesc(sd, len, "DWC2 root hub");
    601 			break;
    602 #undef sd
    603 		default:
    604 			/* default from usbroothub */
    605 			return buflen;
    606 		}
    607 		break;
    608 
    609 	case C(UR_GET_CONFIG, UT_READ_DEVICE):
    610 	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
    611 	case C(UR_GET_STATUS, UT_READ_INTERFACE):
    612 	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
    613 	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
    614 	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
    615 		/* default from usbroothub */
    616 		DPRINTFN(4, "returning %d (usbroothub default)", buflen);
    617 
    618 		return buflen;
    619 
    620 	default:
    621 		/* Hub requests */
    622 		err = dwc2_hcd_hub_control(sc->sc_hsotg,
    623 		    C(req->bRequest, req->bmRequestType), value, index,
    624 		    buf, len);
    625 		if (err) {
    626 			return -1;
    627 		}
    628 		totlen = len;
    629 	}
    630 
    631 	return totlen;
    632 }
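
/*
 * For the standard device requests above, returning buflen defers to
 * the usbroothub default handlers; hub-class requests fall through to
 * dwc2_hcd_hub_control() in the shared HCD code.
 */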
    633 
    634 Static usbd_status
    635 dwc2_root_intr_transfer(struct usbd_xfer *xfer)
    636 {
    637 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    638 	usbd_status err;
    639 
    640 	DPRINTF("\n");
    641 
    642 	/* Insert last in queue. */
    643 	mutex_enter(&sc->sc_lock);
    644 	err = usb_insert_transfer(xfer);
    645 	mutex_exit(&sc->sc_lock);
    646 	if (err)
    647 		return err;
    648 
    649 	/* Pipe isn't running, start first */
    650 	return dwc2_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
    651 }
    652 
    653 Static usbd_status
    654 dwc2_root_intr_start(struct usbd_xfer *xfer)
    655 {
    656 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    657 	const bool polling = sc->sc_bus.ub_usepolling;
    658 
    659 	DPRINTF("\n");
    660 
    661 	if (sc->sc_dying)
    662 		return USBD_IOERROR;
    663 
    664 	if (!polling)
    665 		mutex_enter(&sc->sc_lock);
    666 	KASSERT(sc->sc_intrxfer == NULL);
    667 	sc->sc_intrxfer = xfer;
    668 	if (!polling)
    669 		mutex_exit(&sc->sc_lock);
    670 
    671 	return USBD_IN_PROGRESS;
    672 }
    673 
    674 /* Abort a root interrupt request. */
    675 Static void
    676 dwc2_root_intr_abort(struct usbd_xfer *xfer)
    677 {
    678 	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
    679 
    680 	DPRINTF("xfer=%p\n", xfer);
    681 
    682 	KASSERT(mutex_owned(&sc->sc_lock));
    683 	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
    684 
    685 	xfer->ux_status = USBD_CANCELLED;
    686 	usb_transfer_complete(xfer);
    687 }
    688 
    689 Static void
    690 dwc2_root_intr_close(struct usbd_pipe *pipe)
    691 {
    692 	struct dwc2_softc *sc = DWC2_PIPE2SC(pipe);
    693 
    694 	DPRINTF("\n");
    695 
    696 	KASSERT(mutex_owned(&sc->sc_lock));
    697 
    698 	sc->sc_intrxfer = NULL;
    699 }
    700 
    701 Static void
    702 dwc2_root_intr_done(struct usbd_xfer *xfer)
    703 {
    704 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    705 
    706 	KASSERT(sc->sc_intrxfer != NULL);
    707 	sc->sc_intrxfer = NULL;
    708 	DPRINTF("\n");
    709 }
    710 
    711 /***********************************************************************/
    712 
    713 Static usbd_status
    714 dwc2_device_ctrl_transfer(struct usbd_xfer *xfer)
    715 {
    716 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    717 	usbd_status err;
    718 
    719 	DPRINTF("\n");
    720 
    721 	/* Insert last in queue. */
    722 	mutex_enter(&sc->sc_lock);
    723 	err = usb_insert_transfer(xfer);
    724 	mutex_exit(&sc->sc_lock);
    725 	if (err)
    726 		return err;
    727 
    728 	/* Pipe isn't running, start first */
    729 	return dwc2_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
    730 }
    731 
    732 Static usbd_status
    733 dwc2_device_ctrl_start(struct usbd_xfer *xfer)
    734 {
    735 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    736 	usbd_status err;
    737 	const bool polling = sc->sc_bus.ub_usepolling;
    738 
    739 	DPRINTF("\n");
    740 
    741 	if (!polling)
    742 		mutex_enter(&sc->sc_lock);
    743 	xfer->ux_status = USBD_IN_PROGRESS;
    744 	err = dwc2_device_start(xfer);
    745 	if (!polling)
    746 		mutex_exit(&sc->sc_lock);
    747 
    748 	if (err)
    749 		return err;
    750 
    751 	return USBD_IN_PROGRESS;
    752 }
    753 
    754 Static void
    755 dwc2_device_ctrl_abort(struct usbd_xfer *xfer)
    756 {
    757 #ifdef DIAGNOSTIC
    758 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    759 #endif
    760 	KASSERT(mutex_owned(&sc->sc_lock));
    761 
    762 	DPRINTF("xfer=%p\n", xfer);
    763 	dwc2_abort_xfer(xfer, USBD_CANCELLED);
    764 }
    765 
    766 Static void
    767 dwc2_device_ctrl_close(struct usbd_pipe *pipe)
    768 {
    769 
    770 	DPRINTF("pipe=%p\n", pipe);
    771 	dwc2_close_pipe(pipe);
    772 }
    773 
    774 Static void
    775 dwc2_device_ctrl_done(struct usbd_xfer *xfer)
    776 {
    777 
    778 	DPRINTF("xfer=%p\n", xfer);
    779 }
    780 
    781 /***********************************************************************/
    782 
    783 Static usbd_status
    784 dwc2_device_bulk_transfer(struct usbd_xfer *xfer)
    785 {
    786 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    787 	usbd_status err;
    788 
    789 	DPRINTF("xfer=%p\n", xfer);
    790 
    791 	/* Insert last in queue. */
    792 	mutex_enter(&sc->sc_lock);
    793 	err = usb_insert_transfer(xfer);
    794 
    795 	KASSERT(err == USBD_NORMAL_COMPLETION);
    796 
    797 	xfer->ux_status = USBD_IN_PROGRESS;
    798 	err = dwc2_device_start(xfer);
    799 	mutex_exit(&sc->sc_lock);
    800 
    801 	return err;
    802 }
    803 
    804 Static void
    805 dwc2_device_bulk_abort(struct usbd_xfer *xfer)
    806 {
    807 #ifdef DIAGNOSTIC
    808 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    809 #endif
    810 	KASSERT(mutex_owned(&sc->sc_lock));
    811 
    812 	DPRINTF("xfer=%p\n", xfer);
    813 	dwc2_abort_xfer(xfer, USBD_CANCELLED);
    814 }
    815 
    816 Static void
    817 dwc2_device_bulk_close(struct usbd_pipe *pipe)
    818 {
    819 
    820 	DPRINTF("pipe=%p\n", pipe);
    821 
    822 	dwc2_close_pipe(pipe);
    823 }
    824 
    825 Static void
    826 dwc2_device_bulk_done(struct usbd_xfer *xfer)
    827 {
    828 
    829 	DPRINTF("xfer=%p\n", xfer);
    830 }
    831 
    832 /***********************************************************************/
    833 
    834 Static usbd_status
    835 dwc2_device_intr_transfer(struct usbd_xfer *xfer)
    836 {
    837 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    838 	usbd_status err;
    839 
    840 	DPRINTF("xfer=%p\n", xfer);
    841 
    842 	/* Insert last in queue. */
    843 	mutex_enter(&sc->sc_lock);
    844 	err = usb_insert_transfer(xfer);
    845 	mutex_exit(&sc->sc_lock);
    846 	if (err)
    847 		return err;
    848 
    849 	/* Pipe isn't running, start first */
    850 	return dwc2_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
    851 }
    852 
    853 Static usbd_status
    854 dwc2_device_intr_start(struct usbd_xfer *xfer)
    855 {
     856 	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
    857 	struct usbd_device *dev = dpipe->pipe.up_dev;
    858 	struct dwc2_softc *sc = dev->ud_bus->ub_hcpriv;
    859 	usbd_status err;
    860 	const bool polling = sc->sc_bus.ub_usepolling;
    861 
    862 	if (!polling)
    863 		mutex_enter(&sc->sc_lock);
    864 	xfer->ux_status = USBD_IN_PROGRESS;
    865 	err = dwc2_device_start(xfer);
    866 	if (!polling)
    867 		mutex_exit(&sc->sc_lock);
    868 
    869 	if (err)
    870 		return err;
    871 
    872 	return USBD_IN_PROGRESS;
    873 }
    874 
    875 /* Abort a device interrupt request. */
    876 Static void
    877 dwc2_device_intr_abort(struct usbd_xfer *xfer)
    878 {
    879 #ifdef DIAGNOSTIC
    880 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    881 #endif
    882 
    883 	KASSERT(mutex_owned(&sc->sc_lock));
    884 	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
    885 
    886 	DPRINTF("xfer=%p\n", xfer);
    887 
    888 	dwc2_abort_xfer(xfer, USBD_CANCELLED);
    889 }
    890 
    891 Static void
    892 dwc2_device_intr_close(struct usbd_pipe *pipe)
    893 {
    894 
    895 	DPRINTF("pipe=%p\n", pipe);
    896 
    897 	dwc2_close_pipe(pipe);
    898 }
    899 
    900 Static void
    901 dwc2_device_intr_done(struct usbd_xfer *xfer)
    902 {
    903 
    904 	DPRINTF("\n");
    905 }
    906 
    907 /***********************************************************************/
    908 
    909 usbd_status
    910 dwc2_device_isoc_transfer(struct usbd_xfer *xfer)
    911 {
    912 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    913 	usbd_status err;
    914 
    915 	DPRINTF("xfer=%p\n", xfer);
    916 
    917 	/* Insert last in queue. */
    918 	mutex_enter(&sc->sc_lock);
    919 	err = usb_insert_transfer(xfer);
    920 
    921 	KASSERT(err == USBD_NORMAL_COMPLETION);
    922 
    923 	xfer->ux_status = USBD_IN_PROGRESS;
    924 	err = dwc2_device_start(xfer);
    925 	mutex_exit(&sc->sc_lock);
    926 
    927 	return err;
    928 }
    929 
    930 void
    931 dwc2_device_isoc_abort(struct usbd_xfer *xfer)
    932 {
    933 	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
    934 	KASSERT(mutex_owned(&sc->sc_lock));
    935 
    936 	DPRINTF("xfer=%p\n", xfer);
    937 	dwc2_abort_xfer(xfer, USBD_CANCELLED);
    938 }
    939 
    940 void
    941 dwc2_device_isoc_close(struct usbd_pipe *pipe)
    942 {
    943 	DPRINTF("\n");
    944 
    945 	dwc2_close_pipe(pipe);
    946 }
    947 
    948 void
    949 dwc2_device_isoc_done(struct usbd_xfer *xfer)
    950 {
    951 
    952 	DPRINTF("\n");
    953 }
    954 
    955 
    956 usbd_status
    957 dwc2_device_start(struct usbd_xfer *xfer)
    958 {
    959  	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
    960 	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
    961 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
    962 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
    963 	struct dwc2_hcd_urb *dwc2_urb;
    964 
    965 	struct usbd_device *dev = xfer->ux_pipe->up_dev;
    966 	usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
    967 	uint8_t addr = dev->ud_addr;
    968 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
    969 	uint8_t epnum = UE_GET_ADDR(ed->bEndpointAddress);
    970 	uint8_t dir = UE_GET_DIR(ed->bEndpointAddress);
    971 	uint16_t mps = UE_GET_SIZE(UGETW(ed->wMaxPacketSize));
    972 	uint32_t len;
    973 
    974 	uint32_t flags = 0;
    975 	uint32_t off = 0;
    976 	int retval, err;
    977 	int alloc_bandwidth = 0;
    978 	int i;
    979 
    980 	DPRINTFN(1, "xfer=%p pipe=%p\n", xfer, xfer->ux_pipe);
    981 
    982 	if (xfertype == UE_ISOCHRONOUS ||
    983 	    xfertype == UE_INTERRUPT) {
    984 		mutex_spin_enter(&hsotg->lock);
    985 		if (!dwc2_hcd_is_bandwidth_allocated(hsotg, xfer))
    986 			alloc_bandwidth = 1;
    987 		mutex_spin_exit(&hsotg->lock);
    988 	}
    989 
    990 	/*
    991 	 * For Control pipe the direction is from the request, all other
    992 	 * transfers have been set correctly at pipe open time.
    993 	 */
    994 	if (xfertype == UE_CONTROL) {
    995 		usb_device_request_t *req = &xfer->ux_request;
    996 
    997 		DPRINTFN(3, "xfer=%p type=0x%02x request=0x%02x wValue=0x%04x "
    998 		    "wIndex=0x%04x len=%d addr=%d endpt=%d dir=%s speed=%d "
    999 		    "mps=%d\n",
   1000 		    xfer, req->bmRequestType, req->bRequest, UGETW(req->wValue),
   1001 		    UGETW(req->wIndex), UGETW(req->wLength), dev->ud_addr,
   1002 		    epnum, dir == UT_READ ? "in" :"out", dev->ud_speed, mps);
   1003 
   1004 		/* Copy request packet to our DMA buffer */
   1005 		memcpy(KERNADDR(&dpipe->req_dma, 0), req, sizeof(*req));
   1006 		usb_syncmem(&dpipe->req_dma, 0, sizeof(*req),
   1007 		    BUS_DMASYNC_PREWRITE);
   1008 		len = UGETW(req->wLength);
   1009 		if ((req->bmRequestType & UT_READ) == UT_READ) {
   1010 			dir = UE_DIR_IN;
   1011 		} else {
   1012 			dir = UE_DIR_OUT;
   1013 		}
   1014 
   1015 		DPRINTFN(3, "req = %p dma = %" PRIxBUSADDR " len %d dir %s\n",
   1016 		    KERNADDR(&dpipe->req_dma, 0), DMAADDR(&dpipe->req_dma, 0),
   1017 		    len, dir == UE_DIR_IN ? "in" : "out");
   1018 	} else {
   1019 		DPRINTFN(3, "xfer=%p len=%d flags=%d addr=%d endpt=%d,"
   1020 		    " mps=%d dir %s\n", xfer, xfer->ux_length, xfer->ux_flags, addr,
   1021 		    epnum, mps, dir == UT_READ ? "in" :"out");
   1022 
   1023 		len = xfer->ux_length;
   1024 	}
   1025 
   1026 	dwc2_urb = dxfer->urb;
   1027 	if (!dwc2_urb)
   1028 		return USBD_NOMEM;
   1029 
   1030 	KASSERT(dwc2_urb->packet_count == xfer->ux_nframes);
   1031 	memset(dwc2_urb, 0, sizeof(*dwc2_urb) +
   1032 	    sizeof(dwc2_urb->iso_descs[0]) * dwc2_urb->packet_count);
   1033 
   1034 	dwc2_urb->priv = xfer;
   1035 	dwc2_urb->packet_count = xfer->ux_nframes;
   1036 
   1037 	dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, addr, epnum, xfertype, dir,
   1038 	    mps);
   1039 
   1040 	if (xfertype == UE_CONTROL) {
   1041 		dwc2_urb->setup_usbdma = &dpipe->req_dma;
   1042 		dwc2_urb->setup_packet = KERNADDR(&dpipe->req_dma, 0);
   1043 		dwc2_urb->setup_dma = DMAADDR(&dpipe->req_dma, 0);
   1044 	} else {
   1045 		/* XXXNH - % mps required? */
   1046 		if ((xfer->ux_flags & USBD_FORCE_SHORT_XFER) && (len % mps) == 0)
   1047 		    flags |= URB_SEND_ZERO_PACKET;
   1048 	}
   1049 	flags |= URB_GIVEBACK_ASAP;
   1050 
   1051 	/*
   1052 	 * control transfers with no data phase don't touch usbdma, but
   1053 	 * everything else does.
   1054 	 */
   1055 	if (!(xfertype == UE_CONTROL && len == 0)) {
   1056 		dwc2_urb->usbdma = &xfer->ux_dmabuf;
   1057 		dwc2_urb->buf = KERNADDR(dwc2_urb->usbdma, 0);
   1058 		dwc2_urb->dma = DMAADDR(dwc2_urb->usbdma, 0);
   1059 
   1060 		usb_syncmem(&xfer->ux_dmabuf, 0, len,
   1061 		    dir == UE_DIR_IN ?
   1062 			BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
   1063  	}
   1064 	dwc2_urb->length = len;
   1065  	dwc2_urb->flags = flags;
   1066 	dwc2_urb->status = -EINPROGRESS;
   1067 
   1068 	if (xfertype == UE_INTERRUPT ||
   1069 	    xfertype == UE_ISOCHRONOUS) {
   1070 		uint16_t ival;
   1071 
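		/*
		 * bInterval is an exponent (2^(ival-1) microframes) for
		 * high-speed endpoints and full-speed isochronous ones;
		 * for other full/low-speed endpoints it is a frame count,
		 * clamped below to 10 for interrupt transfers.
		 */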
   1072 		if (xfertype == UE_INTERRUPT &&
   1073 		    dpipe->pipe.up_interval != USBD_DEFAULT_INTERVAL) {
   1074 			ival = dpipe->pipe.up_interval;
   1075 		} else {
   1076 			ival = ed->bInterval;
   1077 		}
   1078 
   1079 		if (ival < 1) {
   1080 			retval = -ENODEV;
   1081 			goto fail;
   1082 		}
   1083 		if (dev->ud_speed == USB_SPEED_HIGH ||
   1084 		   (dev->ud_speed == USB_SPEED_FULL && xfertype == UE_ISOCHRONOUS)) {
   1085 			if (ival > 16) {
   1086 				/*
   1087 				 * illegal with HS/FS, but there were
   1088 				 * documentation bugs in the spec
   1089 				 */
   1090 				ival = 256;
   1091 			} else {
   1092 				ival = (1 << (ival - 1));
   1093 			}
   1094 		} else {
   1095 			if (xfertype == UE_INTERRUPT && ival < 10)
   1096 				ival = 10;
   1097 		}
   1098 		dwc2_urb->interval = ival;
   1099 	}
   1100 
   1101 	/* XXXNH bring down from callers?? */
   1102 // 	mutex_enter(&sc->sc_lock);
   1103 
   1104 	xfer->ux_actlen = 0;
   1105 
   1106 	KASSERT(xfertype != UE_ISOCHRONOUS ||
   1107 	    xfer->ux_nframes <= dwc2_urb->packet_count);
   1108 	KASSERTMSG(xfer->ux_nframes == 0 || xfertype == UE_ISOCHRONOUS,
   1109 	    "nframes %d xfertype %d\n", xfer->ux_nframes, xfertype);
   1110 
   1111 	for (off = i = 0; i < xfer->ux_nframes; ++i) {
   1112 		DPRINTFN(3, "xfer=%p frame=%d offset=%d length=%d\n", xfer, i,
   1113 		    off, xfer->ux_frlengths[i]);
   1114 
   1115 		dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i, off,
   1116 		    xfer->ux_frlengths[i]);
   1117 		off += xfer->ux_frlengths[i];
   1118 	}
   1119 
   1120 	struct dwc2_qh *qh = dpipe->priv;
   1121 	struct dwc2_qtd *qtd;
   1122 	bool qh_allocated = false;
   1123 
   1124 	/* Create QH for the endpoint if it doesn't exist */
   1125 	if (!qh) {
   1126 		qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, GFP_ATOMIC);
   1127 		if (!qh) {
   1128 			retval = -ENOMEM;
   1129 			goto fail;
   1130 		}
   1131 		dpipe->priv = qh;
   1132 		qh_allocated = true;
   1133 	}
   1134 
   1135 	qtd = pool_cache_get(sc->sc_qtdpool, PR_NOWAIT);
   1136 	if (!qtd) {
   1137 		retval = -ENOMEM;
   1138 		goto fail1;
   1139 	}
   1140 	memset(qtd, 0, sizeof(*qtd));
   1141 
   1142 	/* might need to check cpu_intr_p */
   1143 	mutex_spin_enter(&hsotg->lock);
   1144 
   1145 	if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) {
   1146 		callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout),
   1147 		    dwc2_timeout, xfer);
   1148 	}
   1149 	retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
   1150 	if (retval)
   1151 		goto fail2;
   1152 
   1153 	if (alloc_bandwidth) {
   1154 		dwc2_allocate_bus_bandwidth(hsotg,
   1155 				dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
   1156 				xfer);
   1157 	}
   1158 
   1159 	mutex_spin_exit(&hsotg->lock);
   1160 // 	mutex_exit(&sc->sc_lock);
   1161 
   1162 	return USBD_IN_PROGRESS;
   1163 
   1164 fail2:
   1165 	callout_halt(&xfer->ux_callout, &hsotg->lock);
   1166 	dwc2_urb->priv = NULL;
   1167 	mutex_spin_exit(&hsotg->lock);
   1168 	pool_cache_put(sc->sc_qtdpool, qtd);
   1169 
   1170 fail1:
   1171 	if (qh_allocated) {
   1172 		dpipe->priv = NULL;
   1173 		dwc2_hcd_qh_free(hsotg, qh);
   1174 	}
   1175 fail:
   1176 
   1177 	switch (retval) {
   1178 	case -EINVAL:
   1179 	case -ENODEV:
   1180 		err = USBD_INVAL;
   1181 		break;
   1182 	case -ENOMEM:
   1183 		err = USBD_NOMEM;
   1184 		break;
   1185 	default:
   1186 		err = USBD_IOERROR;
   1187 	}
   1188 
   1189 	return err;
   1190 
   1191 }
   1192 
   1193 int dwc2_intr(void *p)
   1194 {
   1195 	struct dwc2_softc *sc = p;
   1196 	struct dwc2_hsotg *hsotg;
   1197 	int ret = 0;
   1198 
   1199 	if (sc == NULL)
   1200 		return 0;
   1201 
   1202 	hsotg = sc->sc_hsotg;
   1203 	mutex_spin_enter(&hsotg->lock);
   1204 
   1205 	if (sc->sc_dying || !device_has_power(sc->sc_dev))
   1206 		goto done;
   1207 
   1208 	if (sc->sc_bus.ub_usepolling) {
   1209 		uint32_t intrs;
   1210 
   1211 		intrs = dwc2_read_core_intr(hsotg);
   1212 		DWC2_WRITE_4(hsotg, GINTSTS, intrs);
   1213 	} else {
   1214 		ret = dwc2_interrupt(sc);
   1215 	}
   1216 
   1217 done:
   1218 	mutex_spin_exit(&hsotg->lock);
   1219 
   1220 	return ret;
   1221 }
   1222 
   1223 int
   1224 dwc2_interrupt(struct dwc2_softc *sc)
   1225 {
   1226 	int ret = 0;
   1227 
   1228 	if (sc->sc_hcdenabled) {
   1229 		ret |= dwc2_handle_hcd_intr(sc->sc_hsotg);
   1230 	}
   1231 
   1232 	ret |= dwc2_handle_common_intr(sc->sc_hsotg);
   1233 
   1234 	return ret;
   1235 }
   1236 
   1237 /***********************************************************************/
   1238 
   1239 int
   1240 dwc2_detach(struct dwc2_softc *sc, int flags)
   1241 {
   1242 	int rv = 0;
   1243 
   1244 	if (sc->sc_child != NULL)
   1245 		rv = config_detach(sc->sc_child, flags);
   1246 
   1247 	return rv;
   1248 }
   1249 
   1250 bool
   1251 dwc2_shutdown(device_t self, int flags)
   1252 {
   1253 	struct dwc2_softc *sc = device_private(self);
   1254 
   1255 	sc = sc;
   1256 
   1257 	return true;
   1258 }
   1259 
   1260 void
   1261 dwc2_childdet(device_t self, device_t child)
   1262 {
   1263 	struct dwc2_softc *sc = device_private(self);
   1264 
   1265 	sc = sc;
   1266 }
   1267 
   1268 int
   1269 dwc2_activate(device_t self, enum devact act)
   1270 {
   1271 	struct dwc2_softc *sc = device_private(self);
   1272 
   1273 	sc = sc;
   1274 
   1275 	return 0;
   1276 }
   1277 
   1278 bool
   1279 dwc2_resume(device_t dv, const pmf_qual_t *qual)
   1280 {
   1281 	struct dwc2_softc *sc = device_private(dv);
   1282 
   1283 	sc = sc;
   1284 
   1285 	return true;
   1286 }
   1287 
   1288 bool
   1289 dwc2_suspend(device_t dv, const pmf_qual_t *qual)
   1290 {
   1291 	struct dwc2_softc *sc = device_private(dv);
   1292 
   1293 	sc = sc;
   1294 
   1295 	return true;
   1296 }
   1297 
   1298 /***********************************************************************/
   1299 int
   1300 dwc2_init(struct dwc2_softc *sc)
   1301 {
   1302 	int err = 0;
   1303 
   1304 	err = linux_workqueue_init();
   1305 	if (err)
   1306 		return err;
   1307 
   1308 	sc->sc_bus.ub_hcpriv = sc;
   1309 	sc->sc_bus.ub_revision = USBREV_2_0;
   1310 	sc->sc_bus.ub_methods = &dwc2_bus_methods;
   1311 	sc->sc_bus.ub_pipesize = sizeof(struct dwc2_pipe);
   1312 	sc->sc_bus.ub_usedma = true;
   1313 	sc->sc_hcdenabled = false;
   1314 
   1315 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
   1316 
   1317 	TAILQ_INIT(&sc->sc_complete);
   1318 
   1319 	sc->sc_rhc_si = softint_establish(SOFTINT_USB | SOFTINT_MPSAFE,
   1320 	    dwc2_rhc, sc);
   1321 
   1322 	sc->sc_xferpool = pool_cache_init(sizeof(struct dwc2_xfer), 0, 0, 0,
   1323 	    "dwc2xfer", NULL, IPL_USB, NULL, NULL, NULL);
   1324 	sc->sc_qhpool = pool_cache_init(sizeof(struct dwc2_qh), 0, 0, 0,
   1325 	    "dwc2qh", NULL, IPL_USB, NULL, NULL, NULL);
   1326 	sc->sc_qtdpool = pool_cache_init(sizeof(struct dwc2_qtd), 0, 0, 0,
   1327 	    "dwc2qtd", NULL, IPL_USB, NULL, NULL, NULL);
   1328 
   1329 	sc->sc_hsotg = kmem_zalloc(sizeof(struct dwc2_hsotg), KM_SLEEP);
   1330 	sc->sc_hsotg->hsotg_sc = sc;
   1331 	sc->sc_hsotg->dev = sc->sc_dev;
   1332 	sc->sc_hcdenabled = true;
   1333 
   1334 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
   1335 	struct dwc2_core_params defparams;
   1336 	int retval;
   1337 
   1338 	if (sc->sc_params == NULL) {
   1339 		/* Default all params to autodetect */
   1340 		dwc2_set_all_params(&defparams, -1);
   1341 		sc->sc_params = &defparams;
   1342 
   1343 		/*
   1344 		 * Disable descriptor dma mode by default as the HW can support
   1345 		 * it, but does not support it for SPLIT transactions.
   1346 		 */
   1347 		defparams.dma_desc_enable = 0;
   1348 	}
   1349 	hsotg->dr_mode = USB_DR_MODE_HOST;
   1350 
   1351 	/* Detect config values from hardware */
   1352 	retval = dwc2_get_hwparams(hsotg);
   1353 	if (retval) {
   1354 		goto fail2;
   1355 	}
   1356 
   1357 	hsotg->core_params = kmem_zalloc(sizeof(*hsotg->core_params), KM_SLEEP);
   1358 	dwc2_set_all_params(hsotg->core_params, -1);
   1359 
   1360 	/* Validate parameter values */
   1361 	dwc2_set_parameters(hsotg, sc->sc_params);
   1362 
   1363 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
   1364     IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
   1365 	if (hsotg->dr_mode != USB_DR_MODE_HOST) {
   1366 		retval = dwc2_gadget_init(hsotg);
   1367 		if (retval)
   1368 			goto fail2;
   1369 		hsotg->gadget_enabled = 1;
   1370 	}
   1371 #endif
   1372 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \
   1373     IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
   1374 	if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) {
   1375 		retval = dwc2_hcd_init(hsotg);
   1376 		if (retval) {
   1377 			if (hsotg->gadget_enabled)
   1378 				dwc2_hsotg_remove(hsotg);
   1379 			goto fail2;
   1380 		}
    1381 		hsotg->hcd_enabled = 1;
    1382 	}
   1383 #endif
   1384 
   1385 	uint32_t snpsid = hsotg->hw_params.snpsid;
   1386 	aprint_verbose_dev(sc->sc_dev, "Core Release: %x.%x%x%x (snpsid=%x)\n",
   1387 	    snpsid >> 12 & 0xf, snpsid >> 8 & 0xf,
   1388 	    snpsid >> 4 & 0xf, snpsid & 0xf, snpsid);
   1389 
   1390 	return 0;
   1391 
   1392 fail2:
   1393 	err = -retval;
   1394 	kmem_free(sc->sc_hsotg, sizeof(struct dwc2_hsotg));
   1395 	softint_disestablish(sc->sc_rhc_si);
   1396 
   1397 	return err;
   1398 }
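
/*
 * Sketch of how a bus front end is expected to use this file, going by
 * the code above (the exact glue softc members are illustrative, not
 * taken from this file):
 *
 *	sc->sc_dev = self;
 *	sc->sc_params = NULL;		// or board-specific dwc2_core_params
 *	if (dwc2_init(sc) != 0)
 *		return;
 *	// establish the interrupt and point it at dwc2_intr(sc), then
 *	// attach the USB bus child so dwc2_detach() can detach it later.
 */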
   1399 
   1400 #if 0
   1401 /*
   1402  * curmode is a mode indication bit 0 = device, 1 = host
   1403  */
   1404 static const char * const intnames[32] = {
   1405 	"curmode",	"modemis",	"otgint",	"sof",
   1406 	"rxflvl",	"nptxfemp",	"ginnakeff",	"goutnakeff",
   1407 	"ulpickint",	"i2cint",	"erlysusp",	"usbsusp",
   1408 	"usbrst",	"enumdone",	"isooutdrop",	"eopf",
   1409 	"restore_done",	"epmis",	"iepint",	"oepint",
   1410 	"incompisoin",	"incomplp",	"fetsusp",	"resetdet",
   1411 	"prtint",	"hchint",	"ptxfemp",	"lpm",
   1412 	"conidstschng",	"disconnint",	"sessreqint",	"wkupint"
   1413 };
   1414 
   1415 
   1416 /***********************************************************************/
   1417 
   1418 #endif
   1419 
   1420 void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
   1421 			int *hub_port)
   1422 {
   1423 	struct usbd_xfer *xfer = context;
   1424 	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
   1425 	struct usbd_device *dev = dpipe->pipe.up_dev;
   1426 
   1427 	*hub_addr = dev->ud_myhsport->up_parent->ud_addr;
   1428  	*hub_port = dev->ud_myhsport->up_portno;
   1429 }
   1430 
   1431 int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
   1432 {
   1433 	struct usbd_xfer *xfer = context;
   1434 	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
   1435 	struct usbd_device *dev = dpipe->pipe.up_dev;
   1436 
   1437 	return dev->ud_speed;
   1438 }
   1439 
   1440 /*
   1441  * Sets the final status of an URB and returns it to the upper layer. Any
   1442  * required cleanup of the URB is performed.
   1443  *
   1444  * Must be called with interrupt disabled and spinlock held
   1445  */
   1446 void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
   1447     int status)
   1448 {
   1449 	struct usbd_xfer *xfer;
   1450 	struct dwc2_xfer *dxfer;
   1451 	struct dwc2_softc *sc;
   1452 	usb_endpoint_descriptor_t *ed;
   1453 	uint8_t xfertype;
   1454 
   1455 	if (!qtd) {
   1456 		dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
   1457 		return;
   1458 	}
   1459 
   1460 	if (!qtd->urb) {
   1461 		dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
   1462 		return;
   1463 	}
   1464 
   1465 	xfer = qtd->urb->priv;
   1466 	if (!xfer) {
   1467 		dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
   1468 		return;
   1469 	}
   1470 
   1471 	/*
   1472 	 * If software has completed it, either by cancellation
   1473 	 * or timeout, drop it on the floor.
   1474 	 */
   1475 	if (xfer->ux_status != USBD_IN_PROGRESS) {
   1476 		KASSERT(xfer->ux_status == USBD_CANCELLED ||
   1477 		    xfer->ux_status == USBD_TIMEOUT);
   1478 		return;
   1479 	}
   1480 
   1481 	/*
   1482 	 * Cancel the timeout and the task, which have not yet
   1483 	 * run.  If they have already fired, at worst they are
   1484 	 * waiting for the lock.  They will see that the xfer
   1485 	 * is no longer in progress and give up.
   1486 	 */
   1487 	callout_stop(&xfer->ux_callout);
   1488 	usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask);
   1489 
   1490 	dxfer = DWC2_XFER2DXFER(xfer);
   1491 	sc = DWC2_XFER2SC(xfer);
   1492 	ed = xfer->ux_pipe->up_endpoint->ue_edesc;
   1493 	xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
   1494 
   1495 	struct dwc2_hcd_urb *urb = qtd->urb;
   1496 	xfer->ux_actlen = dwc2_hcd_urb_get_actual_length(urb);
   1497 
   1498 	DPRINTFN(3, "xfer=%p actlen=%d\n", xfer, xfer->ux_actlen);
   1499 
   1500 	if (xfertype == UE_ISOCHRONOUS) {
   1501 		int i;
   1502 
   1503 		xfer->ux_actlen = 0;
   1504 		for (i = 0; i < xfer->ux_nframes; ++i) {
   1505 			xfer->ux_frlengths[i] =
   1506 				dwc2_hcd_urb_get_iso_desc_actual_length(
   1507 						urb, i);
   1508 			xfer->ux_actlen += xfer->ux_frlengths[i];
   1509 		}
   1510 	}
   1511 
   1512 	if (xfertype == UE_ISOCHRONOUS && dbg_perio()) {
   1513 		int i;
   1514 
   1515 		for (i = 0; i < xfer->ux_nframes; i++)
   1516 			dev_vdbg(hsotg->dev, " ISO Desc %d status %d\n",
   1517 				 i, urb->iso_descs[i].status);
   1518 	}
   1519 
   1520 	if (!status) {
   1521 		if (!(xfer->ux_flags & USBD_SHORT_XFER_OK) &&
   1522 		    xfer->ux_actlen < xfer->ux_length)
   1523 			status = -EIO;
   1524 	}
   1525 
   1526 	switch (status) {
   1527 	case 0:
   1528 		xfer->ux_status = USBD_NORMAL_COMPLETION;
   1529 		break;
   1530 	case -EPIPE:
   1531 		xfer->ux_status = USBD_STALLED;
   1532 		break;
   1533 	case -ETIMEDOUT:
   1534 		xfer->ux_status = USBD_TIMEOUT;
   1535 		break;
   1536 	case -EPROTO:
   1537 		xfer->ux_status = USBD_INVAL;
   1538 		break;
   1539 	case -EIO:
   1540 		xfer->ux_status = USBD_IOERROR;
   1541 		break;
   1542 	case -EOVERFLOW:
   1543 		xfer->ux_status = USBD_IOERROR;
   1544 		break;
   1545 	default:
   1546 		xfer->ux_status = USBD_IOERROR;
   1547 		printf("%s: unknown error status %d\n", __func__, status);
   1548 	}
   1549 
   1550 	if (xfer->ux_status == USBD_NORMAL_COMPLETION) {
   1551 		/*
   1552 		 * control transfers with no data phase don't touch dmabuf, but
   1553 		 * everything else does.
   1554 		 */
   1555 		if (!(xfertype == UE_CONTROL &&
   1556 		    UGETW(xfer->ux_request.wLength) == 0) &&
   1557 		    xfer->ux_actlen > 0	/* XXX PR/53503 */
   1558 		    ) {
   1559 			int rd = usbd_xfer_isread(xfer);
   1560 
   1561 			usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_actlen,
   1562 			    rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
   1563 		}
   1564 	}
   1565 
   1566 	if (xfertype == UE_ISOCHRONOUS ||
   1567 	    xfertype == UE_INTERRUPT) {
   1568 		struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
   1569 
   1570 		dwc2_free_bus_bandwidth(hsotg,
   1571 					dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
   1572 					xfer);
   1573 	}
   1574 
   1575 	qtd->urb = NULL;
   1576 	KASSERT(mutex_owned(&hsotg->lock));
   1577 
   1578 	TAILQ_INSERT_TAIL(&sc->sc_complete, dxfer, xnext);
   1579 
   1580 	mutex_spin_exit(&hsotg->lock);
   1581 	usb_schedsoftintr(&sc->sc_bus);
   1582 	mutex_spin_enter(&hsotg->lock);
   1583 }
   1584 
   1585 
   1586 int
   1587 _dwc2_hcd_start(struct dwc2_hsotg *hsotg)
   1588 {
   1589 	dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
   1590 
   1591 	mutex_spin_enter(&hsotg->lock);
   1592 
   1593 	hsotg->lx_state = DWC2_L0;
   1594 
   1595 	if (dwc2_is_device_mode(hsotg)) {
   1596 		mutex_spin_exit(&hsotg->lock);
   1597 		return 0;	/* why 0 ?? */
   1598 	}
   1599 
   1600 	dwc2_hcd_reinit(hsotg);
   1601 
   1602 	mutex_spin_exit(&hsotg->lock);
   1603 	return 0;
   1604 }
   1605 
   1606 int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
   1607 {
   1608 
   1609 	return false;
   1610 }
   1611