      1 /*	$NetBSD: usbdi.c,v 1.204.6.1 2021/06/17 04:46:31 thorpej Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1998, 2012, 2015 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Lennart Augustsson (lennart (at) augustsson.net) at
      9  * Carlstedt Research & Technology, Matthew R. Green (mrg (at) eterna.com.au),
     10  * and Nick Hudson.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  *
     21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     31  * POSSIBILITY OF SUCH DAMAGE.
     32  */
     33 
     34 #include <sys/cdefs.h>
     35 __KERNEL_RCSID(0, "$NetBSD: usbdi.c,v 1.204.6.1 2021/06/17 04:46:31 thorpej Exp $");
     36 
     37 #ifdef _KERNEL_OPT
     38 #include "opt_usb.h"
     39 #include "opt_compat_netbsd.h"
     40 #include "usb_dma.h"
     41 #endif
     42 
     43 #include <sys/param.h>
     44 #include <sys/systm.h>
     45 #include <sys/kernel.h>
     46 #include <sys/device.h>
     47 #include <sys/kmem.h>
     48 #include <sys/proc.h>
     49 #include <sys/bus.h>
     50 #include <sys/cpu.h>
     51 
     52 #include <dev/usb/usb.h>
     53 #include <dev/usb/usbdi.h>
     54 #include <dev/usb/usbdi_util.h>
     55 #include <dev/usb/usbdivar.h>
     56 #include <dev/usb/usb_mem.h>
     57 #include <dev/usb/usb_quirks.h>
     58 #include <dev/usb/usb_sdt.h>
     59 #include <dev/usb/usbhist.h>
     60 
      61 /* UTF-8 encoding support */
     62 #include <fs/unicode.h>
     63 
     64 extern int usbdebug;
     65 
     66 SDT_PROBE_DEFINE5(usb, device, pipe, open,
     67     "struct usbd_interface *"/*iface*/,
     68     "uint8_t"/*address*/,
     69     "uint8_t"/*flags*/,
     70     "int"/*ival*/,
     71     "struct usbd_pipe *"/*pipe*/);
     72 
     73 SDT_PROBE_DEFINE7(usb, device, pipe, open__intr,
     74     "struct usbd_interface *"/*iface*/,
     75     "uint8_t"/*address*/,
     76     "uint8_t"/*flags*/,
     77     "int"/*ival*/,
     78     "usbd_callback"/*cb*/,
     79     "void *"/*cookie*/,
     80     "struct usbd_pipe *"/*pipe*/);
     81 
     82 SDT_PROBE_DEFINE2(usb, device, pipe, transfer__start,
     83     "struct usbd_pipe *"/*pipe*/,
     84     "struct usbd_xfer *"/*xfer*/);
     85 SDT_PROBE_DEFINE3(usb, device, pipe, transfer__done,
     86     "struct usbd_pipe *"/*pipe*/,
     87     "struct usbd_xfer *"/*xfer*/,
     88     "usbd_status"/*err*/);
     89 SDT_PROBE_DEFINE2(usb, device, pipe, start,
     90     "struct usbd_pipe *"/*pipe*/,
     91     "struct usbd_xfer *"/*xfer*/);
     92 
     93 SDT_PROBE_DEFINE1(usb, device, pipe, close,  "struct usbd_pipe *"/*pipe*/);
     94 SDT_PROBE_DEFINE1(usb, device, pipe, abort__start,
     95     "struct usbd_pipe *"/*pipe*/);
     96 SDT_PROBE_DEFINE1(usb, device, pipe, abort__done,
     97     "struct usbd_pipe *"/*pipe*/);
     98 SDT_PROBE_DEFINE1(usb, device, pipe, clear__endpoint__stall,
     99     "struct usbd_pipe *"/*pipe*/);
    100 SDT_PROBE_DEFINE1(usb, device, pipe, clear__endpoint__toggle,
    101     "struct usbd_pipe *"/*pipe*/);
    102 
    103 SDT_PROBE_DEFINE5(usb, device, xfer, create,
    104     "struct usbd_xfer *"/*xfer*/,
    105     "struct usbd_pipe *"/*pipe*/,
    106     "size_t"/*len*/,
    107     "unsigned int"/*flags*/,
    108     "unsigned int"/*nframes*/);
    109 SDT_PROBE_DEFINE1(usb, device, xfer, start,  "struct usbd_xfer *"/*xfer*/);
    110 SDT_PROBE_DEFINE1(usb, device, xfer, preabort,  "struct usbd_xfer *"/*xfer*/);
    111 SDT_PROBE_DEFINE1(usb, device, xfer, abort,  "struct usbd_xfer *"/*xfer*/);
    112 SDT_PROBE_DEFINE1(usb, device, xfer, timeout,  "struct usbd_xfer *"/*xfer*/);
    113 SDT_PROBE_DEFINE2(usb, device, xfer, done,
    114     "struct usbd_xfer *"/*xfer*/,
    115     "usbd_status"/*status*/);
    116 SDT_PROBE_DEFINE1(usb, device, xfer, destroy,  "struct usbd_xfer *"/*xfer*/);
    117 
    118 Static usbd_status usbd_ar_pipe(struct usbd_pipe *);
    119 Static void usbd_start_next(struct usbd_pipe *);
    120 Static usbd_status usbd_open_pipe_ival
    121 	(struct usbd_interface *, uint8_t, uint8_t, struct usbd_pipe **, int);
    122 static void *usbd_alloc_buffer(struct usbd_xfer *, uint32_t);
    123 static void usbd_free_buffer(struct usbd_xfer *);
    124 static struct usbd_xfer *usbd_alloc_xfer(struct usbd_device *, unsigned int);
    125 static usbd_status usbd_free_xfer(struct usbd_xfer *);
    126 static void usbd_request_async_cb(struct usbd_xfer *, void *, usbd_status);
    127 static void usbd_xfer_timeout(void *);
    128 static void usbd_xfer_timeout_task(void *);
    129 static bool usbd_xfer_probe_timeout(struct usbd_xfer *);
    130 static void usbd_xfer_cancel_timeout_async(struct usbd_xfer *);
    131 
    132 #if defined(USB_DEBUG)
    133 void
    134 usbd_dump_iface(struct usbd_interface *iface)
    135 {
    136 	USBHIST_FUNC();
    137 	USBHIST_CALLARGS(usbdebug, "iface %#jx", (uintptr_t)iface, 0, 0, 0);
    138 
    139 	if (iface == NULL)
    140 		return;
    141 	USBHIST_LOG(usbdebug, "     device = %#jx idesc = %#jx index = %jd",
    142 	    (uintptr_t)iface->ui_dev, (uintptr_t)iface->ui_idesc,
    143 	    iface->ui_index, 0);
    144 	USBHIST_LOG(usbdebug, "     altindex=%jd",
    145 	    iface->ui_altindex, 0, 0, 0);
    146 }
    147 
    148 void
    149 usbd_dump_device(struct usbd_device *dev)
    150 {
    151 	USBHIST_FUNC();
    152 	USBHIST_CALLARGS(usbdebug, "dev = %#jx", (uintptr_t)dev, 0, 0, 0);
    153 
    154 	if (dev == NULL)
    155 		return;
    156 	USBHIST_LOG(usbdebug, "     bus = %#jx default_pipe = %#jx",
    157 	    (uintptr_t)dev->ud_bus, (uintptr_t)dev->ud_pipe0, 0, 0);
    158 	USBHIST_LOG(usbdebug, "     address = %jd config = %jd depth = %jd ",
    159 	    dev->ud_addr, dev->ud_config, dev->ud_depth, 0);
    160 	USBHIST_LOG(usbdebug, "     speed = %jd self_powered = %jd "
    161 	    "power = %jd langid = %jd",
    162 	    dev->ud_speed, dev->ud_selfpowered, dev->ud_power, dev->ud_langid);
    163 }
    164 
    165 void
    166 usbd_dump_endpoint(struct usbd_endpoint *endp)
    167 {
    168 	USBHIST_FUNC();
    169 	USBHIST_CALLARGS(usbdebug, "endp = %#jx", (uintptr_t)endp, 0, 0, 0);
    170 
    171 	if (endp == NULL)
    172 		return;
    173 	USBHIST_LOG(usbdebug, "    edesc = %#jx refcnt = %jd",
    174 	    (uintptr_t)endp->ue_edesc, endp->ue_refcnt, 0, 0);
    175 	if (endp->ue_edesc)
    176 		USBHIST_LOG(usbdebug, "     bEndpointAddress=0x%02jx",
    177 		    endp->ue_edesc->bEndpointAddress, 0, 0, 0);
    178 }
    179 
    180 void
    181 usbd_dump_queue(struct usbd_pipe *pipe)
    182 {
    183 	struct usbd_xfer *xfer;
    184 
    185 	USBHIST_FUNC();
    186 	USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 0);
    187 
    188 	SIMPLEQ_FOREACH(xfer, &pipe->up_queue, ux_next) {
    189 		USBHIST_LOG(usbdebug, "     xfer = %#jx", (uintptr_t)xfer,
    190 		    0, 0, 0);
    191 	}
    192 }
    193 
    194 void
    195 usbd_dump_pipe(struct usbd_pipe *pipe)
    196 {
    197 	USBHIST_FUNC();
    198 	USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 0);
    199 
    200 	if (pipe == NULL)
    201 		return;
    202 	usbd_dump_iface(pipe->up_iface);
    203 	usbd_dump_device(pipe->up_dev);
    204 	usbd_dump_endpoint(pipe->up_endpoint);
    205 	USBHIST_LOG(usbdebug, "(usbd_dump_pipe)", 0, 0, 0, 0);
    206 	USBHIST_LOG(usbdebug, "     running = %jd aborting = %jd",
    207 	    pipe->up_running, pipe->up_aborting, 0, 0);
    208 	USBHIST_LOG(usbdebug, "     intrxfer = %#jx, repeat = %jd, "
    209 	    "interval = %jd", (uintptr_t)pipe->up_intrxfer, pipe->up_repeat,
    210 	    pipe->up_interval, 0);
    211 }
    212 #endif
    213 
    214 usbd_status
    215 usbd_open_pipe(struct usbd_interface *iface, uint8_t address,
    216 	       uint8_t flags, struct usbd_pipe **pipe)
    217 {
    218 	return (usbd_open_pipe_ival(iface, address, flags, pipe,
    219 				    USBD_DEFAULT_INTERVAL));
    220 }
    221 
    222 usbd_status
    223 usbd_open_pipe_ival(struct usbd_interface *iface, uint8_t address,
    224 		    uint8_t flags, struct usbd_pipe **pipe, int ival)
    225 {
    226 	struct usbd_pipe *p = NULL;
    227 	struct usbd_endpoint *ep = NULL /* XXXGCC */;
    228 	bool piperef = false;
    229 	usbd_status err;
    230 	int i;
    231 
    232 	USBHIST_FUNC();
    233 	USBHIST_CALLARGS(usbdebug, "iface = %#jx address = %#jx flags = %#jx",
    234 	    (uintptr_t)iface, address, flags, 0);
    235 
    236 	/*
    237 	 * Block usbd_set_interface so we have a snapshot of the
    238 	 * interface endpoints.  They will remain stable until we drop
    239 	 * the reference in usbd_close_pipe (or on failure here).
    240 	 */
    241 	err = usbd_iface_piperef(iface);
    242 	if (err)
    243 		goto out;
    244 	piperef = true;
    245 
    246 	/* Find the endpoint at this address.  */
    247 	for (i = 0; i < iface->ui_idesc->bNumEndpoints; i++) {
    248 		ep = &iface->ui_endpoints[i];
    249 		if (ep->ue_edesc == NULL) {
    250 			err = USBD_IOERROR;
    251 			goto out;
    252 		}
    253 		if (ep->ue_edesc->bEndpointAddress == address)
    254 			break;
    255 	}
    256 	if (i == iface->ui_idesc->bNumEndpoints) {
    257 		err = USBD_BAD_ADDRESS;
    258 		goto out;
    259 	}
    260 
    261 	/* Set up the pipe with this endpoint.  */
    262 	err = usbd_setup_pipe_flags(iface->ui_dev, iface, ep, ival, &p, flags);
    263 	if (err)
    264 		goto out;
    265 
    266 	/* Success! */
    267 	*pipe = p;
    268 	p = NULL;		/* handed off to caller */
    269 	piperef = false;	/* handed off to pipe */
    270 	SDT_PROBE5(usb, device, pipe, open,
    271 	    iface, address, flags, ival, p);
    272 	err = USBD_NORMAL_COMPLETION;
    273 
    274 out:	if (p)
    275 		usbd_close_pipe(p);
    276 	if (piperef)
    277 		usbd_iface_pipeunref(iface);
    278 	return err;
    279 }
    280 
    281 usbd_status
    282 usbd_open_pipe_intr(struct usbd_interface *iface, uint8_t address,
    283 		    uint8_t flags, struct usbd_pipe **pipe,
    284 		    void *priv, void *buffer, uint32_t len,
    285 		    usbd_callback cb, int ival)
    286 {
    287 	usbd_status err;
    288 	struct usbd_xfer *xfer;
    289 	struct usbd_pipe *ipipe;
    290 
    291 	USBHIST_FUNC();
    292 	USBHIST_CALLARGS(usbdebug, "address = %#jx flags = %#jx len = %jd",
    293 	    address, flags, len, 0);
    294 
    295 	err = usbd_open_pipe_ival(iface, address,
    296 				  USBD_EXCLUSIVE_USE | (flags & USBD_MPSAFE),
    297 				  &ipipe, ival);
    298 	if (err)
    299 		return err;
    300 	err = usbd_create_xfer(ipipe, len, flags, 0, &xfer);
    301 	if (err)
    302 		goto bad1;
    303 
    304 	usbd_setup_xfer(xfer, priv, buffer, len, flags, USBD_NO_TIMEOUT, cb);
    305 	ipipe->up_intrxfer = xfer;
    306 	ipipe->up_repeat = 1;
    307 	err = usbd_transfer(xfer);
    308 	*pipe = ipipe;
    309 	if (err != USBD_IN_PROGRESS)
    310 		goto bad3;
    311 	SDT_PROBE7(usb, device, pipe, open__intr,
    312 	    iface, address, flags, ival, cb, priv, ipipe);
    313 	return USBD_NORMAL_COMPLETION;
    314 
    315  bad3:
    316 	ipipe->up_intrxfer = NULL;
    317 	ipipe->up_repeat = 0;
    318 
    319 	usbd_destroy_xfer(xfer);
    320  bad1:
    321 	usbd_close_pipe(ipipe);
    322 	return err;
    323 }
    324 
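         /*
          * Illustrative sketch (not part of this file): a typical driver
          * opens its interrupt pipe roughly like this, where sc, sc_iface,
          * sc_ipipe, sc_ibuf, sc_isize and foo_intr are hypothetical
          * driver-specific names and ed was obtained via
          * usbd_interface2endpoint_descriptor():
          *
          *	err = usbd_open_pipe_intr(sc->sc_iface,
          *	    ed->bEndpointAddress, USBD_SHORT_XFER_OK,
          *	    &sc->sc_ipipe, sc, sc->sc_ibuf, sc->sc_isize,
          *	    foo_intr, USBD_DEFAULT_INTERVAL);
          *	if (err)
          *		return EIO;
          *
          * The pipe then redelivers sc_ibuf to foo_intr after each completed
          * interrupt transfer until the pipe is closed.
          */
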
    325 usbd_status
    326 usbd_close_pipe(struct usbd_pipe *pipe)
    327 {
    328 	USBHIST_FUNC(); USBHIST_CALLED(usbdebug);
    329 
    330 	KASSERT(pipe != NULL);
    331 
    332 	usbd_lock_pipe(pipe);
    333 	SDT_PROBE1(usb, device, pipe, close,  pipe);
    334 	if (!SIMPLEQ_EMPTY(&pipe->up_queue)) {
    335 		printf("WARNING: pipe closed with active xfers on addr %d\n",
    336 		    pipe->up_dev->ud_addr);
    337 		usbd_ar_pipe(pipe);
    338 	}
    339 	KASSERT(SIMPLEQ_EMPTY(&pipe->up_queue));
    340 	pipe->up_methods->upm_close(pipe);
    341 	usbd_unlock_pipe(pipe);
    342 
    343 	if (pipe->up_intrxfer)
    344 		usbd_destroy_xfer(pipe->up_intrxfer);
    345 	usb_rem_task_wait(pipe->up_dev, &pipe->up_async_task, USB_TASKQ_DRIVER,
    346 	    NULL);
    347 	usbd_endpoint_release(pipe->up_dev, pipe->up_endpoint);
    348 	if (pipe->up_iface)
    349 		usbd_iface_pipeunref(pipe->up_iface);
    350 	kmem_free(pipe, pipe->up_dev->ud_bus->ub_pipesize);
    351 
    352 	return USBD_NORMAL_COMPLETION;
    353 }
    354 
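         /*
          * Submit an xfer on its pipe.  Ordinarily this returns
          * USBD_IN_PROGRESS and completion is reported later through the
          * xfer callback via usb_transfer_complete(); with USBD_SYNCHRONOUS
          * set it instead sleeps on ux_cv until the transfer finishes and
          * returns the final status.
          */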
    355 usbd_status
    356 usbd_transfer(struct usbd_xfer *xfer)
    357 {
    358 	struct usbd_pipe *pipe = xfer->ux_pipe;
    359 	usbd_status err;
    360 	unsigned int size, flags;
    361 
    362 	USBHIST_FUNC(); USBHIST_CALLARGS(usbdebug,
    363 	    "xfer = %#jx, flags = %#jx, pipe = %#jx, running = %jd",
    364 	    (uintptr_t)xfer, xfer->ux_flags, (uintptr_t)pipe, pipe->up_running);
    365 	KASSERT(xfer->ux_status == USBD_NOT_STARTED);
    366 	SDT_PROBE1(usb, device, xfer, start,  xfer);
    367 
    368 #ifdef USB_DEBUG
    369 	if (usbdebug > 5)
    370 		usbd_dump_queue(pipe);
    371 #endif
    372 	xfer->ux_done = 0;
    373 
    374 	if (pipe->up_aborting) {
    375 		USBHIST_LOG(usbdebug, "<- done xfer %#jx, aborting",
    376 		    (uintptr_t)xfer, 0, 0, 0);
    377 		SDT_PROBE2(usb, device, xfer, done,  xfer, USBD_CANCELLED);
    378 		return USBD_CANCELLED;
    379 	}
    380 
    381 	KASSERT(xfer->ux_length == 0 || xfer->ux_buf != NULL);
    382 
    383 	size = xfer->ux_length;
    384 	flags = xfer->ux_flags;
    385 
    386 	if (size != 0) {
    387 		/*
    388 		 * Use the xfer buffer if none specified in transfer setup.
    389 		 * isoc transfers always use the xfer buffer, i.e.
    390 		 * ux_buffer is always NULL for isoc.
    391 		 */
    392 		if (xfer->ux_buffer == NULL) {
    393 			xfer->ux_buffer = xfer->ux_buf;
    394 		}
    395 
    396 		/*
     397 		 * If not using the xfer buffer, copy data to the
     398 		 * xfer buffer for OUT transfers of >0 length.
    399 		 */
    400 		if (xfer->ux_buffer != xfer->ux_buf) {
    401 			KASSERT(xfer->ux_buf);
    402 			if (!usbd_xfer_isread(xfer)) {
    403 				memcpy(xfer->ux_buf, xfer->ux_buffer, size);
    404 			}
    405 		}
    406 	}
    407 
    408 	/* xfer is not valid after the transfer method unless synchronous */
    409 	SDT_PROBE2(usb, device, pipe, transfer__start,  pipe, xfer);
    410 	err = pipe->up_methods->upm_transfer(xfer);
    411 	SDT_PROBE3(usb, device, pipe, transfer__done,  pipe, xfer, err);
    412 
    413 	if (err != USBD_IN_PROGRESS && err) {
    414 		/*
    415 		 * The transfer made it onto the pipe queue, but didn't get
    416 		 * accepted by the HCD for some reason.  It needs removing
    417 		 * from the pipe queue.
    418 		 */
    419 		USBHIST_LOG(usbdebug, "xfer failed: %jd, reinserting",
    420 		    err, 0, 0, 0);
    421 		usbd_lock_pipe(pipe);
    422 		SDT_PROBE1(usb, device, xfer, preabort,  xfer);
    423 #ifdef DIAGNOSTIC
    424 		xfer->ux_state = XFER_BUSY;
    425 #endif
    426 		SIMPLEQ_REMOVE_HEAD(&pipe->up_queue, ux_next);
    427 		if (pipe->up_serialise)
    428 			usbd_start_next(pipe);
    429 		usbd_unlock_pipe(pipe);
    430 	}
    431 
    432 	if (!(flags & USBD_SYNCHRONOUS)) {
    433 		USBHIST_LOG(usbdebug, "<- done xfer %#jx, not sync (err %jd)",
    434 		    (uintptr_t)xfer, err, 0, 0);
    435 		if (err != USBD_IN_PROGRESS) /* XXX Possible?  */
    436 			SDT_PROBE2(usb, device, xfer, done,  xfer, err);
    437 		return err;
    438 	}
    439 
    440 	if (err != USBD_IN_PROGRESS) {
    441 		USBHIST_LOG(usbdebug, "<- done xfer %#jx, sync (err %jd)",
    442 		    (uintptr_t)xfer, err, 0, 0);
    443 		SDT_PROBE2(usb, device, xfer, done,  xfer, err);
    444 		return err;
    445 	}
    446 
    447 	/* Sync transfer, wait for completion. */
    448 	usbd_lock_pipe(pipe);
    449 	while (!xfer->ux_done) {
    450 		if (pipe->up_dev->ud_bus->ub_usepolling)
    451 			panic("usbd_transfer: not done");
    452 		USBHIST_LOG(usbdebug, "<- sleeping on xfer %#jx",
    453 		    (uintptr_t)xfer, 0, 0, 0);
    454 
    455 		err = 0;
    456 		if ((flags & USBD_SYNCHRONOUS_SIG) != 0) {
    457 			err = cv_wait_sig(&xfer->ux_cv, pipe->up_dev->ud_bus->ub_lock);
    458 		} else {
    459 			cv_wait(&xfer->ux_cv, pipe->up_dev->ud_bus->ub_lock);
    460 		}
    461 		if (err) {
    462 			if (!xfer->ux_done) {
    463 				SDT_PROBE1(usb, device, xfer, abort,  xfer);
    464 				pipe->up_methods->upm_abort(xfer);
    465 			}
    466 			break;
    467 		}
    468 	}
    469 	SDT_PROBE2(usb, device, xfer, done,  xfer, xfer->ux_status);
    470 	/* XXX Race to read xfer->ux_status?  */
    471 	usbd_unlock_pipe(pipe);
    472 	return xfer->ux_status;
    473 }
    474 
    475 /* Like usbd_transfer(), but waits for completion. */
    476 usbd_status
    477 usbd_sync_transfer(struct usbd_xfer *xfer)
    478 {
    479 	xfer->ux_flags |= USBD_SYNCHRONOUS;
    480 	return usbd_transfer(xfer);
    481 }
    482 
    483 /* Like usbd_transfer(), but waits for completion and listens for signals. */
    484 usbd_status
    485 usbd_sync_transfer_sig(struct usbd_xfer *xfer)
    486 {
    487 	xfer->ux_flags |= USBD_SYNCHRONOUS | USBD_SYNCHRONOUS_SIG;
    488 	return usbd_transfer(xfer);
    489 }
    490 
    491 static void *
    492 usbd_alloc_buffer(struct usbd_xfer *xfer, uint32_t size)
    493 {
    494 	KASSERT(xfer->ux_buf == NULL);
    495 	KASSERT(size != 0);
    496 
    497 	xfer->ux_bufsize = 0;
    498 #if NUSB_DMA > 0
    499 	struct usbd_bus *bus = xfer->ux_bus;
    500 
    501 	if (bus->ub_usedma) {
    502 		usb_dma_t *dmap = &xfer->ux_dmabuf;
    503 
    504 		KASSERT((bus->ub_dmaflags & USBMALLOC_COHERENT) == 0);
    505 		int err = usb_allocmem(bus, size, 0, bus->ub_dmaflags, dmap);
    506 		if (err) {
    507 			return NULL;
    508 		}
    509 		xfer->ux_buf = KERNADDR(&xfer->ux_dmabuf, 0);
    510 		xfer->ux_bufsize = size;
    511 
    512 		return xfer->ux_buf;
    513 	}
    514 #endif
    515 	KASSERT(xfer->ux_bus->ub_usedma == false);
    516 	xfer->ux_buf = kmem_alloc(size, KM_SLEEP);
    517 	xfer->ux_bufsize = size;
    518 	return xfer->ux_buf;
    519 }
    520 
    521 static void
    522 usbd_free_buffer(struct usbd_xfer *xfer)
    523 {
    524 	KASSERT(xfer->ux_buf != NULL);
    525 	KASSERT(xfer->ux_bufsize != 0);
    526 
    527 	void *buf = xfer->ux_buf;
    528 	uint32_t size = xfer->ux_bufsize;
    529 
    530 	xfer->ux_buf = NULL;
    531 	xfer->ux_bufsize = 0;
    532 
    533 #if NUSB_DMA > 0
    534 	struct usbd_bus *bus = xfer->ux_bus;
    535 
    536 	if (bus->ub_usedma) {
    537 		usb_dma_t *dmap = &xfer->ux_dmabuf;
    538 
    539 		usb_freemem(bus, dmap);
    540 		return;
    541 	}
    542 #endif
    543 	KASSERT(xfer->ux_bus->ub_usedma == false);
    544 
    545 	kmem_free(buf, size);
    546 }
    547 
    548 void *
    549 usbd_get_buffer(struct usbd_xfer *xfer)
    550 {
    551 	return xfer->ux_buf;
    552 }
    553 
    554 struct usbd_pipe *
    555 usbd_get_pipe0(struct usbd_device *dev)
    556 {
    557 
    558 	return dev->ud_pipe0;
    559 }
    560 
    561 static struct usbd_xfer *
    562 usbd_alloc_xfer(struct usbd_device *dev, unsigned int nframes)
    563 {
    564 	struct usbd_xfer *xfer;
    565 
    566 	USBHIST_FUNC();
    567 
    568 	ASSERT_SLEEPABLE();
    569 
    570 	xfer = dev->ud_bus->ub_methods->ubm_allocx(dev->ud_bus, nframes);
    571 	if (xfer == NULL)
    572 		goto out;
    573 	xfer->ux_bus = dev->ud_bus;
    574 	callout_init(&xfer->ux_callout, CALLOUT_MPSAFE);
    575 	callout_setfunc(&xfer->ux_callout, usbd_xfer_timeout, xfer);
    576 	cv_init(&xfer->ux_cv, "usbxfer");
    577 	usb_init_task(&xfer->ux_aborttask, usbd_xfer_timeout_task, xfer,
    578 	    USB_TASKQ_MPSAFE);
    579 
    580 out:
    581 	USBHIST_CALLARGS(usbdebug, "returns %#jx", (uintptr_t)xfer, 0, 0, 0);
    582 
    583 	return xfer;
    584 }
    585 
    586 static usbd_status
    587 usbd_free_xfer(struct usbd_xfer *xfer)
    588 {
    589 	USBHIST_FUNC();
    590 	USBHIST_CALLARGS(usbdebug, "%#jx", (uintptr_t)xfer, 0, 0, 0);
    591 
    592 	if (xfer->ux_buf) {
    593 		usbd_free_buffer(xfer);
    594 	}
    595 
    596 	/* Wait for any straggling timeout to complete. */
    597 	mutex_enter(xfer->ux_bus->ub_lock);
    598 	xfer->ux_timeout_reset = false; /* do not resuscitate */
    599 	callout_halt(&xfer->ux_callout, xfer->ux_bus->ub_lock);
    600 	usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask,
    601 	    USB_TASKQ_HC, xfer->ux_bus->ub_lock);
    602 	mutex_exit(xfer->ux_bus->ub_lock);
    603 
    604 	cv_destroy(&xfer->ux_cv);
    605 	xfer->ux_bus->ub_methods->ubm_freex(xfer->ux_bus, xfer);
    606 	return USBD_NORMAL_COMPLETION;
    607 }
    608 
    609 int
    610 usbd_create_xfer(struct usbd_pipe *pipe, size_t len, unsigned int flags,
    611     unsigned int nframes, struct usbd_xfer **xp)
    612 {
    613 	KASSERT(xp != NULL);
    614 	void *buf = NULL;
    615 
    616 	struct usbd_xfer *xfer = usbd_alloc_xfer(pipe->up_dev, nframes);
    617 	if (xfer == NULL)
    618 		return ENOMEM;
    619 
    620 	xfer->ux_pipe = pipe;
    621 	xfer->ux_flags = flags;
    622 	xfer->ux_nframes = nframes;
    623 	xfer->ux_methods = pipe->up_methods;
    624 
    625 	if (len) {
    626 		buf = usbd_alloc_buffer(xfer, len);
    627 		if (!buf) {
    628 			usbd_free_xfer(xfer);
    629 			return ENOMEM;
    630 		}
    631 	}
    632 
    633 	if (xfer->ux_methods->upm_init) {
    634 		int err = xfer->ux_methods->upm_init(xfer);
    635 		if (err) {
    636 			usbd_free_xfer(xfer);
    637 			return err;
    638 		}
    639 	}
    640 
    641 	*xp = xfer;
    642 	SDT_PROBE5(usb, device, xfer, create,
    643 	    xfer, pipe, len, flags, nframes);
    644 	return 0;
    645 }
    646 
    647 void
    648 usbd_destroy_xfer(struct usbd_xfer *xfer)
    649 {
    650 
    651 	SDT_PROBE1(usb, device, xfer, destroy,  xfer);
    652 	if (xfer->ux_methods->upm_fini)
    653 		xfer->ux_methods->upm_fini(xfer);
    654 
    655 	usbd_free_xfer(xfer);
    656 }
    657 
    658 void
    659 usbd_setup_xfer(struct usbd_xfer *xfer, void *priv, void *buffer,
    660     uint32_t length, uint16_t flags, uint32_t timeout, usbd_callback callback)
    661 {
    662 	KASSERT(xfer->ux_pipe);
    663 
    664 	xfer->ux_priv = priv;
    665 	xfer->ux_buffer = buffer;
    666 	xfer->ux_length = length;
    667 	xfer->ux_actlen = 0;
    668 	xfer->ux_flags = flags;
    669 	xfer->ux_timeout = timeout;
    670 	xfer->ux_status = USBD_NOT_STARTED;
    671 	xfer->ux_callback = callback;
    672 	xfer->ux_rqflags &= ~URQ_REQUEST;
    673 	xfer->ux_nframes = 0;
    674 }
    675 
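         /*
          * Illustrative sketch (not part of this file): a one-shot
          * synchronous bulk write using the xfer lifecycle above.  `pipe'
          * is an already-open bulk pipe; `buf', `len' and the millisecond
          * timeout `timo' are hypothetical caller-supplied values.
          *
          *	struct usbd_xfer *xfer;
          *	uint32_t actlen;
          *	usbd_status err;
          *
          *	if (usbd_create_xfer(pipe, len, 0, 0, &xfer))
          *		return ENOMEM;
          *	usbd_setup_xfer(xfer, NULL, buf, len, 0, timo, NULL);
          *	err = usbd_sync_transfer(xfer);
          *	usbd_get_xfer_status(xfer, NULL, NULL, &actlen, NULL);
          *	usbd_destroy_xfer(xfer);
          */
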
    676 void
    677 usbd_setup_default_xfer(struct usbd_xfer *xfer, struct usbd_device *dev,
    678     void *priv, uint32_t timeout, usb_device_request_t *req, void *buffer,
    679     uint32_t length, uint16_t flags, usbd_callback callback)
    680 {
    681 	KASSERT(xfer->ux_pipe == dev->ud_pipe0);
    682 
    683 	xfer->ux_priv = priv;
    684 	xfer->ux_buffer = buffer;
    685 	xfer->ux_length = length;
    686 	xfer->ux_actlen = 0;
    687 	xfer->ux_flags = flags;
    688 	xfer->ux_timeout = timeout;
    689 	xfer->ux_status = USBD_NOT_STARTED;
    690 	xfer->ux_callback = callback;
    691 	xfer->ux_request = *req;
    692 	xfer->ux_rqflags |= URQ_REQUEST;
    693 	xfer->ux_nframes = 0;
    694 }
    695 
    696 void
    697 usbd_setup_isoc_xfer(struct usbd_xfer *xfer, void *priv, uint16_t *frlengths,
    698     uint32_t nframes, uint16_t flags, usbd_callback callback)
    699 {
    700 	xfer->ux_priv = priv;
    701 	xfer->ux_buffer = NULL;
    702 	xfer->ux_length = 0;
    703 	xfer->ux_actlen = 0;
    704 	xfer->ux_flags = flags;
    705 	xfer->ux_timeout = USBD_NO_TIMEOUT;
    706 	xfer->ux_status = USBD_NOT_STARTED;
    707 	xfer->ux_callback = callback;
    708 	xfer->ux_rqflags &= ~URQ_REQUEST;
    709 	xfer->ux_frlengths = frlengths;
    710 	xfer->ux_nframes = nframes;
    711 
    712 	for (size_t i = 0; i < xfer->ux_nframes; i++)
    713 		xfer->ux_length += xfer->ux_frlengths[i];
    714 }
    715 
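         /*
          * Illustrative sketch (not part of this file): setting up an
          * isochronous xfer.  NFRAMES, the uint16_t array `frlens' (filled
          * with per-frame lengths summing to `total') and the callback
          * foo_isoc_cb are hypothetical; the xfer must have been created
          * with `total' bytes and NFRAMES frames:
          *
          *	if (usbd_create_xfer(pipe, total, 0, NFRAMES, &xfer))
          *		return ENOMEM;
          *	usbd_setup_isoc_xfer(xfer, sc, frlens, NFRAMES,
          *	    USBD_SHORT_XFER_OK, foo_isoc_cb);
          *	err = usbd_transfer(xfer);
          */
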
    716 void
    717 usbd_get_xfer_status(struct usbd_xfer *xfer, void **priv,
    718 		     void **buffer, uint32_t *count, usbd_status *status)
    719 {
    720 	if (priv != NULL)
    721 		*priv = xfer->ux_priv;
    722 	if (buffer != NULL)
    723 		*buffer = xfer->ux_buffer;
    724 	if (count != NULL)
    725 		*count = xfer->ux_actlen;
    726 	if (status != NULL)
    727 		*status = xfer->ux_status;
    728 }
    729 
    730 usb_config_descriptor_t *
    731 usbd_get_config_descriptor(struct usbd_device *dev)
    732 {
    733 	KASSERT(dev != NULL);
    734 
    735 	return dev->ud_cdesc;
    736 }
    737 
    738 usb_interface_descriptor_t *
    739 usbd_get_interface_descriptor(struct usbd_interface *iface)
    740 {
    741 	KASSERT(iface != NULL);
    742 
    743 	return iface->ui_idesc;
    744 }
    745 
    746 usb_device_descriptor_t *
    747 usbd_get_device_descriptor(struct usbd_device *dev)
    748 {
    749 	KASSERT(dev != NULL);
    750 
    751 	return &dev->ud_ddesc;
    752 }
    753 
    754 usb_endpoint_descriptor_t *
    755 usbd_interface2endpoint_descriptor(struct usbd_interface *iface, uint8_t index)
    756 {
    757 
    758 	if (index >= iface->ui_idesc->bNumEndpoints)
    759 		return NULL;
    760 	return iface->ui_endpoints[index].ue_edesc;
    761 }
    762 
    763 /* Some drivers may wish to abort requests on the default pipe, *
    764  * but there is no mechanism for getting a handle on it.        */
    765 usbd_status
    766 usbd_abort_default_pipe(struct usbd_device *device)
    767 {
    768 	return usbd_abort_pipe(device->ud_pipe0);
    769 }
    770 
    771 usbd_status
    772 usbd_abort_pipe(struct usbd_pipe *pipe)
    773 {
    774 	usbd_status err;
    775 
    776 	KASSERT(pipe != NULL);
    777 
    778 	usbd_lock_pipe(pipe);
    779 	err = usbd_ar_pipe(pipe);
    780 	usbd_unlock_pipe(pipe);
    781 	return err;
    782 }
    783 
    784 usbd_status
    785 usbd_clear_endpoint_stall(struct usbd_pipe *pipe)
    786 {
    787 	struct usbd_device *dev = pipe->up_dev;
    788 	usbd_status err;
    789 
    790 	USBHIST_FUNC(); USBHIST_CALLED(usbdebug);
    791 	SDT_PROBE1(usb, device, pipe, clear__endpoint__stall,  pipe);
    792 
    793 	/*
     794  * Clearing an endpoint stall resets the endpoint toggle, so
    795 	 * do the same to the HC toggle.
    796 	 */
    797 	SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle,  pipe);
    798 	pipe->up_methods->upm_cleartoggle(pipe);
    799 
    800 	err = usbd_clear_endpoint_feature(dev,
    801 	    pipe->up_endpoint->ue_edesc->bEndpointAddress, UF_ENDPOINT_HALT);
    802 #if 0
    803 XXX should we do this?
    804 	if (!err) {
    805 		pipe->state = USBD_PIPE_ACTIVE;
    806 		/* XXX activate pipe */
    807 	}
    808 #endif
    809 	return err;
    810 }
    811 
    812 void
    813 usbd_clear_endpoint_stall_task(void *arg)
    814 {
    815 	struct usbd_pipe *pipe = arg;
    816 	struct usbd_device *dev = pipe->up_dev;
    817 
    818 	SDT_PROBE1(usb, device, pipe, clear__endpoint__stall,  pipe);
    819 	SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle,  pipe);
    820 	pipe->up_methods->upm_cleartoggle(pipe);
    821 
    822 	(void)usbd_clear_endpoint_feature(dev,
    823 	    pipe->up_endpoint->ue_edesc->bEndpointAddress, UF_ENDPOINT_HALT);
    824 }
    825 
    826 void
    827 usbd_clear_endpoint_stall_async(struct usbd_pipe *pipe)
    828 {
    829 	usb_add_task(pipe->up_dev, &pipe->up_async_task, USB_TASKQ_DRIVER);
    830 }
    831 
    832 void
    833 usbd_clear_endpoint_toggle(struct usbd_pipe *pipe)
    834 {
    835 
    836 	SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle,  pipe);
    837 	pipe->up_methods->upm_cleartoggle(pipe);
    838 }
    839 
    840 usbd_status
    841 usbd_endpoint_count(struct usbd_interface *iface, uint8_t *count)
    842 {
    843 	KASSERT(iface != NULL);
    844 	KASSERT(iface->ui_idesc != NULL);
    845 
    846 	*count = iface->ui_idesc->bNumEndpoints;
    847 	return USBD_NORMAL_COMPLETION;
    848 }
    849 
    850 usbd_status
    851 usbd_interface_count(struct usbd_device *dev, uint8_t *count)
    852 {
    853 
    854 	if (dev->ud_cdesc == NULL)
    855 		return USBD_NOT_CONFIGURED;
    856 	*count = dev->ud_cdesc->bNumInterface;
    857 	return USBD_NORMAL_COMPLETION;
    858 }
    859 
    860 void
    861 usbd_interface2device_handle(struct usbd_interface *iface,
    862 			     struct usbd_device **dev)
    863 {
    864 
    865 	*dev = iface->ui_dev;
    866 }
    867 
    868 usbd_status
    869 usbd_device2interface_handle(struct usbd_device *dev,
    870 			     uint8_t ifaceno, struct usbd_interface **iface)
    871 {
    872 
    873 	if (dev->ud_cdesc == NULL)
    874 		return USBD_NOT_CONFIGURED;
    875 	if (ifaceno >= dev->ud_cdesc->bNumInterface)
    876 		return USBD_INVAL;
    877 	*iface = &dev->ud_ifaces[ifaceno];
    878 	return USBD_NORMAL_COMPLETION;
    879 }
    880 
    881 struct usbd_device *
    882 usbd_pipe2device_handle(struct usbd_pipe *pipe)
    883 {
    884 	KASSERT(pipe != NULL);
    885 
    886 	return pipe->up_dev;
    887 }
    888 
    889 /* XXXX use altno */
    890 usbd_status
    891 usbd_set_interface(struct usbd_interface *iface, int altidx)
    892 {
    893 	bool locked = false;
    894 	usb_device_request_t req;
    895 	usbd_status err;
    896 
    897 	USBHIST_FUNC();
    898 	USBHIST_CALLARGS(usbdebug, "iface %#jx", (uintptr_t)iface, 0, 0, 0);
    899 
    900 	err = usbd_iface_lock(iface);
    901 	if (err)
    902 		goto out;
    903 	locked = true;
    904 
    905 	err = usbd_fill_iface_data(iface->ui_dev, iface->ui_index, altidx);
    906 	if (err)
    907 		goto out;
    908 
    909 	req.bmRequestType = UT_WRITE_INTERFACE;
    910 	req.bRequest = UR_SET_INTERFACE;
    911 	USETW(req.wValue, iface->ui_idesc->bAlternateSetting);
    912 	USETW(req.wIndex, iface->ui_idesc->bInterfaceNumber);
    913 	USETW(req.wLength, 0);
    914 	err = usbd_do_request(iface->ui_dev, &req, 0);
    915 
    916 out:	/* XXX back out iface data?  */
    917 	if (locked)
    918 		usbd_iface_unlock(iface);
    919 	return err;
    920 }
    921 
    922 int
    923 usbd_get_no_alts(usb_config_descriptor_t *cdesc, int ifaceno)
    924 {
    925 	char *p = (char *)cdesc;
    926 	char *end = p + UGETW(cdesc->wTotalLength);
    927 	usb_interface_descriptor_t *d;
    928 	int n;
    929 
    930 	for (n = 0; p < end; p += d->bLength) {
    931 		d = (usb_interface_descriptor_t *)p;
    932 		if (p + d->bLength <= end &&
    933 		    d->bDescriptorType == UDESC_INTERFACE &&
    934 		    d->bInterfaceNumber == ifaceno)
    935 			n++;
    936 	}
    937 	return n;
    938 }
    939 
    940 int
    941 usbd_get_interface_altindex(struct usbd_interface *iface)
    942 {
    943 	return iface->ui_altindex;
    944 }
    945 
    946 usbd_status
    947 usbd_get_interface(struct usbd_interface *iface, uint8_t *aiface)
    948 {
    949 	usb_device_request_t req;
    950 
    951 	req.bmRequestType = UT_READ_INTERFACE;
    952 	req.bRequest = UR_GET_INTERFACE;
    953 	USETW(req.wValue, 0);
    954 	USETW(req.wIndex, iface->ui_idesc->bInterfaceNumber);
    955 	USETW(req.wLength, 1);
    956 	return usbd_do_request(iface->ui_dev, &req, aiface);
    957 }
    958 
    959 /*** Internal routines ***/
    960 
    961 /* Dequeue all pipe operations, called with bus lock held. */
    962 Static usbd_status
    963 usbd_ar_pipe(struct usbd_pipe *pipe)
    964 {
    965 	struct usbd_xfer *xfer;
    966 
    967 	USBHIST_FUNC();
    968 	USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 0);
    969 	SDT_PROBE1(usb, device, pipe, abort__start,  pipe);
    970 
    971 	KASSERT(mutex_owned(pipe->up_dev->ud_bus->ub_lock));
    972 
    973 #ifdef USB_DEBUG
    974 	if (usbdebug > 5)
    975 		usbd_dump_queue(pipe);
    976 #endif
    977 	pipe->up_repeat = 0;
    978 	pipe->up_running = 0;
    979 	pipe->up_aborting = 1;
    980 	while ((xfer = SIMPLEQ_FIRST(&pipe->up_queue)) != NULL) {
    981 		USBHIST_LOG(usbdebug, "pipe = %#jx xfer = %#jx "
    982 		    "(methods = %#jx)", (uintptr_t)pipe, (uintptr_t)xfer,
    983 		    (uintptr_t)pipe->up_methods, 0);
    984 		if (xfer->ux_status == USBD_NOT_STARTED) {
    985 			SDT_PROBE1(usb, device, xfer, preabort,  xfer);
    986 #ifdef DIAGNOSTIC
    987 			xfer->ux_state = XFER_BUSY;
    988 #endif
    989 			SIMPLEQ_REMOVE_HEAD(&pipe->up_queue, ux_next);
    990 		} else {
    991 			/* Make the HC abort it (and invoke the callback). */
    992 			SDT_PROBE1(usb, device, xfer, abort,  xfer);
    993 			pipe->up_methods->upm_abort(xfer);
    994 			/* XXX only for non-0 usbd_clear_endpoint_stall(pipe); */
    995 		}
    996 	}
    997 	pipe->up_aborting = 0;
    998 	SDT_PROBE1(usb, device, pipe, abort__done,  pipe);
    999 	return USBD_NORMAL_COMPLETION;
   1000 }
   1001 
   1002 /* Called with USB lock held. */
   1003 void
   1004 usb_transfer_complete(struct usbd_xfer *xfer)
   1005 {
   1006 	struct usbd_pipe *pipe = xfer->ux_pipe;
   1007 	struct usbd_bus *bus = pipe->up_dev->ud_bus;
   1008 	int sync = xfer->ux_flags & USBD_SYNCHRONOUS;
   1009 	int erred;
   1010 	int polling = bus->ub_usepolling;
   1011 	int repeat = pipe->up_repeat;
   1012 
   1013 	USBHIST_FUNC();
   1014 	USBHIST_CALLARGS(usbdebug, "pipe = %#jx xfer = %#jx status = %jd "
   1015 	    "actlen = %jd", (uintptr_t)pipe, (uintptr_t)xfer, xfer->ux_status,
   1016 	    xfer->ux_actlen);
   1017 
   1018 	KASSERT(polling || mutex_owned(pipe->up_dev->ud_bus->ub_lock));
   1019 	KASSERTMSG(xfer->ux_state == XFER_ONQU, "xfer %p state is %x", xfer,
   1020 	    xfer->ux_state);
   1021 	KASSERT(pipe != NULL);
   1022 
   1023 	/*
    1024 	 * If the device is known to miss out ACKs, pretend that an
    1025 	 * output timeout is a success.  Userland should handle the
    1026 	 * logic to verify that the operation succeeded.
   1027 	 */
   1028 	if (pipe->up_dev->ud_quirks &&
   1029 	    pipe->up_dev->ud_quirks->uq_flags & UQ_MISS_OUT_ACK &&
   1030 	    xfer->ux_status == USBD_TIMEOUT &&
   1031 	    !usbd_xfer_isread(xfer)) {
   1032 		USBHIST_LOG(usbdebug, "Possible output ack miss for xfer %#jx: "
   1033 		    "hiding write timeout to %jd.%jd for %ju bytes written",
   1034 		    (uintptr_t)xfer, curlwp->l_proc->p_pid, curlwp->l_lid,
   1035 		    xfer->ux_length);
   1036 
   1037 		xfer->ux_status = USBD_NORMAL_COMPLETION;
   1038 		xfer->ux_actlen = xfer->ux_length;
   1039 	}
   1040 
   1041 	erred = xfer->ux_status == USBD_CANCELLED ||
   1042 	        xfer->ux_status == USBD_TIMEOUT;
   1043 
   1044 	if (!repeat) {
   1045 		/* Remove request from queue. */
   1046 
   1047 		KASSERTMSG(!SIMPLEQ_EMPTY(&pipe->up_queue),
   1048 		    "pipe %p is empty, but xfer %p wants to complete", pipe,
   1049 		     xfer);
   1050 		KASSERTMSG(xfer == SIMPLEQ_FIRST(&pipe->up_queue),
   1051 		    "xfer %p is not start of queue (%p is at start)", xfer,
   1052 		   SIMPLEQ_FIRST(&pipe->up_queue));
   1053 
   1054 #ifdef DIAGNOSTIC
   1055 		xfer->ux_state = XFER_BUSY;
   1056 #endif
   1057 		SIMPLEQ_REMOVE_HEAD(&pipe->up_queue, ux_next);
   1058 	}
   1059 	USBHIST_LOG(usbdebug, "xfer %#jx: repeat %jd new head = %#jx",
   1060 	    (uintptr_t)xfer, repeat, (uintptr_t)SIMPLEQ_FIRST(&pipe->up_queue),
   1061 	    0);
   1062 
   1063 	/* Count completed transfers. */
   1064 	++pipe->up_dev->ud_bus->ub_stats.uds_requests
   1065 		[pipe->up_endpoint->ue_edesc->bmAttributes & UE_XFERTYPE];
   1066 
   1067 	xfer->ux_done = 1;
   1068 	if (!xfer->ux_status && xfer->ux_actlen < xfer->ux_length &&
   1069 	    !(xfer->ux_flags & USBD_SHORT_XFER_OK)) {
   1070 		USBHIST_LOG(usbdebug, "short transfer %jd < %jd",
   1071 		    xfer->ux_actlen, xfer->ux_length, 0, 0);
   1072 		xfer->ux_status = USBD_SHORT_XFER;
   1073 	}
   1074 
   1075 	USBHIST_LOG(usbdebug, "xfer %#jx doing done %#jx", (uintptr_t)xfer,
   1076 	    (uintptr_t)pipe->up_methods->upm_done, 0, 0);
   1077 	SDT_PROBE2(usb, device, xfer, done,  xfer, xfer->ux_status);
   1078 	pipe->up_methods->upm_done(xfer);
   1079 
   1080 	if (xfer->ux_length != 0 && xfer->ux_buffer != xfer->ux_buf) {
   1081 		KDASSERTMSG(xfer->ux_actlen <= xfer->ux_length,
   1082 		    "actlen %d length %d",xfer->ux_actlen, xfer->ux_length);
   1083 
   1084 		/* Only if IN transfer */
   1085 		if (usbd_xfer_isread(xfer)) {
   1086 			memcpy(xfer->ux_buffer, xfer->ux_buf, xfer->ux_actlen);
   1087 		}
   1088 	}
   1089 
   1090 	USBHIST_LOG(usbdebug, "xfer %#jx doing callback %#jx status %jd",
   1091 	    (uintptr_t)xfer, (uintptr_t)xfer->ux_callback, xfer->ux_status, 0);
   1092 
   1093 	if (xfer->ux_callback) {
   1094 		if (!polling) {
   1095 			mutex_exit(pipe->up_dev->ud_bus->ub_lock);
   1096 			if (!(pipe->up_flags & USBD_MPSAFE))
   1097 				KERNEL_LOCK(1, curlwp);
   1098 		}
   1099 
   1100 		xfer->ux_callback(xfer, xfer->ux_priv, xfer->ux_status);
   1101 
   1102 		if (!polling) {
   1103 			if (!(pipe->up_flags & USBD_MPSAFE))
   1104 				KERNEL_UNLOCK_ONE(curlwp);
   1105 			mutex_enter(pipe->up_dev->ud_bus->ub_lock);
   1106 		}
   1107 	}
   1108 
   1109 	if (sync && !polling) {
   1110 		USBHIST_LOG(usbdebug, "<- done xfer %#jx, wakeup",
   1111 		    (uintptr_t)xfer, 0, 0, 0);
   1112 		cv_broadcast(&xfer->ux_cv);
   1113 	}
   1114 
   1115 	if (repeat) {
   1116 		xfer->ux_actlen = 0;
   1117 		xfer->ux_status = USBD_NOT_STARTED;
   1118 	} else {
   1119 		/* XXX should we stop the queue on all errors? */
   1120 		if (erred && pipe->up_iface != NULL)	/* not control pipe */
   1121 			pipe->up_running = 0;
   1122 	}
   1123 	if (pipe->up_running && pipe->up_serialise)
   1124 		usbd_start_next(pipe);
   1125 }
   1126 
   1127 /* Called with USB lock held. */
   1128 usbd_status
   1129 usb_insert_transfer(struct usbd_xfer *xfer)
   1130 {
   1131 	struct usbd_pipe *pipe = xfer->ux_pipe;
   1132 	usbd_status err;
   1133 
   1134 	USBHIST_FUNC(); USBHIST_CALLARGS(usbdebug,
   1135 	    "xfer = %#jx pipe = %#jx running = %jd timeout = %jd",
   1136 	    (uintptr_t)xfer, (uintptr_t)pipe,
   1137 	    pipe->up_running, xfer->ux_timeout);
   1138 
   1139 	KASSERT(mutex_owned(pipe->up_dev->ud_bus->ub_lock));
   1140 	KASSERTMSG(xfer->ux_state == XFER_BUSY, "xfer %p state is %x", xfer,
   1141 	    xfer->ux_state);
   1142 
   1143 #ifdef DIAGNOSTIC
   1144 	xfer->ux_state = XFER_ONQU;
   1145 #endif
   1146 	SIMPLEQ_INSERT_TAIL(&pipe->up_queue, xfer, ux_next);
   1147 	if (pipe->up_running && pipe->up_serialise)
   1148 		err = USBD_IN_PROGRESS;
   1149 	else {
   1150 		pipe->up_running = 1;
   1151 		err = USBD_NORMAL_COMPLETION;
   1152 	}
   1153 	USBHIST_LOG(usbdebug, "<- done xfer %#jx, err %jd", (uintptr_t)xfer,
   1154 	    err, 0, 0);
   1155 	return err;
   1156 }
   1157 
   1158 /* Called with USB lock held. */
   1159 void
   1160 usbd_start_next(struct usbd_pipe *pipe)
   1161 {
   1162 	struct usbd_xfer *xfer;
   1163 	usbd_status err;
   1164 
   1165 	USBHIST_FUNC();
   1166 
   1167 	KASSERT(pipe != NULL);
   1168 	KASSERT(pipe->up_methods != NULL);
   1169 	KASSERT(pipe->up_methods->upm_start != NULL);
   1170 	KASSERT(pipe->up_serialise == true);
   1171 
   1172 	int polling = pipe->up_dev->ud_bus->ub_usepolling;
   1173 	KASSERT(polling || mutex_owned(pipe->up_dev->ud_bus->ub_lock));
   1174 
   1175 	/* Get next request in queue. */
   1176 	xfer = SIMPLEQ_FIRST(&pipe->up_queue);
   1177 	USBHIST_CALLARGS(usbdebug, "pipe = %#jx, xfer = %#jx", (uintptr_t)pipe,
   1178 	    (uintptr_t)xfer, 0, 0);
   1179 	if (xfer == NULL) {
   1180 		pipe->up_running = 0;
   1181 	} else {
   1182 		if (!polling)
   1183 			mutex_exit(pipe->up_dev->ud_bus->ub_lock);
   1184 		SDT_PROBE2(usb, device, pipe, start,  pipe, xfer);
   1185 		err = pipe->up_methods->upm_start(xfer);
   1186 		if (!polling)
   1187 			mutex_enter(pipe->up_dev->ud_bus->ub_lock);
   1188 
   1189 		if (err != USBD_IN_PROGRESS) {
   1190 			USBHIST_LOG(usbdebug, "error = %jd", err, 0, 0, 0);
   1191 			pipe->up_running = 0;
   1192 			/* XXX do what? */
   1193 		}
   1194 	}
   1195 
   1196 	KASSERT(polling || mutex_owned(pipe->up_dev->ud_bus->ub_lock));
   1197 }
   1198 
   1199 usbd_status
   1200 usbd_do_request(struct usbd_device *dev, usb_device_request_t *req, void *data)
   1201 {
   1202 
   1203 	return usbd_do_request_flags(dev, req, data, 0, 0,
   1204 	    USBD_DEFAULT_TIMEOUT);
   1205 }
   1206 
   1207 usbd_status
   1208 usbd_do_request_flags(struct usbd_device *dev, usb_device_request_t *req,
   1209     void *data, uint16_t flags, int *actlen, uint32_t timeout)
   1210 {
   1211 	size_t len = UGETW(req->wLength);
   1212 
   1213 	return usbd_do_request_len(dev, req, len, data, flags, actlen, timeout);
   1214 }
   1215 
   1216 usbd_status
   1217 usbd_do_request_len(struct usbd_device *dev, usb_device_request_t *req,
   1218     size_t len, void *data, uint16_t flags, int *actlen, uint32_t timeout)
   1219 {
   1220 	struct usbd_xfer *xfer;
   1221 	usbd_status err;
   1222 
   1223 	KASSERT(len >= UGETW(req->wLength));
   1224 
   1225 	USBHIST_FUNC();
   1226 	USBHIST_CALLARGS(usbdebug, "dev=%#jx req=%jx flags=%jx len=%jx",
   1227 	    (uintptr_t)dev, (uintptr_t)req, flags, len);
   1228 
   1229 	ASSERT_SLEEPABLE();
   1230 
   1231 	int error = usbd_create_xfer(dev->ud_pipe0, len, 0, 0, &xfer);
   1232 	if (error)
   1233 		return error;
   1234 
   1235 	usbd_setup_default_xfer(xfer, dev, 0, timeout, req, data,
   1236 	    UGETW(req->wLength), flags, NULL);
   1237 	KASSERT(xfer->ux_pipe == dev->ud_pipe0);
   1238 	err = usbd_sync_transfer(xfer);
   1239 #if defined(USB_DEBUG) || defined(DIAGNOSTIC)
   1240 	if (xfer->ux_actlen > xfer->ux_length) {
   1241 		USBHIST_LOG(usbdebug, "overrun addr = %jd type = 0x%02jx",
   1242 		    dev->ud_addr, xfer->ux_request.bmRequestType, 0, 0);
   1243 		USBHIST_LOG(usbdebug, "     req = 0x%02jx val = %jd "
   1244 		    "index = %jd",
   1245 		    xfer->ux_request.bRequest, UGETW(xfer->ux_request.wValue),
   1246 		    UGETW(xfer->ux_request.wIndex), 0);
   1247 		USBHIST_LOG(usbdebug, "     rlen = %jd length = %jd "
   1248 		    "actlen = %jd",
   1249 		    UGETW(xfer->ux_request.wLength),
   1250 		    xfer->ux_length, xfer->ux_actlen, 0);
   1251 	}
   1252 #endif
   1253 	if (actlen != NULL)
   1254 		*actlen = xfer->ux_actlen;
   1255 
   1256 	usbd_destroy_xfer(xfer);
   1257 
   1258 	if (err) {
   1259 		USBHIST_LOG(usbdebug, "returning err = %jd", err, 0, 0, 0);
   1260 	}
   1261 	return err;
   1262 }
   1263 
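         /*
          * Illustrative sketch (not part of this file): a simple control
          * read built on usbd_do_request(), here fetching the two-byte
          * device status; `dev' and `err' come from the hypothetical
          * caller (compare usbd_get_interface() above, which follows the
          * same pattern):
          *
          *	usb_device_request_t req;
          *	uWord status;
          *
          *	req.bmRequestType = UT_READ_DEVICE;
          *	req.bRequest = UR_GET_STATUS;
          *	USETW(req.wValue, 0);
          *	USETW(req.wIndex, 0);
          *	USETW(req.wLength, sizeof(status));
          *	err = usbd_do_request(dev, &req, status);
          */
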
   1264 static void
   1265 usbd_request_async_cb(struct usbd_xfer *xfer, void *priv, usbd_status status)
   1266 {
   1267 	usbd_destroy_xfer(xfer);
   1268 }
   1269 
   1270 /*
   1271  * Execute a request without waiting for completion.
   1272  * Can be used from interrupt context.
   1273  */
   1274 usbd_status
   1275 usbd_request_async(struct usbd_device *dev, struct usbd_xfer *xfer,
   1276     usb_device_request_t *req, void *priv, usbd_callback callback)
   1277 {
   1278 	usbd_status err;
   1279 
   1280 	if (callback == NULL)
   1281 		callback = usbd_request_async_cb;
   1282 
   1283 	usbd_setup_default_xfer(xfer, dev, priv,
   1284 	    USBD_DEFAULT_TIMEOUT, req, NULL, UGETW(req->wLength), 0,
   1285 	    callback);
   1286 	err = usbd_transfer(xfer);
   1287 	if (err != USBD_IN_PROGRESS) {
   1288 		usbd_destroy_xfer(xfer);
   1289 		return (err);
   1290 	}
   1291 	return (USBD_NORMAL_COMPLETION);
   1292 }
   1293 
   1294 const struct usbd_quirks *
   1295 usbd_get_quirks(struct usbd_device *dev)
   1296 {
   1297 #ifdef DIAGNOSTIC
   1298 	if (dev == NULL) {
   1299 		printf("usbd_get_quirks: dev == NULL\n");
   1300 		return 0;
   1301 	}
   1302 #endif
   1303 	return dev->ud_quirks;
   1304 }
   1305 
   1306 /* XXX do periodic free() of free list */
   1307 
   1308 /*
   1309  * Called from keyboard driver when in polling mode.
   1310  */
   1311 void
   1312 usbd_dopoll(struct usbd_interface *iface)
   1313 {
   1314 	iface->ui_dev->ud_bus->ub_methods->ubm_dopoll(iface->ui_dev->ud_bus);
   1315 }
   1316 
   1317 /*
    1318  * This is for the keyboard driver as well, which only operates in polling
    1319  * mode from the "ask root device" prompt, etc., and from DDB.
   1320  */
   1321 void
   1322 usbd_set_polling(struct usbd_device *dev, int on)
   1323 {
   1324 	if (on)
   1325 		dev->ud_bus->ub_usepolling++;
   1326 	else
   1327 		dev->ud_bus->ub_usepolling--;
   1328 
   1329 	/* Kick the host controller when switching modes */
   1330 	mutex_enter(dev->ud_bus->ub_lock);
   1331 	dev->ud_bus->ub_methods->ubm_softint(dev->ud_bus);
   1332 	mutex_exit(dev->ud_bus->ub_lock);
   1333 }
   1334 
   1335 
   1336 usb_endpoint_descriptor_t *
   1337 usbd_get_endpoint_descriptor(struct usbd_interface *iface, uint8_t address)
   1338 {
   1339 	struct usbd_endpoint *ep;
   1340 	int i;
   1341 
   1342 	for (i = 0; i < iface->ui_idesc->bNumEndpoints; i++) {
   1343 		ep = &iface->ui_endpoints[i];
   1344 		if (ep->ue_edesc->bEndpointAddress == address)
   1345 			return iface->ui_endpoints[i].ue_edesc;
   1346 	}
   1347 	return NULL;
   1348 }
   1349 
   1350 /*
    1351  * usbd_ratecheck() can limit the number of error messages that occur.
   1352  * When a device is unplugged it may take up to 0.25s for the hub driver
   1353  * to notice it.  If the driver continuously tries to do I/O operations
   1354  * this can generate a large number of messages.
   1355  */
   1356 int
   1357 usbd_ratecheck(struct timeval *last)
   1358 {
    1359 	static struct timeval errinterval = { 0, 250000 }; /* 0.25 s */
   1360 
   1361 	return ratecheck(last, &errinterval);
   1362 }
   1363 
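         /*
          * Illustrative sketch (not part of this file): rate-limiting an
          * error message from an xfer callback, where sc_lasterr is a
          * hypothetical struct timeval in the driver softc:
          *
          *	if (status != USBD_NORMAL_COMPLETION &&
          *	    usbd_ratecheck(&sc->sc_lasterr))
          *		printf("%s: intr failed: %s\n",
          *		    device_xname(sc->sc_dev), usbd_errstr(status));
          */
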
   1364 /*
   1365  * Search for a vendor/product pair in an array.  The item size is
   1366  * given as an argument.
   1367  */
   1368 const struct usb_devno *
   1369 usb_match_device(const struct usb_devno *tbl, u_int nentries, u_int sz,
   1370 		 uint16_t vendor, uint16_t product)
   1371 {
   1372 	while (nentries-- > 0) {
   1373 		uint16_t tproduct = tbl->ud_product;
   1374 		if (tbl->ud_vendor == vendor &&
   1375 		    (tproduct == product || tproduct == USB_PRODUCT_ANY))
   1376 			return tbl;
   1377 		tbl = (const struct usb_devno *)((const char *)tbl + sz);
   1378 	}
   1379 	return NULL;
   1380 }
   1381 
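         /*
          * Illustrative sketch (not part of this file): matching a driver's
          * device table in its match routine.  foo_devs and the
          * USB_VENDOR_FOO/USB_PRODUCT_FOO_BAR identifiers are hypothetical,
          * and uaa stands for the usb attach argument:
          *
          *	static const struct usb_devno foo_devs[] = {
          *		{ USB_VENDOR_FOO, USB_PRODUCT_FOO_BAR },
          *	};
          *
          *	if (usb_match_device(foo_devs, __arraycount(foo_devs),
          *	    sizeof(foo_devs[0]), uaa->uaa_vendor,
          *	    uaa->uaa_product) != NULL)
          *		return UMATCH_VENDOR_PRODUCT;
          */
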
   1382 usbd_status
   1383 usbd_get_string(struct usbd_device *dev, int si, char *buf)
   1384 {
   1385 	return usbd_get_string0(dev, si, buf, 1);
   1386 }
   1387 
   1388 usbd_status
   1389 usbd_get_string0(struct usbd_device *dev, int si, char *buf, int unicode)
   1390 {
   1391 	int swap = dev->ud_quirks->uq_flags & UQ_SWAP_UNICODE;
   1392 	usb_string_descriptor_t us;
   1393 	char *s;
   1394 	int i, n;
   1395 	uint16_t c;
   1396 	usbd_status err;
   1397 	int size;
   1398 
   1399 	USBHIST_FUNC(); USBHIST_CALLED(usbdebug);
   1400 
   1401 	buf[0] = '\0';
   1402 	if (si == 0)
   1403 		return USBD_INVAL;
   1404 	if (dev->ud_quirks->uq_flags & UQ_NO_STRINGS)
   1405 		return USBD_STALLED;
   1406 	if (dev->ud_langid == USBD_NOLANG) {
   1407 		/* Set up default language */
   1408 		err = usbd_get_string_desc(dev, USB_LANGUAGE_TABLE, 0, &us,
   1409 		    &size);
   1410 		if (err || size < 4) {
   1411 			USBHIST_LOG(usbdebug, "getting lang failed, using 0",
   1412 			    0, 0, 0, 0);
   1413 			dev->ud_langid = 0; /* Well, just pick something then */
   1414 		} else {
   1415 			/* Pick the first language as the default. */
   1416 			dev->ud_langid = UGETW(us.bString[0]);
   1417 		}
   1418 	}
   1419 	err = usbd_get_string_desc(dev, si, dev->ud_langid, &us, &size);
   1420 	if (err)
   1421 		return err;
   1422 	s = buf;
   1423 	n = size / 2 - 1;
   1424 	if (unicode) {
   1425 		for (i = 0; i < n; i++) {
   1426 			c = UGETW(us.bString[i]);
   1427 			if (swap)
   1428 				c = (c >> 8) | (c << 8);
   1429 			s += wput_utf8(s, 3, c);
   1430 		}
   1431 		*s++ = 0;
   1432 	}
   1433 #ifdef COMPAT_30
   1434 	else {
   1435 		for (i = 0; i < n; i++) {
   1436 			c = UGETW(us.bString[i]);
   1437 			if (swap)
   1438 				c = (c >> 8) | (c << 8);
   1439 			*s++ = (c < 0x80) ? c : '?';
   1440 		}
   1441 		*s++ = 0;
   1442 	}
   1443 #endif
   1444 	return USBD_NORMAL_COMPLETION;
   1445 }
   1446 
   1447 /*
   1448  * usbd_xfer_trycomplete(xfer)
   1449  *
   1450  *	Try to claim xfer for completion.  Return true if successful,
   1451  *	false if the xfer has been synchronously aborted or has timed
   1452  *	out.
   1453  *
   1454  *	If this returns true, caller is responsible for setting
   1455  *	xfer->ux_status and calling usb_transfer_complete.  To be used
   1456  *	in a host controller interrupt handler.
   1457  *
   1458  *	Caller must either hold the bus lock or have the bus in polling
   1459  *	mode.
   1460  */
   1461 bool
   1462 usbd_xfer_trycomplete(struct usbd_xfer *xfer)
   1463 {
   1464 	struct usbd_bus *bus __diagused = xfer->ux_bus;
   1465 
   1466 	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
   1467 
   1468 	/*
   1469 	 * If software has completed it, either by synchronous abort or
   1470 	 * by timeout, too late.
   1471 	 */
   1472 	if (xfer->ux_status != USBD_IN_PROGRESS)
   1473 		return false;
   1474 
   1475 	/*
   1476 	 * We are completing the xfer.  Cancel the timeout if we can,
   1477 	 * but only asynchronously.  See usbd_xfer_cancel_timeout_async
   1478 	 * for why we need not wait for the callout or task here.
   1479 	 */
   1480 	usbd_xfer_cancel_timeout_async(xfer);
   1481 
    1482 	/* Success!  Note: Caller must set xfer->ux_status afterward.  */
   1483 	return true;
   1484 }
   1485 
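         /*
          * Illustrative sketch (not part of this file): how a host
          * controller interrupt handler might complete an xfer it has
          * finished processing (with the bus lock held or the bus in
          * polling mode, per the contract above); `len' is the
          * hypothetical hardware-reported transfer count:
          *
          *	if (usbd_xfer_trycomplete(xfer)) {
          *		xfer->ux_actlen = len;
          *		xfer->ux_status = USBD_NORMAL_COMPLETION;
          *		usb_transfer_complete(xfer);
          *	}
          */
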
   1486 /*
   1487  * usbd_xfer_abort(xfer)
   1488  *
   1489  *	Try to claim xfer to abort.  If successful, mark it completed
   1490  *	with USBD_CANCELLED and call the bus-specific method to abort
   1491  *	at the hardware level.
   1492  *
   1493  *	To be called in thread context from struct
   1494  *	usbd_pipe_methods::upm_abort.
   1495  *
   1496  *	Caller must hold the bus lock.
   1497  */
   1498 void
   1499 usbd_xfer_abort(struct usbd_xfer *xfer)
   1500 {
   1501 	struct usbd_bus *bus = xfer->ux_bus;
   1502 
   1503 	KASSERT(mutex_owned(bus->ub_lock));
   1504 
   1505 	/*
   1506 	 * If host controller interrupt or timer interrupt has
   1507 	 * completed it, too late.  But the xfer cannot be
   1508 	 * cancelled already -- only one caller can synchronously
   1509 	 * abort.
   1510 	 */
   1511 	KASSERT(xfer->ux_status != USBD_CANCELLED);
   1512 	if (xfer->ux_status != USBD_IN_PROGRESS)
   1513 		return;
   1514 
   1515 	/*
   1516 	 * Cancel the timeout if we can, but only asynchronously; see
   1517 	 * usbd_xfer_cancel_timeout_async for why we need not wait for
   1518 	 * the callout or task here.
   1519 	 */
   1520 	usbd_xfer_cancel_timeout_async(xfer);
   1521 
   1522 	/*
   1523 	 * We beat everyone else.  Claim the status as cancelled and do
   1524 	 * the bus-specific dance to abort the hardware.
   1525 	 */
   1526 	xfer->ux_status = USBD_CANCELLED;
   1527 	bus->ub_methods->ubm_abortx(xfer);
   1528 }
   1529 
   1530 /*
   1531  * usbd_xfer_timeout(xfer)
   1532  *
   1533  *	Called at IPL_SOFTCLOCK when too much time has elapsed waiting
   1534  *	for xfer to complete.  Since we can't abort the xfer at
   1535  *	IPL_SOFTCLOCK, defer to a usb_task to run it in thread context,
   1536  *	unless the xfer has completed or aborted concurrently -- and if
   1537  *	the xfer has also been resubmitted, take care of rescheduling
   1538  *	the callout.
   1539  */
   1540 static void
   1541 usbd_xfer_timeout(void *cookie)
   1542 {
   1543 	struct usbd_xfer *xfer = cookie;
   1544 	struct usbd_bus *bus = xfer->ux_bus;
   1545 	struct usbd_device *dev = xfer->ux_pipe->up_dev;
   1546 
   1547 	/* Acquire the lock so we can transition the timeout state.  */
   1548 	mutex_enter(bus->ub_lock);
   1549 
   1550 	/*
   1551 	 * Use usbd_xfer_probe_timeout to check whether the timeout is
   1552 	 * still valid, or to reschedule the callout if necessary.  If
   1553 	 * it is still valid, schedule the task.
   1554 	 */
   1555 	if (usbd_xfer_probe_timeout(xfer))
   1556 		usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC);
   1557 
   1558 	/*
   1559 	 * Notify usbd_xfer_cancel_timeout_async that we may have
   1560 	 * scheduled the task.  This causes callout_invoking to return
   1561 	 * false in usbd_xfer_cancel_timeout_async so that it can tell
   1562 	 * which stage in the callout->task->abort process we're at.
   1563 	 */
   1564 	callout_ack(&xfer->ux_callout);
   1565 
   1566 	/* All done -- release the lock.  */
   1567 	mutex_exit(bus->ub_lock);
   1568 }
   1569 
   1570 /*
   1571  * usbd_xfer_timeout_task(xfer)
   1572  *
   1573  *	Called in thread context when too much time has elapsed waiting
   1574  *	for xfer to complete.  Abort the xfer with USBD_TIMEOUT, unless
   1575  *	it has completed or aborted concurrently -- and if the xfer has
   1576  *	also been resubmitted, take care of rescheduling the callout.
   1577  */
   1578 static void
   1579 usbd_xfer_timeout_task(void *cookie)
   1580 {
   1581 	struct usbd_xfer *xfer = cookie;
   1582 	struct usbd_bus *bus = xfer->ux_bus;
   1583 
   1584 	/* Acquire the lock so we can transition the timeout state.  */
   1585 	mutex_enter(bus->ub_lock);
   1586 
   1587 	/*
   1588 	 * Use usbd_xfer_probe_timeout to check whether the timeout is
   1589 	 * still valid, or to reschedule the callout if necessary.  If
   1590 	 * it is not valid -- the timeout has been asynchronously
   1591 	 * cancelled, or the xfer has already been resubmitted -- then
   1592 	 * we're done here.
   1593 	 */
   1594 	if (!usbd_xfer_probe_timeout(xfer))
   1595 		goto out;
   1596 
   1597 	/*
   1598 	 * May have completed or been aborted, but we're the only one
   1599 	 * who can time it out.  If it has completed or been aborted,
   1600 	 * no need to timeout.
   1601 	 */
   1602 	KASSERT(xfer->ux_status != USBD_TIMEOUT);
   1603 	if (xfer->ux_status != USBD_IN_PROGRESS)
   1604 		goto out;
   1605 
   1606 	/*
   1607 	 * We beat everyone else.  Claim the status as timed out and do
   1608 	 * the bus-specific dance to abort the hardware.
   1609 	 */
   1610 	xfer->ux_status = USBD_TIMEOUT;
   1611 	bus->ub_methods->ubm_abortx(xfer);
   1612 
   1613 out:	/* All done -- release the lock.  */
   1614 	mutex_exit(bus->ub_lock);
   1615 }
   1616 
   1617 /*
   1618  * usbd_xfer_probe_timeout(xfer)
   1619  *
   1620  *	Probe the status of xfer's timeout.  Acknowledge and process a
   1621  *	request to reschedule.  Return true if the timeout is still
   1622  *	valid and the caller should take further action (queueing a
   1623  *	task or aborting the xfer), false if it must stop here.
   1624  */
   1625 static bool
   1626 usbd_xfer_probe_timeout(struct usbd_xfer *xfer)
   1627 {
   1628 	struct usbd_bus *bus = xfer->ux_bus;
   1629 	bool valid;
   1630 
   1631 	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
   1632 
   1633 	/* The timeout must be set.  */
   1634 	KASSERT(xfer->ux_timeout_set);
   1635 
   1636 	/*
   1637 	 * Neither callout nor task may be pending; they execute
   1638 	 * alternately in lock step.
   1639 	 */
   1640 	KASSERT(!callout_pending(&xfer->ux_callout));
   1641 	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));
   1642 
   1643 	/* There are a few cases... */
   1644 	if (bus->ub_methods->ubm_dying(bus)) {
   1645 		/* Host controller dying.  Drop it all on the floor.  */
   1646 		xfer->ux_timeout_set = false;
   1647 		xfer->ux_timeout_reset = false;
   1648 		valid = false;
   1649 	} else if (xfer->ux_timeout_reset) {
   1650 		/*
   1651 		 * The xfer completed _and_ got resubmitted while we
   1652 		 * waited for the lock.  Acknowledge the request to
   1653 		 * reschedule, and reschedule it if there is a timeout
   1654 		 * and the bus is not polling.
   1655 		 */
   1656 		xfer->ux_timeout_reset = false;
   1657 		if (xfer->ux_timeout && !bus->ub_usepolling) {
   1658 			KASSERT(xfer->ux_timeout_set);
   1659 			callout_schedule(&xfer->ux_callout,
   1660 			    mstohz(xfer->ux_timeout));
   1661 		} else {
   1662 			/* No more callout or task scheduled.  */
   1663 			xfer->ux_timeout_set = false;
   1664 		}
   1665 		valid = false;
   1666 	} else if (xfer->ux_status != USBD_IN_PROGRESS) {
   1667 		/*
   1668 		 * The xfer has completed by hardware completion or by
   1669 		 * software abort, and has not been resubmitted, so the
   1670 		 * timeout must be unset, and is no longer valid for
   1671 		 * the caller.
   1672 		 */
   1673 		xfer->ux_timeout_set = false;
   1674 		valid = false;
   1675 	} else {
   1676 		/*
   1677 		 * The xfer has not yet completed, so the timeout is
   1678 		 * valid.
   1679 		 */
   1680 		valid = true;
   1681 	}
   1682 
   1683 	/* Any reset must have been processed.  */
   1684 	KASSERT(!xfer->ux_timeout_reset);
   1685 
   1686 	/*
   1687 	 * Either we claim the timeout is set, or the callout is idle.
   1688 	 * If the timeout is still set, we may be handing off to the
   1689 	 * task instead, so this is an if but not an iff.
   1690 	 */
   1691 	KASSERT(xfer->ux_timeout_set || !callout_pending(&xfer->ux_callout));
   1692 
   1693 	/*
   1694 	 * The task must be idle now.
   1695 	 *
   1696 	 * - If the caller is the callout, _and_ the timeout is still
   1697 	 *   valid, the caller will schedule it, but it hasn't been
   1698 	 *   scheduled yet.  (If the timeout is not valid, the task
   1699 	 *   should not be scheduled.)
   1700 	 *
   1701 	 * - If the caller is the task, it cannot be scheduled again
   1702 	 *   until the callout runs again, which won't happen until we
   1703 	 *   next release the lock.
   1704 	 */
   1705 	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));
   1706 
   1707 	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
   1708 
   1709 	return valid;
   1710 }
   1711 
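         /*
          * Taken together with usbd_xfer_schedule_timeout below, the
          * cases above keep the two flags consistent: ux_timeout_reset
          * is only requested while ux_timeout_set is true, and it is
          * always consumed or cleared before ux_timeout_set is cleared.
          * Spelled out as an assertion (not asserted in this form above;
          * shown only as a compact restatement of that invariant):
          *
          *	KASSERT(!xfer->ux_timeout_reset || xfer->ux_timeout_set);
          */
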
   1712 /*
   1713  * usbd_xfer_schedule_timeout(xfer)
   1714  *
   1715  *	Ensure that xfer has a timeout.  If the callout is already
   1716  *	queued or the task is already running, request that they
   1717  *	reschedule the callout.  If not, and if we're not polling,
   1718  *	schedule the callout anew.
   1719  *
   1720  *	To be called in thread context from struct
   1721  *	usbd_pipe_methods::upm_start.
   1722  */
   1723 void
   1724 usbd_xfer_schedule_timeout(struct usbd_xfer *xfer)
   1725 {
   1726 	struct usbd_bus *bus = xfer->ux_bus;
   1727 
   1728 	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
   1729 
   1730 	if (xfer->ux_timeout_set) {
   1731 		/*
    1732 		 * The callout or task fired for a prior, now-completed
    1733 		 * submission of this xfer and has not yet noticed that
    1734 		 * the xfer is done.  Ask it to reschedule to ux_timeout.
   1735 		 */
   1736 		xfer->ux_timeout_reset = true;
   1737 	} else if (xfer->ux_timeout && !bus->ub_usepolling) {
   1738 		/* Callout is not scheduled.  Schedule it.  */
   1739 		KASSERT(!callout_pending(&xfer->ux_callout));
   1740 		callout_schedule(&xfer->ux_callout, mstohz(xfer->ux_timeout));
   1741 		xfer->ux_timeout_set = true;
   1742 	}
   1743 
   1744 	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
   1745 }
   1746 
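         /*
          * For illustration, a host controller driver's upm_start method
          * is expected to use this roughly as follows.  The driver and
          * function names are made up; only the calls into this file and
          * the requirement that the bus lock be held (per the KASSERTs
          * above) are meant literally, and where the lock is actually
          * taken varies from driver to driver:
          *
          *	static usbd_status
          *	examplehci_device_bulk_start(struct usbd_xfer *xfer)
          *	{
          *		struct usbd_bus *bus = xfer->ux_bus;
          *
          *		mutex_enter(bus->ub_lock);
          *		... hand the xfer to the hardware, with
          *		    xfer->ux_status marked USBD_IN_PROGRESS ...
          *		usbd_xfer_schedule_timeout(xfer);
          *		mutex_exit(bus->ub_lock);
          *
          *		return USBD_IN_PROGRESS;
          *	}
          */
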
   1747 /*
   1748  * usbd_xfer_cancel_timeout_async(xfer)
   1749  *
   1750  *	Cancel the callout and the task of xfer, which have not yet run
   1751  *	to completion, but don't wait for the callout or task to finish
   1752  *	running.
   1753  *
   1754  *	If they have already fired, at worst they are waiting for the
   1755  *	bus lock.  They will see that the xfer is no longer in progress
   1756  *	and give up, or they will see that the xfer has been
   1757  *	resubmitted with a new timeout and reschedule the callout.
   1758  *
   1759  *	If a resubmitted request completed so fast that the callout
   1760  *	didn't have time to process a timer reset, just cancel the
   1761  *	timer reset.
   1762  */
   1763 static void
   1764 usbd_xfer_cancel_timeout_async(struct usbd_xfer *xfer)
   1765 {
   1766 	struct usbd_bus *bus __diagused = xfer->ux_bus;
   1767 
   1768 	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
   1769 
   1770 	/*
   1771 	 * If the timer wasn't running anyway, forget about it.  This
   1772 	 * can happen if we are completing an isochronous transfer
   1773 	 * which doesn't use the same timeout logic.
   1774 	 */
   1775 	if (!xfer->ux_timeout_set)
   1776 		return;
   1777 
   1778 	xfer->ux_timeout_reset = false;
   1779 	if (!callout_stop(&xfer->ux_callout)) {
   1780 		/*
   1781 		 * We stopped the callout before it ran.  The timeout
   1782 		 * is no longer set.
   1783 		 */
   1784 		xfer->ux_timeout_set = false;
   1785 	} else if (callout_invoking(&xfer->ux_callout)) {
   1786 		/*
   1787 		 * The callout has begun to run but it has not yet
   1788 		 * acquired the lock and called callout_ack.  The task
   1789 		 * cannot be queued yet, and the callout cannot have
   1790 		 * been rescheduled yet.
   1791 		 *
   1792 		 * By the time the callout acquires the lock, we will
   1793 		 * have transitioned from USBD_IN_PROGRESS to a
   1794 		 * completed status, and possibly also resubmitted the
   1795 		 * xfer and set xfer->ux_timeout_reset = true.  In both
   1796 		 * cases, the callout will DTRT, so no further action
   1797 		 * is needed here.
   1798 		 */
   1799 	} else if (usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)) {
   1800 		/*
   1801 		 * The callout had fired and scheduled the task, but we
   1802 		 * stopped the task before it could run.  The timeout
   1803 		 * is therefore no longer set -- the next resubmission
   1804 		 * of the xfer must schedule a new timeout.
   1805 		 *
   1806 		 * The callout should not be pending at this point:
   1807 		 * it is scheduled only under the lock, and only when
   1808 		 * xfer->ux_timeout_set is false, or by the callout or
   1809 		 * task itself when xfer->ux_timeout_reset is true.
   1810 		 */
   1811 		xfer->ux_timeout_set = false;
   1812 	}
   1813 
   1814 	/*
   1815 	 * The callout cannot be scheduled and the task cannot be
   1816 	 * queued at this point.  Either we cancelled them, or they are
   1817 	 * already running and waiting for the bus lock.
   1818 	 */
   1819 	KASSERT(!callout_pending(&xfer->ux_callout));
   1820 	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));
   1821 
   1822 	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
   1823 }
   1824
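         /*
          * For illustration, the completion side is expected to pair
          * with this roughly as follows.  The helper name here is
          * hypothetical; the real completion path lives elsewhere in
          * this file:
          *
          *	static void
          *	example_complete(struct usbd_xfer *xfer, usbd_status status)
          *	{
          *		KASSERT(mutex_owned(xfer->ux_bus->ub_lock));
          *
          *		if (xfer->ux_status == USBD_IN_PROGRESS) {
          *			xfer->ux_status = status;
          *			usbd_xfer_cancel_timeout_async(xfer);
          *		}
          *		... notify or call back the submitter ...
          *	}
          *
          * A callout or task that has already fired will later find the
          * xfer no longer USBD_IN_PROGRESS in usbd_xfer_probe_timeout
          * and back out on its own, which is why no synchronous wait on
          * the callout or task is needed here.
          */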