1/*	$NetBSD: usbdi.c,v 1.255 2025/10/11 12:54:40 skrll Exp $	*/
2
3/*
4 * Copyright (c) 1998, 2012, 2015 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Lennart Augustsson (lennart@augustsson.net) at
9 * Carlstedt Research & Technology, Matthew R. Green (mrg@eterna23.net),
10 * and Nick Hudson.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__KERNEL_RCSID(0, "$NetBSD: usbdi.c,v 1.255 2025/10/11 12:54:40 skrll Exp $");
36
37#ifdef _KERNEL_OPT
38#include "opt_compat_netbsd.h"
39#include "opt_usb.h"
40#include "usb_dma.h"
41#endif
42
43#include <sys/param.h>
44
45#include <sys/bus.h>
46#include <sys/cpu.h>
47#include <sys/device.h>
48#include <sys/kernel.h>
49#include <sys/kmem.h>
50#include <sys/proc.h>
51#include <sys/systm.h>
52
53#include <dev/usb/usb.h>
54#include <dev/usb/usbdi.h>
55#include <dev/usb/usbdi_util.h>
56#include <dev/usb/usbdivar.h>
57#include <dev/usb/usb_mem.h>
58#include <dev/usb/usb_quirks.h>
59#include <dev/usb/usb_sdt.h>
60#include <dev/usb/usbhist.h>
61
62#include <ddb/db_active.h>
63
64/* UTF-8 encoding stuff */
65#include <fs/unicode.h>
66
67SDT_PROBE_DEFINE5(usb, device, pipe, open,
68    "struct usbd_interface *"/*iface*/,
69    "uint8_t"/*address*/,
70    "uint8_t"/*flags*/,
71    "int"/*ival*/,
72    "struct usbd_pipe *"/*pipe*/);
73
74SDT_PROBE_DEFINE7(usb, device, pipe, open__intr,
75    "struct usbd_interface *"/*iface*/,
76    "uint8_t"/*address*/,
77    "uint8_t"/*flags*/,
78    "int"/*ival*/,
79    "usbd_callback"/*cb*/,
80    "void *"/*cookie*/,
81    "struct usbd_pipe *"/*pipe*/);
82
83SDT_PROBE_DEFINE2(usb, device, pipe, transfer__start,
84    "struct usbd_pipe *"/*pipe*/,
85    "struct usbd_xfer *"/*xfer*/);
86SDT_PROBE_DEFINE3(usb, device, pipe, transfer__done,
87    "struct usbd_pipe *"/*pipe*/,
88    "struct usbd_xfer *"/*xfer*/,
89    "usbd_status"/*err*/);
90SDT_PROBE_DEFINE2(usb, device, pipe, start,
91    "struct usbd_pipe *"/*pipe*/,
92    "struct usbd_xfer *"/*xfer*/);
93
94SDT_PROBE_DEFINE1(usb, device, pipe, close,  "struct usbd_pipe *"/*pipe*/);
95SDT_PROBE_DEFINE1(usb, device, pipe, abort__start,
96    "struct usbd_pipe *"/*pipe*/);
97SDT_PROBE_DEFINE1(usb, device, pipe, abort__done,
98    "struct usbd_pipe *"/*pipe*/);
99SDT_PROBE_DEFINE1(usb, device, pipe, clear__endpoint__stall,
100    "struct usbd_pipe *"/*pipe*/);
101SDT_PROBE_DEFINE1(usb, device, pipe, clear__endpoint__toggle,
102    "struct usbd_pipe *"/*pipe*/);
103
104SDT_PROBE_DEFINE5(usb, device, xfer, create,
105    "struct usbd_xfer *"/*xfer*/,
106    "struct usbd_pipe *"/*pipe*/,
107    "size_t"/*len*/,
108    "unsigned int"/*flags*/,
109    "unsigned int"/*nframes*/);
110SDT_PROBE_DEFINE1(usb, device, xfer, start,  "struct usbd_xfer *"/*xfer*/);
111SDT_PROBE_DEFINE1(usb, device, xfer, preabort,  "struct usbd_xfer *"/*xfer*/);
112SDT_PROBE_DEFINE1(usb, device, xfer, abort,  "struct usbd_xfer *"/*xfer*/);
113SDT_PROBE_DEFINE1(usb, device, xfer, timeout,  "struct usbd_xfer *"/*xfer*/);
114SDT_PROBE_DEFINE2(usb, device, xfer, done,
115    "struct usbd_xfer *"/*xfer*/,
116    "usbd_status"/*status*/);
117SDT_PROBE_DEFINE1(usb, device, xfer, destroy,  "struct usbd_xfer *"/*xfer*/);
118
119SDT_PROBE_DEFINE5(usb, device, request, start,
120    "struct usbd_device *"/*dev*/,
121    "usb_device_request_t *"/*req*/,
122    "size_t"/*len*/,
123    "int"/*flags*/,
124    "uint32_t"/*timeout*/);
125
126SDT_PROBE_DEFINE7(usb, device, request, done,
127    "struct usbd_device *"/*dev*/,
128    "usb_device_request_t *"/*req*/,
129    "size_t"/*actlen*/,
130    "int"/*flags*/,
131    "uint32_t"/*timeout*/,
132    "void *"/*data*/,
133    "usbd_status"/*status*/);
134
135Static void usbd_ar_pipe(struct usbd_pipe *);
136Static void usbd_start_next(struct usbd_pipe *);
137Static usbd_status usbd_open_pipe_ival
138	(struct usbd_interface *, uint8_t, uint8_t, struct usbd_pipe **, int);
139static void *usbd_alloc_buffer(struct usbd_xfer *, uint32_t);
140static void usbd_free_buffer(struct usbd_xfer *);
141static struct usbd_xfer *usbd_alloc_xfer(struct usbd_device *, unsigned int);
142static void usbd_free_xfer(struct usbd_xfer *);
143static void usbd_xfer_timeout(void *);
144static void usbd_xfer_timeout_task(void *);
145static bool usbd_xfer_probe_timeout(struct usbd_xfer *);
146static void usbd_xfer_cancel_timeout_async(struct usbd_xfer *);
147
148#if defined(USB_DEBUG)
149void
150usbd_dump_iface(struct usbd_interface *iface)
151{
152	USBHIST_FUNC();
153	USBHIST_CALLARGS(usbdebug, "iface %#jx", (uintptr_t)iface, 0, 0, 0);
154
155	if (iface == NULL)
156		return;
157	USBHIST_LOG(usbdebug, "     device = %#jx idesc = %#jx index = %jd",
158	    (uintptr_t)iface->ui_dev, (uintptr_t)iface->ui_idesc,
159	    iface->ui_index, 0);
160	USBHIST_LOG(usbdebug, "     altindex=%jd",
161	    iface->ui_altindex, 0, 0, 0);
162}
163
164void
165usbd_dump_device(struct usbd_device *dev)
166{
167	USBHIST_FUNC();
168	USBHIST_CALLARGS(usbdebug, "dev = %#jx", (uintptr_t)dev, 0, 0, 0);
169
170	if (dev == NULL)
171		return;
172	USBHIST_LOG(usbdebug, "     bus = %#jx default_pipe = %#jx",
173	    (uintptr_t)dev->ud_bus, (uintptr_t)dev->ud_pipe0, 0, 0);
174	USBHIST_LOG(usbdebug, "     address = %jd config = %jd depth = %jd ",
175	    dev->ud_addr, dev->ud_config, dev->ud_depth, 0);
176	USBHIST_LOG(usbdebug, "     speed = %jd self_powered = %jd "
177	    "power = %jd langid = %jd",
178	    dev->ud_speed, dev->ud_selfpowered, dev->ud_power, dev->ud_langid);
179}
180
181void
182usbd_dump_endpoint(struct usbd_endpoint *endp)
183{
184	USBHIST_FUNC();
185	USBHIST_CALLARGS(usbdebug, "endp = %#jx", (uintptr_t)endp, 0, 0, 0);
186
187	if (endp == NULL)
188		return;
189	USBHIST_LOG(usbdebug, "    edesc = %#jx refcnt = %jd",
190	    (uintptr_t)endp->ue_edesc, endp->ue_refcnt, 0, 0);
191	if (endp->ue_edesc)
192		USBHIST_LOG(usbdebug, "     bEndpointAddress=0x%02jx",
193		    endp->ue_edesc->bEndpointAddress, 0, 0, 0);
194}
195
196void
197usbd_dump_queue(struct usbd_pipe *pipe)
198{
199	struct usbd_xfer *xfer;
200
201	USBHIST_FUNC();
202	USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 0);
203
204	SIMPLEQ_FOREACH(xfer, &pipe->up_queue, ux_next) {
205		USBHIST_LOG(usbdebug, "     xfer = %#jx", (uintptr_t)xfer,
206		    0, 0, 0);
207	}
208}
209
210void
211usbd_dump_pipe(struct usbd_pipe *pipe)
212{
213	USBHIST_FUNC();
214	USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 0);
215
216	if (pipe == NULL)
217		return;
218	usbd_dump_iface(pipe->up_iface);
219	usbd_dump_device(pipe->up_dev);
220	usbd_dump_endpoint(pipe->up_endpoint);
221	USBHIST_LOG(usbdebug, "(usbd_dump_pipe)", 0, 0, 0, 0);
222	USBHIST_LOG(usbdebug, "     running = %jd aborting = %jd",
223	    pipe->up_running, pipe->up_aborting, 0, 0);
224	USBHIST_LOG(usbdebug, "     intrxfer = %#jx, repeat = %jd, "
225	    "interval = %jd", (uintptr_t)pipe->up_intrxfer, pipe->up_repeat,
226	    pipe->up_interval, 0);
227}
228#endif
229
230usbd_status
231usbd_open_pipe(struct usbd_interface *iface, uint8_t address,
232	       uint8_t flags, struct usbd_pipe **pipe)
233{
234	return (usbd_open_pipe_ival(iface, address, flags, pipe,
235				    USBD_DEFAULT_INTERVAL));
236}
237
238usbd_status
239usbd_open_pipe_ival(struct usbd_interface *iface, uint8_t address,
240		    uint8_t flags, struct usbd_pipe **pipe, int ival)
241{
242	struct usbd_pipe *p = NULL;
243	struct usbd_endpoint *ep = NULL /* XXXGCC */;
244	bool piperef = false;
245	usbd_status err;
246	int i;
247
248	USBHIST_FUNC();
249	USBHIST_CALLARGS(usbdebug, "iface = %#jx address = %#jx flags = %#jx",
250	    (uintptr_t)iface, address, flags, 0);
251
252	/*
253	 * Block usbd_set_interface so we have a snapshot of the
254	 * interface endpoints.  They will remain stable until we drop
255	 * the reference in usbd_close_pipe (or on failure here).
256	 */
257	err = usbd_iface_piperef(iface);
258	if (err)
259		goto out;
260	piperef = true;
261
262	/* Find the endpoint at this address.  */
263	for (i = 0; i < iface->ui_idesc->bNumEndpoints; i++) {
264		ep = &iface->ui_endpoints[i];
265		if (ep->ue_edesc == NULL) {
266			err = USBD_IOERROR;
267			goto out;
268		}
269		if (ep->ue_edesc->bEndpointAddress == address)
270			break;
271	}
272	if (i == iface->ui_idesc->bNumEndpoints) {
273		err = USBD_BAD_ADDRESS;
274		goto out;
275	}
276
277	/* Set up the pipe with this endpoint.  */
278	err = usbd_setup_pipe_flags(iface->ui_dev, iface, ep, ival, &p, flags);
279	if (err)
280		goto out;
281
282	/* Success! */
283	*pipe = p;
284	p = NULL;		/* handed off to caller */
285	piperef = false;	/* handed off to pipe */
286	SDT_PROBE5(usb, device, pipe, open,
287	    iface, address, flags, ival, p);
288	err = USBD_NORMAL_COMPLETION;
289
290out:	if (p)
291		usbd_close_pipe(p);
292	if (piperef)
293		usbd_iface_pipeunref(iface);
294	return err;
295}
296
297usbd_status
298usbd_open_pipe_intr(struct usbd_interface *iface, uint8_t address,
299		    uint8_t flags, struct usbd_pipe **pipe,
300		    void *priv, void *buffer, uint32_t len,
301		    usbd_callback cb, int ival)
302{
303	usbd_status err;
304	struct usbd_xfer *xfer;
305	struct usbd_pipe *ipipe;
306
307	USBHIST_FUNC();
308	USBHIST_CALLARGS(usbdebug, "address = %#jx flags = %#jx len = %jd",
309	    address, flags, len, 0);
310
311	err = usbd_open_pipe_ival(iface, address,
312				  USBD_EXCLUSIVE_USE | (flags & USBD_MPSAFE),
313				  &ipipe, ival);
314	if (err)
315		return err;
316	err = usbd_create_xfer(ipipe, len, flags, 0, &xfer);
317	if (err)
318		goto bad1;
319
320	usbd_setup_xfer(xfer, priv, buffer, len, flags, USBD_NO_TIMEOUT, cb);
321	ipipe->up_intrxfer = xfer;
322	ipipe->up_repeat = 1;
323	err = usbd_transfer(xfer);
324	*pipe = ipipe;
325	if (err != USBD_IN_PROGRESS)
326		goto bad3;
327	SDT_PROBE7(usb, device, pipe, open__intr,
328	    iface, address, flags, ival, cb, priv, ipipe);
329	return USBD_NORMAL_COMPLETION;
330
331 bad3:
332	ipipe->up_intrxfer = NULL;
333	ipipe->up_repeat = 0;
334
335	usbd_destroy_xfer(xfer);
336 bad1:
337	usbd_close_pipe(ipipe);
338	return err;
339}
340
341void
342usbd_close_pipe(struct usbd_pipe *pipe)
343{
344	USBHIST_FUNC(); USBHIST_CALLED(usbdebug);
345
346	KASSERT(pipe != NULL);
347
348	usbd_lock_pipe(pipe);
349	SDT_PROBE1(usb, device, pipe, close,  pipe);
350	if (!SIMPLEQ_EMPTY(&pipe->up_queue)) {
351		printf("WARNING: pipe closed with active xfers on addr %d\n",
352		    pipe->up_dev->ud_addr);
353		usbd_ar_pipe(pipe);
354	}
355	KASSERT(SIMPLEQ_EMPTY(&pipe->up_queue));
356	pipe->up_methods->upm_close(pipe);
357	usbd_unlock_pipe(pipe);
358
359	cv_destroy(&pipe->up_callingcv);
360	if (pipe->up_intrxfer)
361		usbd_destroy_xfer(pipe->up_intrxfer);
362	usb_rem_task_wait(pipe->up_dev, &pipe->up_async_task, USB_TASKQ_DRIVER,
363	    NULL);
364	usbd_endpoint_release(pipe->up_dev, pipe->up_endpoint);
365	if (pipe->up_iface)
366		usbd_iface_pipeunref(pipe->up_iface);
367	kmem_free(pipe, pipe->up_dev->ud_bus->ub_pipesize);
368}
369
370usbd_status
371usbd_transfer(struct usbd_xfer *xfer)
372{
373	struct usbd_pipe *pipe = xfer->ux_pipe;
374	usbd_status err;
375	unsigned int size, flags;
376
377	USBHIST_FUNC(); USBHIST_CALLARGS(usbdebug,
378	    "xfer = %#jx, flags = %#jx, pipe = %#jx, running = %jd",
379	    (uintptr_t)xfer, xfer->ux_flags, (uintptr_t)pipe, pipe->up_running);
380	KASSERT(xfer->ux_status == USBD_NOT_STARTED);
381	SDT_PROBE1(usb, device, xfer, start,  xfer);
382
383#ifdef USB_DEBUG
384	if (usbdebug > 5)
385		usbd_dump_queue(pipe);
386#endif
387	xfer->ux_done = 0;
388
389	KASSERT(xfer->ux_length == 0 || xfer->ux_buf != NULL);
390
391	size = xfer->ux_length;
392	flags = xfer->ux_flags;
393
394	if (size != 0) {
395		/*
396		 * Use the xfer buffer if none specified in transfer setup.
397		 * isoc transfers always use the xfer buffer, i.e.
398		 * ux_buffer is always NULL for isoc.
399		 */
400		if (xfer->ux_buffer == NULL) {
401			xfer->ux_buffer = xfer->ux_buf;
402		}
403
404		/*
405		 * If not using the xfer buffer copy data to the
406		 * xfer buffer for OUT transfers of >0 length
407		 */
408		if (xfer->ux_buffer != xfer->ux_buf) {
409			KASSERT(xfer->ux_buf);
410			if (!usbd_xfer_isread(xfer)) {
411				memcpy(xfer->ux_buf, xfer->ux_buffer, size);
412			}
413		}
414	}
415
416	if (pipe->up_dev->ud_bus->ub_usepolling == 0)
417		usbd_lock_pipe(pipe);
418	if (pipe->up_aborting) {
419		/*
420		 * XXX For synchronous transfers this is fine.  What to
421		 * do for asynchronous transfers?  The callback is
422		 * never run, not even with status USBD_CANCELLED.
423		 */
424		KASSERT(pipe->up_dev->ud_bus->ub_usepolling == 0);
425		usbd_unlock_pipe(pipe);
426		USBHIST_LOG(usbdebug, "<- done xfer %#jx, aborting",
427		    (uintptr_t)xfer, 0, 0, 0);
428		SDT_PROBE2(usb, device, xfer, done,  xfer, USBD_CANCELLED);
429		return USBD_CANCELLED;
430	}
431
432	/* xfer is not valid after the transfer method unless synchronous */
433	SDT_PROBE2(usb, device, pipe, transfer__start,  pipe, xfer);
434	do {
435#ifdef DIAGNOSTIC
436		xfer->ux_state = XFER_ONQU;
437#endif
438		SIMPLEQ_INSERT_TAIL(&pipe->up_queue, xfer, ux_next);
439		if (pipe->up_running && pipe->up_serialise) {
440			err = USBD_IN_PROGRESS;
441		} else {
442			pipe->up_running = 1;
443			err = USBD_NORMAL_COMPLETION;
444		}
445		if (err)
446			break;
447		err = pipe->up_methods->upm_transfer(xfer);
448	} while (0);
449	SDT_PROBE3(usb, device, pipe, transfer__done,  pipe, xfer, err);
450
451	if (pipe->up_dev->ud_bus->ub_usepolling == 0)
452		usbd_unlock_pipe(pipe);
453
454	if (err != USBD_IN_PROGRESS && err) {
455		/*
456		 * The transfer made it onto the pipe queue, but didn't get
457		 * accepted by the HCD for some reason.  It needs removing
458		 * from the pipe queue.
459		 */
460		USBHIST_LOG(usbdebug, "xfer failed: %jd, reinserting",
461		    err, 0, 0, 0);
462		if (pipe->up_dev->ud_bus->ub_usepolling == 0)
463			usbd_lock_pipe(pipe);
464		SDT_PROBE1(usb, device, xfer, preabort,  xfer);
465#ifdef DIAGNOSTIC
466		xfer->ux_state = XFER_BUSY;
467#endif
468		SIMPLEQ_REMOVE_HEAD(&pipe->up_queue, ux_next);
469		if (pipe->up_serialise)
470			usbd_start_next(pipe);
471		if (pipe->up_dev->ud_bus->ub_usepolling == 0)
472			usbd_unlock_pipe(pipe);
473	}
474
475	if (!(flags & USBD_SYNCHRONOUS)) {
476		USBHIST_LOG(usbdebug, "<- done xfer %#jx, not sync (err %jd)",
477		    (uintptr_t)xfer, err, 0, 0);
478		KASSERTMSG(err != USBD_NORMAL_COMPLETION,
479		    "asynchronous xfer %p completed synchronously", xfer);
480		return err;
481	}
482
483	if (err != USBD_IN_PROGRESS) {
484		USBHIST_LOG(usbdebug, "<- done xfer %#jx, sync (err %jd)",
485		    (uintptr_t)xfer, err, 0, 0);
486		SDT_PROBE2(usb, device, xfer, done,  xfer, err);
487		return err;
488	}
489
490	/* Sync transfer, wait for completion. */
491	if (pipe->up_dev->ud_bus->ub_usepolling == 0)
492		usbd_lock_pipe(pipe);
493	while (!xfer->ux_done) {
494		if (pipe->up_dev->ud_bus->ub_usepolling)
495			panic("usbd_transfer: not done");
496		USBHIST_LOG(usbdebug, "<- sleeping on xfer %#jx",
497		    (uintptr_t)xfer, 0, 0, 0);
498
499		err = 0;
500		if ((flags & USBD_SYNCHRONOUS_SIG) != 0) {
501			err = cv_wait_sig(&xfer->ux_cv, pipe->up_dev->ud_bus->ub_lock);
502		} else {
503			cv_wait(&xfer->ux_cv, pipe->up_dev->ud_bus->ub_lock);
504		}
505		if (err) {
506			if (!xfer->ux_done) {
507				SDT_PROBE1(usb, device, xfer, abort,  xfer);
508				pipe->up_methods->upm_abort(xfer);
509			}
510			break;
511		}
512	}
513	err = xfer->ux_status;
514	SDT_PROBE2(usb, device, xfer, done,  xfer, err);
515	if (pipe->up_dev->ud_bus->ub_usepolling == 0)
516		usbd_unlock_pipe(pipe);
517	return err;
518}
519
520/* Like usbd_transfer(), but waits for completion. */
521usbd_status
522usbd_sync_transfer(struct usbd_xfer *xfer)
523{
524	xfer->ux_flags |= USBD_SYNCHRONOUS;
525	return usbd_transfer(xfer);
526}
527
528/* Like usbd_transfer(), but waits for completion and listens for signals. */
529usbd_status
530usbd_sync_transfer_sig(struct usbd_xfer *xfer)
531{
532	xfer->ux_flags |= USBD_SYNCHRONOUS | USBD_SYNCHRONOUS_SIG;
533	return usbd_transfer(xfer);
534}
535
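/*
 * Example: a driver issuing a synchronous bulk transfer with the
 * helpers above.  This is an illustrative sketch only; the "sc"
 * softc, its "sc_bulkout_pipe" member and the "buf"/"len" variables
 * are hypothetical driver state, not part of this file.
 */
#if 0
	struct usbd_xfer *xfer;
	usbd_status err;
	int error;

	/* Allocate an xfer (and its transfer buffer) for the pipe. */
	error = usbd_create_xfer(sc->sc_bulkout_pipe, len, 0, 0, &xfer);
	if (error)
		return error;

	/* Describe this particular transfer and run it to completion. */
	usbd_setup_xfer(xfer, sc, buf, len, 0, USBD_DEFAULT_TIMEOUT, NULL);
	err = usbd_sync_transfer(xfer);
	if (err)
		aprint_error_dev(sc->sc_dev, "write failed: %s\n",
		    usbd_errstr(err));

	usbd_destroy_xfer(xfer);
#endif
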
536static void *
537usbd_alloc_buffer(struct usbd_xfer *xfer, uint32_t size)
538{
539	KASSERT(xfer->ux_buf == NULL);
540	KASSERT(size != 0);
541
542	xfer->ux_bufsize = 0;
543#if NUSB_DMA > 0
544	struct usbd_bus *bus = xfer->ux_bus;
545
546	if (bus->ub_usedma) {
547		usb_dma_t *dmap = &xfer->ux_dmabuf;
548
549		KASSERT((bus->ub_dmaflags & USBMALLOC_COHERENT) == 0);
550		int err = usb_allocmem(bus->ub_dmatag, size, 0, bus->ub_dmaflags, dmap);
551		if (err) {
552			return NULL;
553		}
554		xfer->ux_buf = KERNADDR(&xfer->ux_dmabuf, 0);
555		xfer->ux_bufsize = size;
556
557		return xfer->ux_buf;
558	}
559#endif
560	KASSERT(xfer->ux_bus->ub_usedma == false);
561	xfer->ux_buf = kmem_alloc(size, KM_SLEEP);
562	xfer->ux_bufsize = size;
563	return xfer->ux_buf;
564}
565
566static void
567usbd_free_buffer(struct usbd_xfer *xfer)
568{
569	KASSERT(xfer->ux_buf != NULL);
570	KASSERT(xfer->ux_bufsize != 0);
571
572	void *buf = xfer->ux_buf;
573	uint32_t size = xfer->ux_bufsize;
574
575	xfer->ux_buf = NULL;
576	xfer->ux_bufsize = 0;
577
578#if NUSB_DMA > 0
579	struct usbd_bus *bus = xfer->ux_bus;
580
581	if (bus->ub_usedma) {
582		usb_dma_t *dmap = &xfer->ux_dmabuf;
583
584		usb_freemem(dmap);
585		return;
586	}
587#endif
588	KASSERT(xfer->ux_bus->ub_usedma == false);
589
590	kmem_free(buf, size);
591}
592
593void *
594usbd_get_buffer(struct usbd_xfer *xfer)
595{
596	return xfer->ux_buf;
597}
598
599struct usbd_pipe *
600usbd_get_pipe0(struct usbd_device *dev)
601{
602
603	return dev->ud_pipe0;
604}
605
606static struct usbd_xfer *
607usbd_alloc_xfer(struct usbd_device *dev, unsigned int nframes)
608{
609	struct usbd_xfer *xfer;
610
611	USBHIST_FUNC();
612
613	ASSERT_SLEEPABLE();
614
615	xfer = dev->ud_bus->ub_methods->ubm_allocx(dev->ud_bus, nframes);
616	if (xfer == NULL)
617		goto out;
618	xfer->ux_bus = dev->ud_bus;
619	callout_init(&xfer->ux_callout, CALLOUT_MPSAFE);
620	callout_setfunc(&xfer->ux_callout, usbd_xfer_timeout, xfer);
621	cv_init(&xfer->ux_cv, "usbxfer");
622	usb_init_task(&xfer->ux_aborttask, usbd_xfer_timeout_task, xfer,
623	    USB_TASKQ_MPSAFE);
624
625out:
626	USBHIST_CALLARGS(usbdebug, "returns %#jx", (uintptr_t)xfer, 0, 0, 0);
627
628	return xfer;
629}
630
631static void
632usbd_free_xfer(struct usbd_xfer *xfer)
633{
634	USBHIST_FUNC();
635	USBHIST_CALLARGS(usbdebug, "%#jx", (uintptr_t)xfer, 0, 0, 0);
636
637	if (xfer->ux_buf) {
638		usbd_free_buffer(xfer);
639	}
640
641	/* Wait for any straggling timeout to complete. */
642	mutex_enter(xfer->ux_bus->ub_lock);
643	xfer->ux_timeout_reset = false; /* do not resuscitate */
644	callout_halt(&xfer->ux_callout, xfer->ux_bus->ub_lock);
645	usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask,
646	    USB_TASKQ_HC, xfer->ux_bus->ub_lock);
647	mutex_exit(xfer->ux_bus->ub_lock);
648
649	cv_destroy(&xfer->ux_cv);
650	xfer->ux_bus->ub_methods->ubm_freex(xfer->ux_bus, xfer);
651}
652
653int
654usbd_create_xfer(struct usbd_pipe *pipe, size_t len, unsigned int flags,
655    unsigned int nframes, struct usbd_xfer **xp)
656{
657	KASSERT(xp != NULL);
658	void *buf = NULL;
659
660	struct usbd_xfer *xfer = usbd_alloc_xfer(pipe->up_dev, nframes);
661	if (xfer == NULL)
662		return ENOMEM;
663
664	xfer->ux_pipe = pipe;
665	xfer->ux_flags = flags;
666	xfer->ux_nframes = nframes;
667	xfer->ux_methods = pipe->up_methods;
668
669	if (len) {
670		buf = usbd_alloc_buffer(xfer, len);
671		if (!buf) {
672			usbd_free_xfer(xfer);
673			return ENOMEM;
674		}
675	}
676
677	if (xfer->ux_methods->upm_init) {
678		int err = xfer->ux_methods->upm_init(xfer);
679		if (err) {
680			usbd_free_xfer(xfer);
681			return err;
682		}
683	}
684
685	*xp = xfer;
686	SDT_PROBE5(usb, device, xfer, create,
687	    xfer, pipe, len, flags, nframes);
688	return 0;
689}
690
691void
692usbd_destroy_xfer(struct usbd_xfer *xfer)
693{
694
695	SDT_PROBE1(usb, device, xfer, destroy,  xfer);
696	if (xfer->ux_methods->upm_fini)
697		xfer->ux_methods->upm_fini(xfer);
698
699	usbd_free_xfer(xfer);
700}
701
702void
703usbd_setup_xfer(struct usbd_xfer *xfer, void *priv, void *buffer,
704    uint32_t length, uint16_t flags, uint32_t timeout, usbd_callback callback)
705{
706	KASSERT(xfer->ux_pipe);
707
708	xfer->ux_priv = priv;
709	xfer->ux_buffer = buffer;
710	xfer->ux_length = length;
711	xfer->ux_actlen = 0;
712	xfer->ux_flags = flags;
713	xfer->ux_timeout = timeout;
714	xfer->ux_status = USBD_NOT_STARTED;
715	xfer->ux_callback = callback;
716	xfer->ux_rqflags &= ~URQ_REQUEST;
717	xfer->ux_nframes = 0;
718}
719
720void
721usbd_setup_default_xfer(struct usbd_xfer *xfer, struct usbd_device *dev,
722    void *priv, uint32_t timeout, usb_device_request_t *req, void *buffer,
723    uint32_t length, uint16_t flags, usbd_callback callback)
724{
725	KASSERT(xfer->ux_pipe == dev->ud_pipe0);
726
727	xfer->ux_priv = priv;
728	xfer->ux_buffer = buffer;
729	xfer->ux_length = length;
730	xfer->ux_actlen = 0;
731	xfer->ux_flags = flags;
732	xfer->ux_timeout = timeout;
733	xfer->ux_status = USBD_NOT_STARTED;
734	xfer->ux_callback = callback;
735	xfer->ux_request = *req;
736	xfer->ux_rqflags |= URQ_REQUEST;
737	xfer->ux_nframes = 0;
738}
739
740void
741usbd_setup_isoc_xfer(struct usbd_xfer *xfer, void *priv, uint16_t *frlengths,
742    uint32_t nframes, uint16_t flags, usbd_callback callback)
743{
744	xfer->ux_priv = priv;
745	xfer->ux_buffer = NULL;
746	xfer->ux_length = 0;
747	xfer->ux_actlen = 0;
748	xfer->ux_flags = flags;
749	xfer->ux_timeout = USBD_NO_TIMEOUT;
750	xfer->ux_status = USBD_NOT_STARTED;
751	xfer->ux_callback = callback;
752	xfer->ux_rqflags &= ~URQ_REQUEST;
753	xfer->ux_frlengths = frlengths;
754	xfer->ux_nframes = nframes;
755
756	for (size_t i = 0; i < xfer->ux_nframes; i++)
757		xfer->ux_length += xfer->ux_frlengths[i];
758}
759
760void
761usbd_get_xfer_status(struct usbd_xfer *xfer, void **priv,
762		     void **buffer, uint32_t *count, usbd_status *status)
763{
764	if (priv != NULL)
765		*priv = xfer->ux_priv;
766	if (buffer != NULL)
767		*buffer = xfer->ux_buffer;
768	if (count != NULL)
769		*count = xfer->ux_actlen;
770	if (status != NULL)
771		*status = xfer->ux_status;
772}
773
774usb_config_descriptor_t *
775usbd_get_config_descriptor(struct usbd_device *dev)
776{
777	KASSERT(dev != NULL);
778
779	return dev->ud_cdesc;
780}
781
782usb_interface_descriptor_t *
783usbd_get_interface_descriptor(struct usbd_interface *iface)
784{
785	KASSERT(iface != NULL);
786
787	return iface->ui_idesc;
788}
789
790usb_device_descriptor_t *
791usbd_get_device_descriptor(struct usbd_device *dev)
792{
793	KASSERT(dev != NULL);
794
795	return &dev->ud_ddesc;
796}
797
798usb_endpoint_descriptor_t *
799usbd_interface2endpoint_descriptor(struct usbd_interface *iface, uint8_t index)
800{
801
802	if (index >= iface->ui_idesc->bNumEndpoints)
803		return NULL;
804	return iface->ui_endpoints[index].ue_edesc;
805}
806
807/* Some drivers may wish to abort requests on the default pipe, *
808 * but there is no mechanism for getting a handle on it.        */
809void
810usbd_abort_default_pipe(struct usbd_device *device)
811{
812	usbd_abort_pipe(device->ud_pipe0);
813}
814
815void
816usbd_abort_pipe(struct usbd_pipe *pipe)
817{
818
819	usbd_suspend_pipe(pipe);
820	usbd_resume_pipe(pipe);
821}
822
823void
824usbd_suspend_pipe(struct usbd_pipe *pipe)
825{
826
827	usbd_lock_pipe(pipe);
828	usbd_ar_pipe(pipe);
829	usbd_unlock_pipe(pipe);
830}
831
832void
833usbd_resume_pipe(struct usbd_pipe *pipe)
834{
835
836	usbd_lock_pipe(pipe);
837	KASSERT(SIMPLEQ_EMPTY(&pipe->up_queue));
838	pipe->up_aborting = 0;
839	usbd_unlock_pipe(pipe);
840}
841
842usbd_status
843usbd_clear_endpoint_stall(struct usbd_pipe *pipe)
844{
845	struct usbd_device *dev = pipe->up_dev;
846	usbd_status err;
847
848	USBHIST_FUNC(); USBHIST_CALLED(usbdebug);
849	SDT_PROBE1(usb, device, pipe, clear__endpoint__stall,  pipe);
850
851	/*
852	 * Clearing an endpoint stall resets the endpoint toggle, so
853	 * do the same to the HC toggle.
854	 */
855	SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle,  pipe);
856	pipe->up_methods->upm_cleartoggle(pipe);
857
858	err = usbd_clear_endpoint_feature(dev,
859	    pipe->up_endpoint->ue_edesc->bEndpointAddress, UF_ENDPOINT_HALT);
860#if 0
861XXX should we do this?
862	if (!err) {
863		pipe->state = USBD_PIPE_ACTIVE;
864		/* XXX activate pipe */
865	}
866#endif
867	return err;
868}
869
870void
871usbd_clear_endpoint_stall_task(void *arg)
872{
873	struct usbd_pipe *pipe = arg;
874	struct usbd_device *dev = pipe->up_dev;
875
876	SDT_PROBE1(usb, device, pipe, clear__endpoint__stall,  pipe);
877	SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle,  pipe);
878	pipe->up_methods->upm_cleartoggle(pipe);
879
880	(void)usbd_clear_endpoint_feature(dev,
881	    pipe->up_endpoint->ue_edesc->bEndpointAddress, UF_ENDPOINT_HALT);
882}
883
884void
885usbd_clear_endpoint_stall_async(struct usbd_pipe *pipe)
886{
887	usb_add_task(pipe->up_dev, &pipe->up_async_task, USB_TASKQ_DRIVER);
888}
889
890void
891usbd_clear_endpoint_toggle(struct usbd_pipe *pipe)
892{
893
894	SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle,  pipe);
895	pipe->up_methods->upm_cleartoggle(pipe);
896}
897
898usbd_status
899usbd_endpoint_count(struct usbd_interface *iface, uint8_t *count)
900{
901	KASSERT(iface != NULL);
902	KASSERT(iface->ui_idesc != NULL);
903
904	*count = iface->ui_idesc->bNumEndpoints;
905	return USBD_NORMAL_COMPLETION;
906}
907
908usbd_status
909usbd_interface_count(struct usbd_device *dev, uint8_t *count)
910{
911
912	if (dev->ud_cdesc == NULL)
913		return USBD_NOT_CONFIGURED;
914	*count = dev->ud_cdesc->bNumInterface;
915	return USBD_NORMAL_COMPLETION;
916}
917
918void
919usbd_interface2device_handle(struct usbd_interface *iface,
920			     struct usbd_device **dev)
921{
922
923	*dev = iface->ui_dev;
924}
925
926usbd_status
927usbd_device2interface_handle(struct usbd_device *dev,
928			     uint8_t ifaceno, struct usbd_interface **iface)
929{
930
931	if (dev->ud_cdesc == NULL)
932		return USBD_NOT_CONFIGURED;
933	if (ifaceno >= dev->ud_cdesc->bNumInterface)
934		return USBD_INVAL;
935	*iface = &dev->ud_ifaces[ifaceno];
936	return USBD_NORMAL_COMPLETION;
937}
938
939struct usbd_device *
940usbd_pipe2device_handle(struct usbd_pipe *pipe)
941{
942	KASSERT(pipe != NULL);
943
944	return pipe->up_dev;
945}
946
947/* XXXX use altno */
948usbd_status
949usbd_set_interface(struct usbd_interface *iface, int altidx)
950{
951	bool locked = false;
952	usb_device_request_t req;
953	usbd_status err;
954
955	USBHIST_FUNC();
956	USBHIST_CALLARGS(usbdebug, "iface %#jx", (uintptr_t)iface, 0, 0, 0);
957
958	err = usbd_iface_lock(iface);
959	if (err)
960		goto out;
961	locked = true;
962
963	err = usbd_fill_iface_data(iface->ui_dev, iface->ui_index, altidx);
964	if (err)
965		goto out;
966
967	req.bmRequestType = UT_WRITE_INTERFACE;
968	req.bRequest = UR_SET_INTERFACE;
969	USETW(req.wValue, iface->ui_idesc->bAlternateSetting);
970	USETW(req.wIndex, iface->ui_idesc->bInterfaceNumber);
971	USETW(req.wLength, 0);
972	err = usbd_do_request(iface->ui_dev, &req, 0);
973
974out:	/* XXX back out iface data?  */
975	if (locked)
976		usbd_iface_unlock(iface);
977	return err;
978}
979
980int
981usbd_get_no_alts(usb_config_descriptor_t *cdesc, int ifaceno)
982{
983	char *p = (char *)cdesc;
984	char *end = p + UGETW(cdesc->wTotalLength);
985	usb_descriptor_t *desc;
986	usb_interface_descriptor_t *idesc;
987	int n;
988
989	for (n = 0; end - p >= sizeof(*desc); p += desc->bLength) {
990		desc = (usb_descriptor_t *)p;
991		if (desc->bLength < sizeof(*desc) || desc->bLength > end - p)
992			break;
993		if (desc->bDescriptorType != UDESC_INTERFACE)
994			continue;
995		if (desc->bLength < sizeof(*idesc))
996			break;
997		idesc = (usb_interface_descriptor_t *)desc;
998		if (idesc->bInterfaceNumber == ifaceno) {
999			n++;
1000			if (n == INT_MAX)
1001				break;
1002		}
1003	}
1004	return n;
1005}
1006
1007int
1008usbd_get_interface_altindex(struct usbd_interface *iface)
1009{
1010	return iface->ui_altindex;
1011}
1012
1013usbd_status
1014usbd_get_interface(struct usbd_interface *iface, uint8_t *aiface)
1015{
1016	usb_device_request_t req;
1017
1018	req.bmRequestType = UT_READ_INTERFACE;
1019	req.bRequest = UR_GET_INTERFACE;
1020	USETW(req.wValue, 0);
1021	USETW(req.wIndex, iface->ui_idesc->bInterfaceNumber);
1022	USETW(req.wLength, 1);
1023	return usbd_do_request(iface->ui_dev, &req, aiface);
1024}
1025
1026/*** Internal routines ***/
1027
1028/* Dequeue all pipe operations, called with bus lock held. */
1029Static void
1030usbd_ar_pipe(struct usbd_pipe *pipe)
1031{
1032	struct usbd_xfer *xfer;
1033
1034	USBHIST_FUNC();
1035	USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 0);
1036	SDT_PROBE1(usb, device, pipe, abort__start,  pipe);
1037
1038	ASSERT_SLEEPABLE();
1039	KASSERT(mutex_owned(pipe->up_dev->ud_bus->ub_lock));
1040	KASSERT(pipe->up_dev->ud_bus->ub_usepolling == 0);
1041
1042	/*
1043	 * Allow only one thread at a time to abort the pipe, so we
1044	 * don't get confused if upm_abort drops the lock in the middle
1045	 * of the abort to wait for hardware completion softints to
1046	 * stop using the xfer before returning.
1047	 */
1048	KASSERTMSG(pipe->up_abortlwp == NULL, "pipe->up_abortlwp=%p",
1049	    pipe->up_abortlwp);
1050	pipe->up_abortlwp = curlwp;
1051
1052#ifdef USB_DEBUG
1053	if (usbdebug > 5)
1054		usbd_dump_queue(pipe);
1055#endif
1056	pipe->up_repeat = 0;
1057	pipe->up_running = 0;
1058	pipe->up_aborting = 1;
1059	while ((xfer = SIMPLEQ_FIRST(&pipe->up_queue)) != NULL) {
1060		USBHIST_LOG(usbdebug, "pipe = %#jx xfer = %#jx "
1061		    "(methods = %#jx)", (uintptr_t)pipe, (uintptr_t)xfer,
1062		    (uintptr_t)pipe->up_methods, 0);
1063		if (xfer->ux_status == USBD_NOT_STARTED) {
1064			SDT_PROBE1(usb, device, xfer, preabort,  xfer);
1065#ifdef DIAGNOSTIC
1066			xfer->ux_state = XFER_BUSY;
1067#endif
1068			SIMPLEQ_REMOVE_HEAD(&pipe->up_queue, ux_next);
1069		} else {
1070			/* Make the HC abort it (and invoke the callback). */
1071			SDT_PROBE1(usb, device, xfer, abort,  xfer);
1072			pipe->up_methods->upm_abort(xfer);
1073			while (pipe->up_callingxfer == xfer) {
1074				USBHIST_LOG(usbdebug, "wait for callback "
1075				    "pipe = %#jx xfer = %#jx",
1076				    (uintptr_t)pipe, (uintptr_t)xfer, 0, 0);
1077				cv_wait(&pipe->up_callingcv,
1078				    pipe->up_dev->ud_bus->ub_lock);
1079			}
1080			/* XXX only for non-0 usbd_clear_endpoint_stall(pipe); */
1081		}
1082	}
1083
1084	/*
1085	 * There may be an xfer callback already in progress which was
1086	 * taken off the queue before we got to it.  We must wait for
1087	 * the callback to finish before returning control to the
1088	 * caller.
1089	 */
1090	while (pipe->up_callingxfer) {
1091		USBHIST_LOG(usbdebug, "wait for callback "
1092		    "pipe = %#jx xfer = %#jx",
1093		    (uintptr_t)pipe, (uintptr_t)pipe->up_callingxfer, 0, 0);
1094		cv_wait(&pipe->up_callingcv, pipe->up_dev->ud_bus->ub_lock);
1095	}
1096
1097	KASSERT(mutex_owned(pipe->up_dev->ud_bus->ub_lock));
1098	KASSERTMSG(pipe->up_abortlwp == curlwp, "pipe->up_abortlwp=%p",
1099	    pipe->up_abortlwp);
1100	pipe->up_abortlwp = NULL;
1101
1102	SDT_PROBE1(usb, device, pipe, abort__done,  pipe);
1103}
1104
1105/* Called with USB lock held. */
1106void
1107usb_transfer_complete(struct usbd_xfer *xfer)
1108{
1109	struct usbd_pipe *pipe = xfer->ux_pipe;
1110	struct usbd_bus *bus = pipe->up_dev->ud_bus;
1111	int sync = xfer->ux_flags & USBD_SYNCHRONOUS;
1112	int erred;
1113	int polling = bus->ub_usepolling;
1114	int repeat = pipe->up_repeat;
1115
1116	USBHIST_FUNC();
1117	USBHIST_CALLARGS(usbdebug, "pipe = %#jx xfer = %#jx status = %jd "
1118	    "actlen = %jd", (uintptr_t)pipe, (uintptr_t)xfer, xfer->ux_status,
1119	    xfer->ux_actlen);
1120
1121	KASSERT(polling || mutex_owned(pipe->up_dev->ud_bus->ub_lock));
1122	KASSERTMSG(xfer->ux_state == XFER_ONQU, "xfer %p state is %x", xfer,
1123	    xfer->ux_state);
1124	KASSERT(pipe != NULL);
1125
1126	/*
1127	 * If the device is known to miss ACKs for OUT transfers, then
1128	 * pretend that an output timeout is a success.  Userland should
1129	 * handle the logic to verify that the operation succeeded.
1130	 */
1131	if (pipe->up_dev->ud_quirks &&
1132	    pipe->up_dev->ud_quirks->uq_flags & UQ_MISS_OUT_ACK &&
1133	    xfer->ux_status == USBD_TIMEOUT &&
1134	    !usbd_xfer_isread(xfer)) {
1135		USBHIST_LOG(usbdebug, "Possible output ack miss for xfer %#jx: "
1136		    "hiding write timeout to %jd.%jd for %ju bytes written",
1137		    (uintptr_t)xfer, curlwp->l_proc->p_pid, curlwp->l_lid,
1138		    xfer->ux_length);
1139
1140		xfer->ux_status = USBD_NORMAL_COMPLETION;
1141		xfer->ux_actlen = xfer->ux_length;
1142	}
1143
1144	erred = xfer->ux_status == USBD_CANCELLED ||
1145	        xfer->ux_status == USBD_TIMEOUT;
1146
1147	if (!repeat) {
1148		/* Remove request from queue. */
1149
1150		KASSERTMSG(!SIMPLEQ_EMPTY(&pipe->up_queue),
1151		    "pipe %p is empty, but xfer %p wants to complete", pipe,
1152		     xfer);
1153		KASSERTMSG(xfer == SIMPLEQ_FIRST(&pipe->up_queue),
1154		    "xfer %p is not start of queue (%p is at start)", xfer,
1155		   SIMPLEQ_FIRST(&pipe->up_queue));
1156
1157#ifdef DIAGNOSTIC
1158		xfer->ux_state = XFER_BUSY;
1159#endif
1160		SIMPLEQ_REMOVE_HEAD(&pipe->up_queue, ux_next);
1161	}
1162	USBHIST_LOG(usbdebug, "xfer %#jx: repeat %jd new head = %#jx",
1163	    (uintptr_t)xfer, repeat, (uintptr_t)SIMPLEQ_FIRST(&pipe->up_queue),
1164	    0);
1165
1166	/* Count completed transfers. */
1167	++pipe->up_dev->ud_bus->ub_stats.uds_requests
1168		[pipe->up_endpoint->ue_edesc->bmAttributes & UE_XFERTYPE];
1169
1170	xfer->ux_done = 1;
1171	if (!xfer->ux_status && xfer->ux_actlen < xfer->ux_length &&
1172	    !(xfer->ux_flags & USBD_SHORT_XFER_OK)) {
1173		USBHIST_LOG(usbdebug, "short transfer %jd < %jd",
1174		    xfer->ux_actlen, xfer->ux_length, 0, 0);
1175		xfer->ux_status = USBD_SHORT_XFER;
1176	}
1177
1178	USBHIST_LOG(usbdebug, "xfer %#jx doing done %#jx", (uintptr_t)xfer,
1179	    (uintptr_t)pipe->up_methods->upm_done, 0, 0);
1180	SDT_PROBE2(usb, device, xfer, done,  xfer, xfer->ux_status);
1181	pipe->up_methods->upm_done(xfer);
1182
1183	if (xfer->ux_length != 0 && xfer->ux_buffer != xfer->ux_buf) {
1184		KDASSERTMSG(xfer->ux_actlen <= xfer->ux_length,
1185		    "actlen %d length %d",xfer->ux_actlen, xfer->ux_length);
1186
1187		/* Only if IN transfer */
1188		if (usbd_xfer_isread(xfer)) {
1189			memcpy(xfer->ux_buffer, xfer->ux_buf, xfer->ux_actlen);
1190		}
1191	}
1192
1193	USBHIST_LOG(usbdebug, "xfer %#jx doing callback %#jx status %jd",
1194	    (uintptr_t)xfer, (uintptr_t)xfer->ux_callback, xfer->ux_status, 0);
1195
1196	if (xfer->ux_callback) {
1197		if (!polling) {
1198			KASSERT(pipe->up_callingxfer == NULL);
1199			pipe->up_callingxfer = xfer;
1200			mutex_exit(pipe->up_dev->ud_bus->ub_lock);
1201			if (!(pipe->up_flags & USBD_MPSAFE))
1202				KERNEL_LOCK(1, curlwp);
1203		}
1204
1205		xfer->ux_callback(xfer, xfer->ux_priv, xfer->ux_status);
1206
1207		if (!polling) {
1208			if (!(pipe->up_flags & USBD_MPSAFE))
1209				KERNEL_UNLOCK_ONE(curlwp);
1210			mutex_enter(pipe->up_dev->ud_bus->ub_lock);
1211			KASSERT(pipe->up_callingxfer == xfer);
1212			pipe->up_callingxfer = NULL;
1213			cv_broadcast(&pipe->up_callingcv);
1214		}
1215	}
1216
1217	if (sync && !polling) {
1218		USBHIST_LOG(usbdebug, "<- done xfer %#jx, wakeup",
1219		    (uintptr_t)xfer, 0, 0, 0);
1220		cv_broadcast(&xfer->ux_cv);
1221	}
1222
1223	if (repeat) {
1224		xfer->ux_actlen = 0;
1225		xfer->ux_status = USBD_NOT_STARTED;
1226	} else {
1227		/* XXX should we stop the queue on all errors? */
1228		if (erred && pipe->up_iface != NULL)	/* not control pipe */
1229			pipe->up_running = 0;
1230	}
1231	if (pipe->up_running && pipe->up_serialise)
1232		usbd_start_next(pipe);
1233}
1234
1235/* Called with USB lock held. */
1236void
1237usbd_start_next(struct usbd_pipe *pipe)
1238{
1239	struct usbd_xfer *xfer;
1240	usbd_status err;
1241
1242	USBHIST_FUNC();
1243
1244	KASSERT(pipe != NULL);
1245	KASSERT(pipe->up_methods != NULL);
1246	KASSERT(pipe->up_methods->upm_start != NULL);
1247	KASSERT(pipe->up_serialise == true);
1248
1249	int polling = pipe->up_dev->ud_bus->ub_usepolling;
1250	KASSERT(polling || mutex_owned(pipe->up_dev->ud_bus->ub_lock));
1251
1252	/* Get next request in queue. */
1253	xfer = SIMPLEQ_FIRST(&pipe->up_queue);
1254	USBHIST_CALLARGS(usbdebug, "pipe = %#jx, xfer = %#jx", (uintptr_t)pipe,
1255	    (uintptr_t)xfer, 0, 0);
1256	if (xfer == NULL) {
1257		pipe->up_running = 0;
1258	} else {
1259		SDT_PROBE2(usb, device, pipe, start,  pipe, xfer);
1260		err = pipe->up_methods->upm_start(xfer);
1261
1262		if (err != USBD_IN_PROGRESS) {
1263			USBHIST_LOG(usbdebug, "error = %jd", err, 0, 0, 0);
1264			pipe->up_running = 0;
1265			/* XXX do what? */
1266		}
1267	}
1268
1269	KASSERT(polling || mutex_owned(pipe->up_dev->ud_bus->ub_lock));
1270}
1271
1272usbd_status
1273usbd_do_request(struct usbd_device *dev, usb_device_request_t *req, void *data)
1274{
1275
1276	return usbd_do_request_flags(dev, req, data, 0, 0,
1277	    USBD_DEFAULT_TIMEOUT);
1278}
1279
1280usbd_status
1281usbd_do_request_flags(struct usbd_device *dev, usb_device_request_t *req,
1282    void *data, uint16_t flags, int *actlen, uint32_t timeout)
1283{
1284	size_t len = UGETW(req->wLength);
1285
1286	return usbd_do_request_len(dev, req, len, data, flags, actlen, timeout);
1287}
1288
1289usbd_status
1290usbd_do_request_len(struct usbd_device *dev, usb_device_request_t *req,
1291    size_t len, void *data, uint16_t flags, int *actlen, uint32_t timeout)
1292{
1293	struct usbd_xfer *xfer;
1294	usbd_status err;
1295
1296	KASSERT(len >= UGETW(req->wLength));
1297
1298	USBHIST_FUNC();
1299	USBHIST_CALLARGS(usbdebug, "dev=%#jx req=%jx flags=%jx len=%jx",
1300	    (uintptr_t)dev, (uintptr_t)req, flags, len);
1301
1302	ASSERT_SLEEPABLE();
1303
1304	SDT_PROBE5(usb, device, request, start,
1305	    dev, req, len, flags, timeout);
1306
1307	int error = usbd_create_xfer(dev->ud_pipe0, len, 0, 0, &xfer);
1308	if (error) {
1309		SDT_PROBE7(usb, device, request, done,
1310		    dev, req, /*actlen*/0, flags, timeout, data, USBD_NOMEM);
1311		return USBD_NOMEM;
1312	}
1313
1314	usbd_setup_default_xfer(xfer, dev, 0, timeout, req, data,
1315	    UGETW(req->wLength), flags, NULL);
1316	KASSERT(xfer->ux_pipe == dev->ud_pipe0);
1317	err = usbd_sync_transfer(xfer);
1318#if defined(USB_DEBUG) || defined(DIAGNOSTIC)
1319	if (xfer->ux_actlen > xfer->ux_length) {
1320		USBHIST_LOG(usbdebug, "overrun addr = %jd type = 0x%02jx",
1321		    dev->ud_addr, xfer->ux_request.bmRequestType, 0, 0);
1322		USBHIST_LOG(usbdebug, "     req = 0x%02jx val = %jd "
1323		    "index = %jd",
1324		    xfer->ux_request.bRequest, UGETW(xfer->ux_request.wValue),
1325		    UGETW(xfer->ux_request.wIndex), 0);
1326		USBHIST_LOG(usbdebug, "     rlen = %jd length = %jd "
1327		    "actlen = %jd",
1328		    UGETW(xfer->ux_request.wLength),
1329		    xfer->ux_length, xfer->ux_actlen, 0);
1330	}
1331#endif
1332	if (actlen != NULL)
1333		*actlen = xfer->ux_actlen;
1334
1335	usbd_destroy_xfer(xfer);
1336
1337	SDT_PROBE7(usb, device, request, done,
1338	    dev, req, xfer->ux_actlen, flags, timeout, data, err);
1339
1340	if (err) {
1341		USBHIST_LOG(usbdebug, "returning err = %jd", err, 0, 0, 0);
1342	}
1343	return err;
1344}
1345
1346const struct usbd_quirks *
1347usbd_get_quirks(struct usbd_device *dev)
1348{
1349#ifdef DIAGNOSTIC
1350	if (dev == NULL) {
1351		printf("usbd_get_quirks: dev == NULL\n");
1352		return 0;
1353	}
1354#endif
1355	return dev->ud_quirks;
1356}
1357
1358/* XXX do periodic free() of free list */
1359
1360/*
1361 * Called from keyboard driver when in polling mode.
1362 */
1363void
1364usbd_dopoll(struct usbd_interface *iface)
1365{
1366	iface->ui_dev->ud_bus->ub_methods->ubm_dopoll(iface->ui_dev->ud_bus);
1367}
1368
1369/*
1370 * This is for the keyboard driver as well, which only operates in
1371 * polling mode from the "ask root device" prompt, etc., and from DDB.
1372 */
1373void
1374usbd_set_polling(struct usbd_device *dev, int on)
1375{
1376
1377	if (!db_active)
1378		mutex_enter(dev->ud_bus->ub_lock);
1379
1380	/*
1381	 * We call softint routine on polling transitions, so
1382	 * that completed/failed transfers have their callbacks
1383	 * called. In-progress transfers started before transition
1384	 * remain flying, and their completion after transition
1385	 * must be taken into account.
1386	 *
1387	 * The softint routine is called after enabling polling
1388	 * and before disabling it, so that holding sc->sc_lock
1389	 * is not required. DDB needs this because it cannot wait
1390	 * to acquire sc->sc_lock from a suspended thread.
1391	 */
1392	if (on) {
1393		KASSERT(dev->ud_bus->ub_usepolling < __type_max(char));
1394		dev->ud_bus->ub_usepolling++;
1395		if (dev->ud_bus->ub_usepolling == 1)
1396			dev->ud_bus->ub_methods->ubm_softint(dev->ud_bus);
1397	} else {
1398		KASSERT(dev->ud_bus->ub_usepolling > 0);
1399		if (dev->ud_bus->ub_usepolling == 1)
1400			dev->ud_bus->ub_methods->ubm_softint(dev->ud_bus);
1401		dev->ud_bus->ub_usepolling--;
1402	}
1403
1404	if (!db_active)
1405		mutex_exit(dev->ud_bus->ub_lock);
1406}
1407
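/*
 * Example: how a console keyboard driver might bracket polled input
 * (e.g. from a cngetc-style hook) using usbd_set_polling() and
 * usbd_dopoll().  Illustrative sketch only; "sc", its members and
 * "foo_handle_polled_input" are hypothetical.
 */
#if 0
	usbd_set_polling(sc->sc_udev, 1);	/* enter polling mode */
	usbd_dopoll(sc->sc_iface);		/* crank the HC by hand */
	foo_handle_polled_input(sc);		/* consume completed input */
	usbd_set_polling(sc->sc_udev, 0);	/* back to interrupt mode */
#endif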
1408
1409usb_endpoint_descriptor_t *
1410usbd_get_endpoint_descriptor(struct usbd_interface *iface, uint8_t address)
1411{
1412	struct usbd_endpoint *ep;
1413	int i;
1414
1415	for (i = 0; i < iface->ui_idesc->bNumEndpoints; i++) {
1416		ep = &iface->ui_endpoints[i];
1417		if (ep->ue_edesc->bEndpointAddress == address)
1418			return iface->ui_endpoints[i].ue_edesc;
1419	}
1420	return NULL;
1421}
1422
1423/*
1424 * usbd_ratecheck() can limit the number of error messages that occur.
1425 * When a device is unplugged it may take up to 0.25s for the hub driver
1426 * to notice it.  If the driver continuously tries to do I/O operations
1427 * this can generate a large number of messages.
1428 */
1429int
1430usbd_ratecheck(struct timeval *last)
1431{
1432	static struct timeval errinterval = { 0, 250000 }; /* 0.25 s*/
1433	static struct timeval errinterval = { 0, 250000 }; /* 0.25 s */
1434	return ratecheck(last, &errinterval);
1435}
1436
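/*
 * Example: rate-limiting error messages from an interrupt-pipe
 * callback with usbd_ratecheck().  Illustrative sketch only; the
 * "sc" softc and its "sc_lastintrerr" timestamp are hypothetical.
 */
#if 0
	if (status != USBD_NORMAL_COMPLETION) {
		if (usbd_ratecheck(&sc->sc_lastintrerr))
			device_printf(sc->sc_dev, "intr failed: %s\n",
			    usbd_errstr(status));
		return;
	}
#endif
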
1437/*
1438 * Search for a vendor/product pair in an array.  The item size is
1439 * given as an argument.
1440 */
1441const struct usb_devno *
1442usb_match_device(const struct usb_devno *tbl, u_int nentries, u_int sz,
1443		 uint16_t vendor, uint16_t product)
1444{
1445	while (nentries-- > 0) {
1446		uint16_t tproduct = tbl->ud_product;
1447		if (tbl->ud_vendor == vendor &&
1448		    (tproduct == product || tproduct == USB_PRODUCT_ANY))
1449			return tbl;
1450		tbl = (const struct usb_devno *)((const char *)tbl + sz);
1451	}
1452	return NULL;
1453}
1454
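/*
 * Example: probing a match table with usb_match_device().
 * Illustrative sketch only; the "foo_devs" table, the vendor/product
 * constants and the "uaa" attach arguments are hypothetical.
 */
#if 0
	static const struct usb_devno foo_devs[] = {
		{ USB_VENDOR_FOO, USB_PRODUCT_FOO_WIDGET },
		{ USB_VENDOR_FOO, USB_PRODUCT_ANY },
	};

	if (usb_match_device(foo_devs, __arraycount(foo_devs),
	    sizeof(foo_devs[0]), uaa->uaa_vendor, uaa->uaa_product) != NULL)
		return UMATCH_VENDOR_PRODUCT;
	return UMATCH_NONE;
#endif
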
1455usbd_status
1456usbd_get_string(struct usbd_device *dev, int si, char *buf)
1457{
1458	return usbd_get_string0(dev, si, buf, 1);
1459}
1460
1461usbd_status
1462usbd_get_string0(struct usbd_device *dev, int si, char *buf, int unicode)
1463{
1464	int swap = dev->ud_quirks->uq_flags & UQ_SWAP_UNICODE;
1465	usb_string_descriptor_t us;
1466	char *s;
1467	int i, n;
1468	uint16_t c;
1469	usbd_status err;
1470	int size;
1471
1472	USBHIST_FUNC(); USBHIST_CALLED(usbdebug);
1473
1474	buf[0] = '\0';
1475	if (si == 0)
1476		return USBD_INVAL;
1477	if (dev->ud_quirks->uq_flags & UQ_NO_STRINGS)
1478		return USBD_STALLED;
1479	if (dev->ud_langid == USBD_NOLANG) {
1480		/* Set up default language */
1481		err = usbd_get_string_desc(dev, USB_LANGUAGE_TABLE, 0, &us,
1482		    &size);
1483		if (err || size < 4) {
1484			USBHIST_LOG(usbdebug, "getting lang failed, using 0",
1485			    0, 0, 0, 0);
1486			dev->ud_langid = 0; /* Well, just pick something then */
1487		} else {
1488			/* Pick the first language as the default. */
1489			dev->ud_langid = UGETW(us.bString[0]);
1490		}
1491	}
1492	err = usbd_get_string_desc(dev, si, dev->ud_langid, &us, &size);
1493	if (err)
1494		return err;
1495	s = buf;
1496	n = size / 2 - 1;
1497	if (unicode) {
1498		for (i = 0; i < n; i++) {
1499			c = UGETW(us.bString[i]);
1500			if (swap)
1501				c = (c >> 8) | (c << 8);
1502			s += wput_utf8(s, 3, c);
1503		}
1504		*s++ = 0;
1505	}
1506#ifdef COMPAT_30
1507	else {
1508		for (i = 0; i < n; i++) {
1509			c = UGETW(us.bString[i]);
1510			if (swap)
1511				c = (c >> 8) | (c << 8);
1512			*s++ = (c < 0x80) ? c : '?';
1513		}
1514		*s++ = 0;
1515	}
1516#endif
1517	return USBD_NORMAL_COMPLETION;
1518}
1519
1520/*
1521 * usbd_xfer_trycomplete(xfer)
1522 *
1523 *	Try to claim xfer for completion.  Return true if successful,
1524 *	false if the xfer has been synchronously aborted or has timed
1525 *	out.
1526 *
1527 *	If this returns true, caller is responsible for setting
1528 *	xfer->ux_status and calling usb_transfer_complete.  To be used
1529 *	in a host controller interrupt handler.
1530 *
1531 *	Caller must either hold the bus lock or have the bus in polling
1532 *	mode.  If this succeeds, caller must proceed to call
1533 *	usb_transfer_complete under the bus lock or with polling
1534 *	enabled -- must not release and reacquire the bus lock in the
1535 *	meantime.  Failing to heed this rule may lead to catastrophe
1536 *	with abort or timeout.
1537 */
1538bool
1539usbd_xfer_trycomplete(struct usbd_xfer *xfer)
1540{
1541	struct usbd_bus *bus __diagused = xfer->ux_bus;
1542
1543	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
1544
1545	USBHIST_FUNC();
1546	USBHIST_CALLARGS(usbdebug, "xfer %#jx status %jd",
1547	    (uintptr_t)xfer, xfer->ux_status, 0, 0);
1548
1549	/*
1550	 * If software has completed it, either by synchronous abort or
1551	 * by timeout, too late.
1552	 */
1553	if (xfer->ux_status != USBD_IN_PROGRESS)
1554		return false;
1555
1556	/*
1557	 * We are completing the xfer.  Cancel the timeout if we can,
1558	 * but only asynchronously.  See usbd_xfer_cancel_timeout_async
1559	 * for why we need not wait for the callout or task here.
1560	 */
1561	usbd_xfer_cancel_timeout_async(xfer);
1562
1563	/* Success!  Note: Caller must set xfer->ux_status afterward.  */
1564	return true;
1565}
1566
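/*
 * Example: the shape of a host controller interrupt path built on
 * usbd_xfer_trycomplete().  Illustrative sketch only; "sc_lock" (the
 * bus lock) and the hardware bookkeeping that yields "nbytes" are
 * hypothetical.
 */
#if 0
	mutex_enter(&sc->sc_lock);
	if (!usbd_xfer_trycomplete(xfer)) {
		/* Software already aborted or timed it out; nothing to do. */
		mutex_exit(&sc->sc_lock);
		return;
	}
	xfer->ux_actlen = nbytes;	/* from the hardware descriptors */
	xfer->ux_status = USBD_NORMAL_COMPLETION;
	usb_transfer_complete(xfer);	/* still holding the bus lock */
	mutex_exit(&sc->sc_lock);
#endif
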
1567/*
1568 * usbd_xfer_abort(xfer)
1569 *
1570 *	Try to claim xfer to abort.  If successful, mark it completed
1571 *	with USBD_CANCELLED and call the bus-specific method to abort
1572 *	at the hardware level.
1573 *
1574 *	To be called in thread context from struct
1575 *	usbd_pipe_methods::upm_abort.
1576 *
1577 *	Caller must hold the bus lock.
1578 */
1579void
1580usbd_xfer_abort(struct usbd_xfer *xfer)
1581{
1582	struct usbd_bus *bus = xfer->ux_bus;
1583
1584	KASSERT(mutex_owned(bus->ub_lock));
1585
1586	USBHIST_FUNC();
1587	USBHIST_CALLARGS(usbdebug, "xfer %#jx status %jd",
1588	    (uintptr_t)xfer, xfer->ux_status, 0, 0);
1589
1590	/*
1591	 * If host controller interrupt or timer interrupt has
1592	 * completed it, too late.  But the xfer cannot be
1593	 * cancelled already -- only one caller can synchronously
1594	 * abort.
1595	 */
1596	KASSERT(xfer->ux_status != USBD_CANCELLED);
1597	if (xfer->ux_status != USBD_IN_PROGRESS)
1598		return;
1599
1600	/*
1601	 * Cancel the timeout if we can, but only asynchronously; see
1602	 * usbd_xfer_cancel_timeout_async for why we need not wait for
1603	 * the callout or task here.
1604	 */
1605	usbd_xfer_cancel_timeout_async(xfer);
1606
1607	/*
1608	 * We beat everyone else.  Claim the status as cancelled, do
1609	 * the bus-specific dance to abort the hardware, and complete
1610	 * the xfer.
1611	 */
1612	xfer->ux_status = USBD_CANCELLED;
1613	bus->ub_methods->ubm_abortx(xfer);
1614	usb_transfer_complete(xfer);
1615}
1616
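/*
 * Example: a host controller's struct usbd_pipe_methods::upm_abort
 * implementation is typically a thin wrapper around the function
 * above.  Illustrative sketch only; "foo_device_bulk_abort" is a
 * hypothetical name.
 */
#if 0
static void
foo_device_bulk_abort(struct usbd_xfer *xfer)
{
	KASSERT(mutex_owned(xfer->ux_bus->ub_lock));

	/* Marks the xfer USBD_CANCELLED and calls ubm_abortx for us. */
	usbd_xfer_abort(xfer);
}
#endif
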
1617/*
1618 * usbd_xfer_timeout(xfer)
1619 *
1620 *	Called at IPL_SOFTCLOCK when too much time has elapsed waiting
1621 *	for xfer to complete.  Since we can't abort the xfer at
1622 *	IPL_SOFTCLOCK, defer to a usb_task to run it in thread context,
1623 *	unless the xfer has completed or aborted concurrently -- and if
1624 *	the xfer has also been resubmitted, take care of rescheduling
1625 *	the callout.
1626 */
1627static void
1628usbd_xfer_timeout(void *cookie)
1629{
1630	struct usbd_xfer *xfer = cookie;
1631	struct usbd_bus *bus = xfer->ux_bus;
1632	struct usbd_device *dev = xfer->ux_pipe->up_dev;
1633
1634	/* Acquire the lock so we can transition the timeout state.  */
1635	mutex_enter(bus->ub_lock);
1636
1637	USBHIST_FUNC();
1638	USBHIST_CALLARGS(usbdebug, "xfer %#jx status %jd",
1639	    (uintptr_t)xfer, xfer->ux_status, 0, 0);
1640
1641	/*
1642	 * Use usbd_xfer_probe_timeout to check whether the timeout is
1643	 * still valid, or to reschedule the callout if necessary.  If
1644	 * it is still valid, schedule the task.
1645	 */
1646	if (usbd_xfer_probe_timeout(xfer)) {
1647		USBHIST_LOG(usbdebug, "xfer %#jx schedule timeout task",
1648		    (uintptr_t)xfer, 0, 0, 0);
1649		usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC);
1650	} else {
1651		USBHIST_LOG(usbdebug, "xfer %#jx timeout cancelled",
1652		    (uintptr_t)xfer, 0, 0, 0);
1653	}
1654
1655	/*
1656	 * Notify usbd_xfer_cancel_timeout_async that we may have
1657	 * scheduled the task.  This causes callout_invoking to return
1658	 * false in usbd_xfer_cancel_timeout_async so that it can tell
1659	 * which stage in the callout->task->abort process we're at.
1660	 */
1661	callout_ack(&xfer->ux_callout);
1662
1663	/* All done -- release the lock.  */
1664	mutex_exit(bus->ub_lock);
1665}
1666
1667/*
1668 * usbd_xfer_timeout_task(xfer)
1669 *
1670 *	Called in thread context when too much time has elapsed waiting
1671 *	for xfer to complete.  Abort the xfer with USBD_TIMEOUT, unless
1672 *	it has completed or aborted concurrently -- and if the xfer has
1673 *	also been resubmitted, take care of rescheduling the callout.
1674 */
1675static void
1676usbd_xfer_timeout_task(void *cookie)
1677{
1678	struct usbd_xfer *xfer = cookie;
1679	struct usbd_bus *bus = xfer->ux_bus;
1680
1681	/* Acquire the lock so we can transition the timeout state.  */
1682	mutex_enter(bus->ub_lock);
1683
1684	USBHIST_FUNC();
1685	USBHIST_CALLARGS(usbdebug, "xfer %#jx status %jd",
1686	    (uintptr_t)xfer, xfer->ux_status, 0, 0);
1687
1688	/*
1689	 * Use usbd_xfer_probe_timeout to check whether the timeout is
1690	 * still valid, or to reschedule the callout if necessary.  If
1691	 * it is not valid -- the timeout has been asynchronously
1692	 * cancelled, or the xfer has already been resubmitted -- then
1693	 * we're done here.
1694	 */
1695	if (!usbd_xfer_probe_timeout(xfer)) {
1696		USBHIST_LOG(usbdebug, "xfer %#jx timeout cancelled",
1697		    (uintptr_t)xfer, 0, 0, 0);
1698		goto out;
1699	}
1700
1701	/*
1702	 * After this point, no further timeout probing will happen for
1703	 * the current incarnation of the timeout, so make the next
1704	 * usbd_xfer_schedule_timeout schedule a new callout.
1705	 * usbd_xfer_probe_timeout has already processed any reset.
1706	 */
1707	KASSERT(!xfer->ux_timeout_reset);
1708	xfer->ux_timeout_set = false;
1709
1710	/*
1711	 * May have completed or been aborted, but we're the only one
1712	 * who can time it out.  If it has completed or been aborted,
1713	 * no need to timeout.
1714	 */
1715	KASSERT(xfer->ux_status != USBD_TIMEOUT);
1716	if (xfer->ux_status != USBD_IN_PROGRESS) {
1717		USBHIST_LOG(usbdebug, "xfer %#jx timeout raced",
1718		    (uintptr_t)xfer, 0, 0, 0);
1719		goto out;
1720	}
1721
1722	/*
1723	 * We beat everyone else.  Claim the status as timed out, do
1724	 * the bus-specific dance to abort the hardware, and complete
1725	 * the xfer.
1726	 */
1727	USBHIST_LOG(usbdebug, "xfer %#jx timed out",
1728	    (uintptr_t)xfer, 0, 0, 0);
1729	xfer->ux_status = USBD_TIMEOUT;
1730	bus->ub_methods->ubm_abortx(xfer);
1731	usb_transfer_complete(xfer);
1732
1733out:	/* All done -- release the lock.  */
1734	mutex_exit(bus->ub_lock);
1735}
1736
1737/*
1738 * usbd_xfer_probe_timeout(xfer)
1739 *
1740 *	Probe the status of xfer's timeout.  Acknowledge and process a
1741 *	request to reschedule.  Return true if the timeout is still
1742 *	valid and the caller should take further action (queueing a
1743 *	task or aborting the xfer), false if it must stop here.
1744 */
1745static bool
1746usbd_xfer_probe_timeout(struct usbd_xfer *xfer)
1747{
1748	struct usbd_bus *bus = xfer->ux_bus;
1749	bool valid;
1750
1751	USBHIST_FUNC();
1752	USBHIST_CALLARGS(usbdebug, "xfer %#jx timeout %jdms"
1753	    " set %jd reset %jd",
1754	    (uintptr_t)xfer, xfer->ux_timeout,
1755	    xfer->ux_timeout_set, xfer->ux_timeout_reset);
1756
1757	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
1758
1759	/* The timeout must be set.  */
1760	KASSERT(xfer->ux_timeout_set);
1761
1762	/*
1763	 * Neither callout nor task may be pending; they execute
1764	 * alternately in lock step.
1765	 */
1766	KASSERT(!callout_pending(&xfer->ux_callout));
1767	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));
1768
1769	/* There are a few cases... */
1770	if (bus->ub_methods->ubm_dying(bus)) {
1771		/* Host controller dying.  Drop it all on the floor.  */
1772		USBHIST_LOG(usbdebug, "xfer %#jx bus dying, not rescheduling",
1773		    (uintptr_t)xfer, 0, 0, 0);
1774		xfer->ux_timeout_set = false;
1775		xfer->ux_timeout_reset = false;
1776		valid = false;
1777	} else if (xfer->ux_timeout_reset) {
1778		/*
1779		 * The xfer completed _and_ got resubmitted while we
1780		 * waited for the lock.  Acknowledge the request to
1781		 * reschedule, and reschedule it if there is a timeout
1782		 * and the bus is not polling.
1783		 */
1784		xfer->ux_timeout_reset = false;
1785		if (xfer->ux_timeout && !bus->ub_usepolling) {
1786			USBHIST_LOG(usbdebug, "xfer %#jx resubmitted,"
1787			    " rescheduling timer for %jdms",
1788			    (uintptr_t)xfer, xfer->ux_timeout, 0, 0);
1789			KASSERT(xfer->ux_timeout_set);
1790			callout_schedule(&xfer->ux_callout,
1791			    mstohz(xfer->ux_timeout));
1792		} else {
1793			/* No more callout or task scheduled.  */
1794			USBHIST_LOG(usbdebug, "xfer %#jx resubmitted"
1795			    " and completed, not rescheduling",
1796			    (uintptr_t)xfer, 0, 0, 0);
1797			xfer->ux_timeout_set = false;
1798		}
1799		valid = false;
1800	} else if (xfer->ux_status != USBD_IN_PROGRESS) {
1801		/*
1802		 * The xfer has completed by hardware completion or by
1803		 * software abort, and has not been resubmitted, so the
1804		 * timeout must be unset, and is no longer valid for
1805		 * the caller.
1806		 */
1807		USBHIST_LOG(usbdebug, "xfer %#jx timeout lost race,"
1808		    " status=%jd, not rescheduling",
1809		    (uintptr_t)xfer, xfer->ux_status, 0, 0);
1810		xfer->ux_timeout_set = false;
1811		valid = false;
1812	} else {
1813		/*
1814		 * The xfer has not yet completed, so the timeout is
1815		 * valid.
1816		 */
1817		USBHIST_LOG(usbdebug, "xfer %#jx timing out",
1818		    (uintptr_t)xfer, 0, 0, 0);
1819		valid = true;
1820	}
1821
1822	/* Any reset must have been processed.  */
1823	KASSERT(!xfer->ux_timeout_reset);
1824
1825	/*
1826	 * Either we claim the timeout is set, or the callout is idle.
1827	 * If the timeout is still set, we may be handing off to the
1828	 * task instead, so this is an if but not an iff.
1829	 */
1830	KASSERT(xfer->ux_timeout_set || !callout_pending(&xfer->ux_callout));
1831
1832	/*
1833	 * The task must be idle now.
1834	 *
1835	 * - If the caller is the callout, _and_ the timeout is still
1836	 *   valid, the caller will schedule it, but it hasn't been
1837	 *   scheduled yet.  (If the timeout is not valid, the task
1838	 *   should not be scheduled.)
1839	 *
1840	 * - If the caller is the task, it cannot be scheduled again
1841	 *   until the callout runs again, which won't happen until we
1842	 *   next release the lock.
1843	 */
1844	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));
1845
1846	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
1847
1848	return valid;
1849}
1850
1851/*
1852 * usbd_xfer_schedule_timeout(xfer)
1853 *
1854 *	Ensure that xfer has a timeout.  If the callout is already
1855 *	queued or the task is already running, request that they
1856 *	reschedule the callout.  If not, and if we're not polling,
1857 *	schedule the callout anew.
1858 *
1859 *	To be called in thread context from struct
1860 *	usbd_pipe_methods::upm_start.
1861 */
1862void
1863usbd_xfer_schedule_timeout(struct usbd_xfer *xfer)
1864{
1865	struct usbd_bus *bus = xfer->ux_bus;
1866
1867	USBHIST_FUNC();
1868	USBHIST_CALLARGS(usbdebug, "xfer %#jx timeout %jdms"
1869	    " set %jd reset %jd",
1870	    (uintptr_t)xfer, xfer->ux_timeout,
1871	    xfer->ux_timeout_set, xfer->ux_timeout_reset);
1872
1873	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
1874	KASSERTMSG(xfer->ux_status == USBD_IN_PROGRESS, "xfer=%p status=%d",
1875	    xfer, xfer->ux_status);
1876
1877	if (xfer->ux_timeout_set) {
1878		/*
1879		 * Callout or task has fired from a prior completed
1880		 * xfer but has not yet noticed that the xfer is done.
1881		 * Ask it to reschedule itself to ux_timeout.
1882		 */
1883		xfer->ux_timeout_reset = true;
1884	} else if (xfer->ux_timeout && !bus->ub_usepolling) {
1885		/* Callout is not scheduled.  Schedule it.  */
1886		KASSERT(!callout_pending(&xfer->ux_callout));
1887		callout_schedule(&xfer->ux_callout, mstohz(xfer->ux_timeout));
1888		xfer->ux_timeout_set = true;
1889	}
1890
1891	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
1892}
1893
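/*
 * Example: where usbd_xfer_schedule_timeout() fits in a host
 * controller's upm_start method.  Illustrative sketch only; the
 * hardware submission step and "foo_device_bulk_start" are
 * hypothetical.
 */
#if 0
static usbd_status
foo_device_bulk_start(struct usbd_xfer *xfer)
{
	/* ... hand the xfer's descriptors to the hardware here ... */

	xfer->ux_status = USBD_IN_PROGRESS;
	usbd_xfer_schedule_timeout(xfer);	/* arms ux_timeout, if set */
	return USBD_IN_PROGRESS;
}
#endif
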
1894/*
1895 * usbd_xfer_cancel_timeout_async(xfer)
1896 *
1897 *	Cancel the callout and the task of xfer, which have not yet run
1898 *	to completion, but don't wait for the callout or task to finish
1899 *	running.
1900 *
1901 *	If they have already fired, at worst they are waiting for the
1902 *	bus lock.  They will see that the xfer is no longer in progress
1903 *	and give up, or they will see that the xfer has been
1904 *	resubmitted with a new timeout and reschedule the callout.
1905 *
1906 *	If a resubmitted request completed so fast that the callout
1907 *	didn't have time to process a timer reset, just cancel the
1908 *	timer reset.
1909 */
1910static void
1911usbd_xfer_cancel_timeout_async(struct usbd_xfer *xfer)
1912{
1913	struct usbd_bus *bus __diagused = xfer->ux_bus;
1914
1915	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
1916
1917	USBHIST_FUNC();
1918	USBHIST_CALLARGS(usbdebug, "xfer %#jx timeout %jdms"
1919	    " set %jd reset %jd",
1920	    (uintptr_t)xfer, xfer->ux_timeout,
1921	    xfer->ux_timeout_set, xfer->ux_timeout_reset);
1922
1923	/*
1924	 * If the timer wasn't running anyway, forget about it.  This
1925	 * can happen if we are completing an isochronous transfer
1926	 * which doesn't use the same timeout logic.
1927	 */
1928	if (!xfer->ux_timeout_set) {
1929		USBHIST_LOG(usbdebug, "xfer %#jx timer not running",
1930		    (uintptr_t)xfer, 0, 0, 0);
1931		return;
1932	}
1933
1934	xfer->ux_timeout_reset = false;
1935	if (!callout_stop(&xfer->ux_callout)) {
1936		/*
1937		 * We stopped the callout before it ran.  The timeout
1938		 * is no longer set.
1939		 */
1940		USBHIST_LOG(usbdebug, "xfer %#jx timer stopped",
1941		    (uintptr_t)xfer, 0, 0, 0);
1942		xfer->ux_timeout_set = false;
1943	} else if (callout_invoking(&xfer->ux_callout)) {
1944		/*
1945		 * The callout has begun to run but it has not yet
1946		 * acquired the lock and called callout_ack.  The task
1947		 * cannot be queued yet, and the callout cannot have
1948		 * been rescheduled yet.
1949		 *
1950		 * By the time the callout acquires the lock, we will
1951		 * have transitioned from USBD_IN_PROGRESS to a
1952		 * completed status, and possibly also resubmitted the
1953		 * xfer and set xfer->ux_timeout_reset = true.  In both
1954		 * cases, the callout will DTRT, so no further action
1955		 * is needed here.
1956		 */
1957		USBHIST_LOG(usbdebug, "xfer %#jx timer fired",
1958		    (uintptr_t)xfer, 0, 0, 0);
1959	} else if (usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)) {
1960		/*
1961		 * The callout had fired and scheduled the task, but we
1962		 * stopped the task before it could run.  The timeout
1963		 * is therefore no longer set -- the next resubmission
1964		 * of the xfer must schedule a new timeout.
1965		 *
1966		 * The callout should not be pending at this point:
1967		 * it is scheduled only under the lock, and only when
1968		 * xfer->ux_timeout_set is false, or by the callout or
1969		 * task itself when xfer->ux_timeout_reset is true.
1970		 */
1971		USBHIST_LOG(usbdebug, "xfer %#jx task fired",
1972		    (uintptr_t)xfer, 0, 0, 0);
1973		xfer->ux_timeout_set = false;
1974	} else {
1975		USBHIST_LOG(usbdebug, "xfer %#jx task stopped",
1976		    (uintptr_t)xfer, 0, 0, 0);
1977	}
1978
1979	/*
1980	 * The callout cannot be scheduled and the task cannot be
1981	 * queued at this point.  Either we cancelled them, or they are
1982	 * already running and waiting for the bus lock.
1983	 */
1984	KASSERT(!callout_pending(&xfer->ux_callout));
1985	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));
1986
1987	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
1988}
1989