/* $NetBSD: dwc2_hcdqueue.c,v 1.11 2014/09/03 10:00:08 skrll Exp $ */

/*
 * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the functions to manage Queue Heads and Queue
 * Transfer Descriptors for Host mode
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2_hcdqueue.c,v 1.11 2014/09/03 10:00:08 skrll Exp $");

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/pool.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <machine/param.h>

#include <linux/kernel.h>

#include <dwc2/dwc2.h>
#include <dwc2/dwc2var.h>

#include "dwc2_core.h"
#include "dwc2_hcd.h"

static u32 dwc2_calc_bus_time(struct dwc2_hsotg *, int, int, int, int);

/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 * @urb:   Holds the information about the device/endpoint needed to initialize
 *         the QH
 */
#define SCHEDULE_SLOP 10
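/*
 * SCHEDULE_SLOP is the number of (micro)frames added to the current frame
 * number when dwc2_qh_init() picks the first scheduled frame for a new
 * periodic QH, so that the transfer starts slightly in the future rather
 * than in the (micro)frame already in progress.
 */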
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                         struct dwc2_hcd_urb *urb)
{
        int dev_speed, hub_addr, hub_port;

        dev_vdbg(hsotg->dev, "%s()\n", __func__);

        /* Initialize QH */
        qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
        qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

        qh->data_toggle = DWC2_HC_PID_DATA0;
        qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
        INIT_LIST_HEAD(&qh->qtd_list);
        INIT_LIST_HEAD(&qh->qh_list_entry);

        /* FS/LS Endpoint on HS Hub, NOT virtual root hub */
        dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

        dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
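        /*
         * 0xffff appears to act as a "no NAK recorded yet" sentinel for the
         * NAK holdoff logic, since no real frame number will compare equal
         * to it.
         */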
        qh->nak_frame = 0xffff;

        if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
            hub_addr != 0 && hub_addr != 1) {
                dev_vdbg(hsotg->dev,
                         "QH init: EP %d: TT found at hub addr %d, for port %d\n",
                         dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
                         hub_port);
                qh->do_split = 1;
        }

        if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
            qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
                /* Compute scheduling parameters once and save them */
                u32 hprt, prtspd;

                /* Todo: Account for split transfers in the bus time */
                int bytecount =
                        dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);

                qh->usecs = dwc2_calc_bus_time(hsotg, qh->do_split ?
                                USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
                                qh->ep_type == USB_ENDPOINT_XFER_ISOC,
                                bytecount);
                /* Start in a slightly future (micro)frame */
                qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
                                                     SCHEDULE_SLOP);
                qh->interval = urb->interval;
#if 0
                /* Increase interrupt polling rate for debugging */
                if (qh->ep_type == USB_ENDPOINT_XFER_INT)
                        qh->interval = 8;
#endif
                hprt = DWC2_READ_4(hsotg, HPRT0);
                prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
                if (prtspd == HPRT0_SPD_HIGH_SPEED &&
                    (dev_speed == USB_SPEED_LOW ||
                     dev_speed == USB_SPEED_FULL)) {
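                        /*
                         * With a high-speed root port and a FS/LS device
                         * behind a hub, the controller's frame counter
                         * advances once per microframe, so the frame-based
                         * polling interval is converted to microframes here.
                         * Setting the low bits of sched_frame to 7 appears
                         * to align the start splits with the last microframe
                         * of a frame.
                         */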
                        qh->interval *= 8;
                        qh->sched_frame |= 0x7;
                        qh->start_split_frame = qh->sched_frame;
                }
                dev_dbg(hsotg->dev, "interval=%d\n", qh->interval);
        }

        dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
        dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
        dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
                 dwc2_hcd_get_dev_addr(&urb->pipe_info));
        dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
                 dwc2_hcd_get_ep_num(&urb->pipe_info),
                 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

        qh->dev_speed = dev_speed;

#ifdef DWC2_DEBUG
        const char *speed, *type;
        switch (dev_speed) {
        case USB_SPEED_LOW:
                speed = "low";
                break;
        case USB_SPEED_FULL:
                speed = "full";
                break;
        case USB_SPEED_HIGH:
                speed = "high";
                break;
        default:
                speed = "?";
                break;
        }
        dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);

        switch (qh->ep_type) {
        case USB_ENDPOINT_XFER_ISOC:
                type = "isochronous";
                break;
        case USB_ENDPOINT_XFER_INT:
                type = "interrupt";
                break;
        case USB_ENDPOINT_XFER_CONTROL:
                type = "control";
                break;
        case USB_ENDPOINT_XFER_BULK:
                type = "bulk";
                break;
        default:
                type = "?";
                break;
        }

        dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);
#endif

        if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
                dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
                         qh->usecs);
                dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
                         qh->interval);
        }
}

/**
 * dwc2_hcd_qh_create() - Allocates and initializes a QH
 *
 * @hsotg:     The HCD state structure for the DWC OTG controller
 * @urb:       Holds the information about the device/endpoint needed
 *             to initialize the QH
 * @mem_flags: Flag to do atomic allocation if needed
 *
 * Return: Pointer to the newly allocated QH, or NULL on error
 */
static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
                                          struct dwc2_hcd_urb *urb,
                                          gfp_t mem_flags)
{
        struct dwc2_softc *sc = hsotg->hsotg_sc;
        struct dwc2_qh *qh;

        if (!urb->priv)
                return NULL;

        /* Allocate memory */
        qh = pool_cache_get(sc->sc_qhpool, PR_NOWAIT);
        if (!qh)
                return NULL;

        memset(qh, 0, sizeof(*qh));
        dwc2_qh_init(hsotg, qh, urb);

        if (hsotg->core_params->dma_desc_enable > 0 &&
            dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
                dwc2_hcd_qh_free(hsotg, qh);
                return NULL;
        }

        return qh;
}

/**
 * dwc2_hcd_qh_free() - Frees the QH
 *
 * @hsotg: HCD instance
 * @qh:    The QH to free
 *
 * QH should already be removed from the list. QTD list should already be empty
 * if called from URB Dequeue.
 *
 * Must NOT be called with interrupt disabled or spinlock held
 */
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        struct dwc2_softc *sc = hsotg->hsotg_sc;

        if (hsotg->core_params->dma_desc_enable > 0) {
                dwc2_hcd_qh_free_ddma(hsotg, qh);
        } else if (qh->dw_align_buf) {
                /* XXXNH */
                usb_freemem(&hsotg->hsotg_sc->sc_bus, &qh->dw_align_buf_usbdma);
        }

        pool_cache_put(sc->sc_qhpool, qh);
}

/**
 * dwc2_periodic_channel_available() - Checks that a channel is available for a
 * periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
{
        /*
         * Currently assuming that there is a dedicated host channel for
         * each periodic transaction plus at least one host channel for
         * non-periodic transactions
         */
        int status;
        int num_channels;

        num_channels = hsotg->core_params->host_channels;
        if (hsotg->periodic_channels + hsotg->non_periodic_channels <
                                                                num_channels
            && hsotg->periodic_channels < num_channels - 1) {
                status = 0;
        } else {
                dev_dbg(hsotg->dev,
                        "%s: Total channels: %d, Periodic: %d, "
                        "Non-periodic: %d\n", __func__, num_channels,
                        hsotg->periodic_channels, hsotg->non_periodic_channels);
                status = -ENOSPC;
        }

        return status;
}

/**
 * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
 * for the specified QH in the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH containing periodic bandwidth required
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For simplicity, this calculation assumes that all the transfers in the
 * periodic schedule may occur in the same (micro)frame
 */
static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
                                         struct dwc2_qh *qh)
{
        int status;
        s16 max_claimed_usecs;

        status = 0;

        if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
                /*
                 * High speed mode
                 * Max periodic usecs is 80% x 125 usec = 100 usec
                 */
                max_claimed_usecs = 100 - qh->usecs;
        } else {
                /*
                 * Full speed mode
                 * Max periodic usecs is 90% x 1000 usec = 900 usec
                 */
                max_claimed_usecs = 900 - qh->usecs;
        }

        if (hsotg->periodic_usecs > max_claimed_usecs) {
                dev_err(hsotg->dev,
                        "%s: already claimed usecs %d, required usecs %d\n",
                        __func__, hsotg->periodic_usecs, qh->usecs);
                status = -ENOSPC;
        }

        return status;
}

/**
 * Microframe scheduler
 * track the total use in hsotg->frame_usecs
 * keep each qh use in qh->frame_usecs
 * when surrendering the qh then donate the time back
 */
static const unsigned short max_uframe_usecs[] = {
        100, 100, 100, 100, 100, 100, 30, 0
};
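
/*
 * Periodic transfers may claim at most 80% of a 125 us microframe (100 us),
 * which is the limit used for microframes 0-5 above. The last two entries
 * are smaller (30 us and 0), presumably to keep the end of the full-speed
 * frame free for complete-split traffic and non-periodic transfers.
 */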

void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
{
        int i;

        for (i = 0; i < 8; i++)
                hsotg->frame_usecs[i] = max_uframe_usecs[i];
}

static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        unsigned short utime = qh->usecs;
        int i;

        for (i = 0; i < 8; i++) {
                /* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
                if (utime <= hsotg->frame_usecs[i]) {
                        hsotg->frame_usecs[i] -= utime;
                        qh->frame_usecs[i] += utime;
                        return i;
                }
        }
        return -ENOSPC;
}

/*
 * use this for FS apps that can span multiple uframes
 */
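
/*
 * Illustrative example: with all microframes still completely free and
 * utime = 150, the search below starts at i = 0 with xtime = 100, pulls in
 * microframe 1 (xtime = 200 >= 150), and then claims 100 us from microframe 0
 * and 50 us from microframe 1 before returning 0.
 */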
static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        unsigned short utime = qh->usecs;
        unsigned short xtime;
        int t_left;
        int i;
        int j;
        int k;

        for (i = 0; i < 8; i++) {
                if (hsotg->frame_usecs[i] <= 0)
                        continue;

                /*
                 * we need n consecutive slots so use j as a start slot
                 * j plus j+1 must be enough time (for now)
                 */
                xtime = hsotg->frame_usecs[i];
                for (j = i + 1; j < 8; j++) {
                        /*
                         * if we add this frame remaining time to xtime we may
                         * be OK, if not we need to test j for a complete frame
                         */
                        if (xtime + hsotg->frame_usecs[j] < utime) {
                                if (hsotg->frame_usecs[j] <
                                                        max_uframe_usecs[j])
                                        continue;
                        }
                        if (xtime >= utime) {
                                t_left = utime;
                                for (k = i; k < 8; k++) {
                                        t_left -= hsotg->frame_usecs[k];
                                        if (t_left <= 0) {
                                                qh->frame_usecs[k] +=
                                                        hsotg->frame_usecs[k]
                                                                + t_left;
                                                hsotg->frame_usecs[k] = -t_left;
                                                return i;
                                        } else {
                                                qh->frame_usecs[k] +=
                                                        hsotg->frame_usecs[k];
                                                hsotg->frame_usecs[k] = 0;
                                        }
                                }
                        }
                        /* add the frame time to x time */
                        xtime += hsotg->frame_usecs[j];
                        /* we must have a fully available next frame or break */
                        if (xtime < utime &&
                            hsotg->frame_usecs[j] == max_uframe_usecs[j])
                                continue;
                }
        }
        return -ENOSPC;
}

static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        int ret;

        if (qh->dev_speed == USB_SPEED_HIGH) {
                /* if this is a hs transaction we need a full frame */
                ret = dwc2_find_single_uframe(hsotg, qh);
        } else {
                /*
                 * if this is a fs transaction we may need a sequence
                 * of frames
                 */
                ret = dwc2_find_multi_uframe(hsotg, qh);
        }
        return ret;
}

/**
 * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
 * host channel is large enough to handle the maximum data transfer in a single
 * (micro)frame for a periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for a periodic endpoint
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
                                    struct dwc2_qh *qh)
{
        u32 max_xfer_size;
        u32 max_channel_xfer_size;
        int status = 0;

        max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
        max_channel_xfer_size = hsotg->core_params->max_transfer_size;

        if (max_xfer_size > max_channel_xfer_size) {
                dev_err(hsotg->dev,
                        "%s: Periodic xfer length %d > max xfer length for channel %d\n",
                        __func__, max_xfer_size, max_channel_xfer_size);
                status = -ENOSPC;
        }

        return status;
}

/**
 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
 * the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer. The QH should already contain the
 *         scheduling information.
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        int status;

        if (hsotg->core_params->uframe_sched > 0) {
                int frame = -1;

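                /*
                 * dwc2_find_uframe() returns the index (0-7) of the first
                 * microframe in which time was reserved, or a negative errno.
                 * The low three bits of sched_frame are then set to the
                 * microframe preceding it, with index 0 wrapping around to 7.
                 */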
                status = dwc2_find_uframe(hsotg, qh);
                if (status == 0)
                        frame = 7;
                else if (status > 0)
                        frame = status - 1;

                /* Set the new frame up */
                if (frame >= 0) {
                        qh->sched_frame &= ~0x7;
                        qh->sched_frame |= (frame & 7);
                }

                if (status > 0)
                        status = 0;
        } else {
                status = dwc2_periodic_channel_available(hsotg);
                if (status) {
                        dev_info(hsotg->dev,
                                 "%s: No host channel available for periodic transfer\n",
                                 __func__);
                        return status;
                }

                status = dwc2_check_periodic_bandwidth(hsotg, qh);
        }

        if (status) {
                dev_dbg(hsotg->dev,
                        "%s: Insufficient periodic bandwidth for periodic transfer\n",
                        __func__);
                return status;
        }

        status = dwc2_check_max_xfer_size(hsotg, qh);
        if (status) {
                dev_dbg(hsotg->dev,
                        "%s: Channel max transfer size too small for periodic transfer\n",
                        __func__);
                return status;
        }

        if (hsotg->core_params->dma_desc_enable > 0)
                /* Don't rely on SOF and start in ready schedule */
                list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
        else
                /* Always start in inactive schedule */
                list_add_tail(&qh->qh_list_entry,
                              &hsotg->periodic_sched_inactive);

        if (hsotg->core_params->uframe_sched <= 0)
                /* Reserve periodic channel */
                hsotg->periodic_channels++;

        /* Update claimed usecs per (micro)frame */
        hsotg->periodic_usecs += qh->usecs;

        return status;
}

/**
 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
 * from the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer
 */
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
                                     struct dwc2_qh *qh)
{
        int i;

        list_del_init(&qh->qh_list_entry);

        /* Update claimed usecs per (micro)frame */
        hsotg->periodic_usecs -= qh->usecs;

        if (hsotg->core_params->uframe_sched > 0) {
                for (i = 0; i < 8; i++) {
                        hsotg->frame_usecs[i] += qh->frame_usecs[i];
                        qh->frame_usecs[i] = 0;
                }
        } else {
                /* Release periodic channel reservation */
                hsotg->periodic_channels--;
        }
}

/**
 * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
 * schedule if it is not already in the schedule. If the QH is already in
 * the schedule, no action is taken.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to add
 *
 * Return: 0 if successful, negative error code otherwise
 */
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        int status;
        u32 intr_mask;

        if (dbg_qh(qh))
                dev_vdbg(hsotg->dev, "%s()\n", __func__);

        if (!list_empty(&qh->qh_list_entry))
                /* QH already in a schedule */
                return 0;

        /* Add the new QH to the appropriate schedule */
        if (dwc2_qh_is_non_per(qh)) {
                /* Always start in inactive schedule */
                list_add_tail(&qh->qh_list_entry,
                              &hsotg->non_periodic_sched_inactive);
                return 0;
        }
        status = dwc2_schedule_periodic(hsotg, qh);
        if (status)
                return status;
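
        /*
         * Unmask the start-of-frame interrupt once the first periodic QH is
         * queued; it is masked again in dwc2_hcd_qh_unlink() when the last
         * periodic QH is removed.
         */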
        if (!hsotg->periodic_qh_count) {
                intr_mask = DWC2_READ_4(hsotg, GINTMSK);
                intr_mask |= GINTSTS_SOF;
                DWC2_WRITE_4(hsotg, GINTMSK, intr_mask);
        }
        hsotg->periodic_qh_count++;

        return 0;
}

/**
 * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
 * schedule. Memory is not freed.
 *
 * @hsotg: The HCD state structure
 * @qh:    QH to remove from schedule
 */
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        u32 intr_mask;

        dev_vdbg(hsotg->dev, "%s()\n", __func__);

        if (list_empty(&qh->qh_list_entry))
                /* QH is not in a schedule */
                return;

        if (dwc2_qh_is_non_per(qh)) {
                if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
                        hsotg->non_periodic_qh_ptr =
                                        hsotg->non_periodic_qh_ptr->next;
                list_del_init(&qh->qh_list_entry);
                return;
        }
        dwc2_deschedule_periodic(hsotg, qh);
        hsotg->periodic_qh_count--;
        if (!hsotg->periodic_qh_count) {
                intr_mask = DWC2_READ_4(hsotg, GINTMSK);
                intr_mask &= ~GINTSTS_SOF;
                DWC2_WRITE_4(hsotg, GINTMSK, intr_mask);
        }
}

/*
 * Schedule the next continuing periodic split transfer
 */
static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
                                      struct dwc2_qh *qh, u16 frame_number,
                                      int sched_next_periodic_split)
{
        u16 incr;

        if (sched_next_periodic_split) {
                qh->sched_frame = frame_number;
                incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
                if (dwc2_frame_num_le(frame_number, incr)) {
                        /*
                         * Allow one frame to elapse after start split
                         * microframe before scheduling complete split, but
                         * DON'T if we are doing the next start split in the
                         * same frame for an ISOC out
                         */
                        if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
                            qh->ep_is_in != 0) {
                                qh->sched_frame =
                                        dwc2_frame_num_inc(qh->sched_frame, 1);
                        }
                }
        } else {
                qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
                                                     qh->interval);
                if (dwc2_frame_num_le(qh->sched_frame, frame_number))
                        qh->sched_frame = frame_number;
                qh->sched_frame |= 0x7;
                qh->start_split_frame = qh->sched_frame;
        }
}

/*
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                            int sched_next_periodic_split)
{
        u16 frame_number;

        if (dbg_qh(qh))
                dev_vdbg(hsotg->dev, "%s()\n", __func__);

        if (dwc2_qh_is_non_per(qh)) {
                dwc2_hcd_qh_unlink(hsotg, qh);
                if (!list_empty(&qh->qtd_list))
                        /* Add back to inactive non-periodic schedule */
                        dwc2_hcd_qh_add(hsotg, qh);
                return;
        }

        frame_number = dwc2_hcd_get_frame_number(hsotg);

        if (qh->do_split) {
                dwc2_sched_periodic_split(hsotg, qh, frame_number,
                                          sched_next_periodic_split);
        } else {
                qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
                                                     qh->interval);
                if (dwc2_frame_num_le(qh->sched_frame, frame_number))
                        qh->sched_frame = frame_number;
        }

        if (list_empty(&qh->qtd_list)) {
                dwc2_hcd_qh_unlink(hsotg, qh);
                return;
        }
        /*
         * Remove from periodic_sched_queued and move to
         * appropriate queue
         */
        if ((hsotg->core_params->uframe_sched > 0 &&
             dwc2_frame_num_le(qh->sched_frame, frame_number)) ||
            (hsotg->core_params->uframe_sched <= 0 &&
             qh->sched_frame == frame_number))
                list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
        else
                list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive);
}

/**
 * dwc2_hcd_qtd_init() - Initializes a QTD structure
 *
 * @qtd: The QTD to initialize
 * @urb: The associated URB
 */
void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
        qtd->urb = urb;
        if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
                        USB_ENDPOINT_XFER_CONTROL) {
                /*
                 * The only time the QTD data toggle is used is on the data
                 * phase of control transfers. This phase always starts with
                 * DATA1.
                 */
                qtd->data_toggle = DWC2_HC_PID_DATA1;
                qtd->control_phase = DWC2_CONTROL_SETUP;
        }

        /* Start split */
        qtd->complete_split = 0;
        qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
        qtd->isoc_split_offset = 0;
        qtd->in_process = 0;

        /* Store the qtd ptr in the urb to reference the QTD */
        urb->qtd = qtd;
}

/**
 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
 *
 * @hsotg:     The DWC HCD structure
 * @qtd:       The QTD to add
 * @qh:        Out parameter to return queue head
 * @mem_flags: Flag to do atomic alloc if needed
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Finds the correct QH to place the QTD into. If it does not find a QH, it
 * will create a new QH. If the QH to which the QTD is added is not currently
 * scheduled, it is placed into the proper schedule based on its EP type.
 *
 * HCD lock must be held and interrupts must be disabled on entry
 */
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
                     struct dwc2_qh **qh, gfp_t mem_flags)
{
        struct dwc2_hcd_urb *urb = qtd->urb;
        int allocated = 0;
        int retval;

        /*
         * Get the QH which holds the QTD-list to insert to. Create QH if it
         * doesn't exist.
         */
        if (*qh == NULL) {
                *qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
                if (*qh == NULL)
                        return -ENOMEM;
                allocated = 1;
        }

        retval = dwc2_hcd_qh_add(hsotg, *qh);
        if (retval)
                goto fail;

        qtd->qh = *qh;
        list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);

        return 0;

fail:
        if (allocated) {
                struct dwc2_qtd *qtd2, *qtd2_tmp;
                struct dwc2_qh *qh_tmp = *qh;

                *qh = NULL;
                dwc2_hcd_qh_unlink(hsotg, qh_tmp);

                /* Free each QTD in the QH's QTD list */
                list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list,
                                         qtd_list_entry)
                        dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);

                dwc2_hcd_qh_free(hsotg, qh_tmp);
        }

        return retval;
}

void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
                                  struct dwc2_qtd *qtd,
                                  struct dwc2_qh *qh)
{
        struct dwc2_softc *sc = hsotg->hsotg_sc;

        list_del_init(&qtd->qtd_list_entry);
        pool_cache_put(sc->sc_qtdpool, qtd);
}

#define BITSTUFFTIME(bytecount) ((8 * 7 * (bytecount)) / 6)
#define HS_HOST_DELAY           5       /* nanoseconds */
#define FS_LS_HOST_DELAY        1000    /* nanoseconds */
#define HUB_LS_SETUP            333     /* nanoseconds */
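
/*
 * The constants below appear to come from the transaction bus-time formulas
 * in the USB 2.0 specification (section 5.11.3), with bit times expressed in
 * units of 1/1000 ns (e.g. 2083 corresponds to the ~2.083 ns high-speed bit
 * time), so that the division by 1000 yields nanoseconds before NS_TO_US()
 * converts the result to microseconds.
 */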

static u32 dwc2_calc_bus_time(struct dwc2_hsotg *hsotg, int speed, int is_in,
                              int is_isoc, int bytecount)
{
        unsigned long retval;

        switch (speed) {
        case USB_SPEED_HIGH:
                if (is_isoc)
                        retval =
                            ((38 * 8 * 2083) +
                             (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
                            HS_HOST_DELAY;
                else
                        retval =
                            ((55 * 8 * 2083) +
                             (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
                            HS_HOST_DELAY;
                break;
        case USB_SPEED_FULL:
                if (is_isoc) {
                        retval =
                            (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
                        if (is_in)
                                retval = 7268 + FS_LS_HOST_DELAY + retval;
                        else
                                retval = 6265 + FS_LS_HOST_DELAY + retval;
                } else {
                        retval =
                            (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
                        retval = 9107 + FS_LS_HOST_DELAY + retval;
                }
                break;
        case USB_SPEED_LOW:
                if (is_in) {
                        retval =
                            (67667 * (31 + 10 * BITSTUFFTIME(bytecount))) /
                            1000;
                        retval =
                            64060 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
                            retval;
                } else {
                        retval =
                            (66700 * (31 + 10 * BITSTUFFTIME(bytecount))) /
                            1000;
                        retval =
                            64107 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
                            retval;
                }
                break;
        default:
                dev_warn(hsotg->dev, "Unknown device speed\n");
                retval = -1;
        }

        return NS_TO_US(retval);
}