dwc2_hcdqueue.c revision 1.1 1 /* $NetBSD: dwc2_hcdqueue.c,v 1.1 2013/09/05 07:53:12 skrll Exp $ */
2
3 /*
4 * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
5 *
6 * Copyright (C) 2004-2013 Synopsys, Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. The names of the above-listed copyright holders may not be used
18 * to endorse or promote products derived from this software without
19 * specific prior written permission.
20 *
21 * ALTERNATIVELY, this software may be distributed under the terms of the
22 * GNU General Public License ("GPL") as published by the Free Software
23 * Foundation; either version 2 of the License, or (at your option) any
24 * later version.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * This file contains the functions to manage Queue Heads and Queue
41 * Transfer Descriptors for Host mode
42 */
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/io.h>
49 #include <linux/slab.h>
50 #include <linux/usb.h>
51
52 #include <linux/usb/hcd.h>
53 #include <linux/usb/ch11.h>
54
55 #include "core.h"
56 #include "hcd.h"
57
/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init (assumed zero-filled by the caller; see
 *         dwc2_hcd_qh_create() which kzalloc()s it)
 * @urb:   Holds the information about the device/endpoint needed to initialize
 *         the QH
 */
#define SCHEDULE_SLOP 10	/* (micro)frames of slack before the first scheduled frame */
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb)
{
	int dev_speed, hub_addr, hub_port;
	char *speed, *type;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Initialize QH */
	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

	/* Endpoints start from DATA0 */
	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
	/* 0xffff presumably means "no NAK seen yet" - TODO confirm vs users */
	qh->nak_frame = 0xffff;

	/*
	 * A LS/FS device behind a hub (hub_addr 0 is no hub, 1 is the
	 * root hub) needs split transactions through the hub's TT.
	 */
	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
	    hub_addr != 0 && hub_addr != 1) {
		dev_vdbg(hsotg->dev,
			 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
			 hub_port);
		qh->do_split = 1;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* Compute scheduling parameters once and save them */
		u32 hprt, prtspd;

		/* Todo: Account for split transfers in the bus time */
		int bytecount =
			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);

		/* Splits are timed as HS transactions on the upstream bus */
		qh->usecs = NS_TO_US(usb_calc_bus_time(qh->do_split ?
				USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
				qh->ep_type == USB_ENDPOINT_XFER_ISOC,
				bytecount));
		/* Start in a slightly future (micro)frame */
		qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
						     SCHEDULE_SLOP);
		qh->interval = urb->interval;
#if 0
		/* Increase interrupt polling rate for debugging */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
			qh->interval = 8;
#endif
		hprt = readl(hsotg->regs + HPRT0);
		prtspd = hprt & HPRT0_SPD_MASK;
		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
		    (dev_speed == USB_SPEED_LOW ||
		     dev_speed == USB_SPEED_FULL)) {
			/*
			 * HS port, LS/FS device: the URB interval is in
			 * frames but frame_number counts microframes, so
			 * scale by 8 and align the start split to
			 * microframe 7 within the frame.
			 */
			qh->interval *= 8;
			qh->sched_frame |= 0x7;
			qh->start_split_frame = qh->sched_frame;
		}
		dev_dbg(hsotg->dev, "interval=%d\n", qh->interval);
	}

	/* Verbose dump of the freshly initialized QH */
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
		 dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
		 dwc2_hcd_get_ep_num(&urb->pipe_info),
		 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	qh->dev_speed = dev_speed;

	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);

	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
			 qh->usecs);
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
			 qh->interval);
	}
}
186
187 /**
188 * dwc2_hcd_qh_create() - Allocates and initializes a QH
189 *
190 * @hsotg: The HCD state structure for the DWC OTG controller
191 * @urb: Holds the information about the device/endpoint needed
192 * to initialize the QH
193 * @atomic_alloc: Flag to do atomic allocation if needed
194 *
195 * Return: Pointer to the newly allocated QH, or NULL on error
196 */
197 static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
198 struct dwc2_hcd_urb *urb,
199 gfp_t mem_flags)
200 {
201 struct dwc2_qh *qh;
202
203 if (!urb->priv)
204 return NULL;
205
206 /* Allocate memory */
207 qh = kzalloc(sizeof(*qh), mem_flags);
208 if (!qh)
209 return NULL;
210
211 dwc2_qh_init(hsotg, qh, urb);
212
213 if (hsotg->core_params->dma_desc_enable > 0 &&
214 dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
215 dwc2_hcd_qh_free(hsotg, qh);
216 return NULL;
217 }
218
219 return qh;
220 }
221
222 /**
223 * dwc2_hcd_qh_free() - Frees the QH
224 *
225 * @hsotg: HCD instance
226 * @qh: The QH to free
227 *
228 * QH should already be removed from the list. QTD list should already be empty
229 * if called from URB Dequeue.
230 *
231 * Must NOT be called with interrupt disabled or spinlock held
232 */
233 void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
234 {
235 u32 buf_size;
236
237 if (hsotg->core_params->dma_desc_enable > 0) {
238 dwc2_hcd_qh_free_ddma(hsotg, qh);
239 } else if (qh->dw_align_buf) {
240 if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
241 buf_size = 4096;
242 else
243 buf_size = hsotg->core_params->max_transfer_size;
244 dma_free_coherent(hsotg->dev, buf_size, qh->dw_align_buf,
245 qh->dw_align_buf_dma);
246 }
247
248 kfree(qh);
249 }
250
251 /**
252 * dwc2_periodic_channel_available() - Checks that a channel is available for a
253 * periodic transfer
254 *
255 * @hsotg: The HCD state structure for the DWC OTG controller
256 *
257 * Return: 0 if successful, negative error code otherise
258 */
259 static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
260 {
261 /*
262 * Currently assuming that there is a dedicated host channnel for
263 * each periodic transaction plus at least one host channel for
264 * non-periodic transactions
265 */
266 int status;
267 int num_channels;
268
269 num_channels = hsotg->core_params->host_channels;
270 if (hsotg->periodic_channels + hsotg->non_periodic_channels <
271 num_channels
272 && hsotg->periodic_channels < num_channels - 1) {
273 status = 0;
274 } else {
275 dev_dbg(hsotg->dev,
276 "%s: Total channels: %d, Periodic: %d, "
277 "Non-periodic: %d\n", __func__, num_channels,
278 hsotg->periodic_channels, hsotg->non_periodic_channels);
279 status = -ENOSPC;
280 }
281
282 return status;
283 }
284
285 /**
286 * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
287 * for the specified QH in the periodic schedule
288 *
289 * @hsotg: The HCD state structure for the DWC OTG controller
290 * @qh: QH containing periodic bandwidth required
291 *
292 * Return: 0 if successful, negative error code otherwise
293 *
294 * For simplicity, this calculation assumes that all the transfers in the
295 * periodic schedule may occur in the same (micro)frame
296 */
297 static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
298 struct dwc2_qh *qh)
299 {
300 int status;
301 s16 max_claimed_usecs;
302
303 status = 0;
304
305 if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
306 /*
307 * High speed mode
308 * Max periodic usecs is 80% x 125 usec = 100 usec
309 */
310 max_claimed_usecs = 100 - qh->usecs;
311 } else {
312 /*
313 * Full speed mode
314 * Max periodic usecs is 90% x 1000 usec = 900 usec
315 */
316 max_claimed_usecs = 900 - qh->usecs;
317 }
318
319 if (hsotg->periodic_usecs > max_claimed_usecs) {
320 dev_err(hsotg->dev,
321 "%s: already claimed usecs %d, required usecs %d\n",
322 __func__, hsotg->periodic_usecs, qh->usecs);
323 status = -ENOSPC;
324 }
325
326 return status;
327 }
328
329 /**
330 * Microframe scheduler
331 * track the total use in hsotg->frame_usecs
332 * keep each qh use in qh->frame_usecs
333 * when surrendering the qh then donate the time back
334 */
335 static const unsigned short max_uframe_usecs[] = {
336 100, 100, 100, 100, 100, 100, 30, 0
337 };
338
339 void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
340 {
341 int i;
342
343 for (i = 0; i < 8; i++)
344 hsotg->frame_usecs[i] = max_uframe_usecs[i];
345 }
346
347 static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
348 {
349 unsigned short utime = qh->usecs;
350 int done = 0;
351 int i = 0;
352 int ret = -1;
353
354 while (!done) {
355 /* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
356 if (utime <= hsotg->frame_usecs[i]) {
357 hsotg->frame_usecs[i] -= utime;
358 qh->frame_usecs[i] += utime;
359 ret = i;
360 done = 1;
361 } else {
362 i++;
363 if (i == 8)
364 done = 1;
365 }
366 }
367
368 return ret;
369 }
370
/*
 * use this for FS apps that can span multiple uframes
 *
 * Searches for a run of consecutive microframes, starting at index i, whose
 * combined remaining budget covers qh->usecs; intermediate microframes in
 * the run must be completely unused (full max_uframe_usecs[] budget). On
 * success the needed time is carved out of hsotg->frame_usecs[] and recorded
 * in qh->frame_usecs[], and the start index is returned; -1 on failure.
 */
static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->usecs;	/* total time to place */
	unsigned short xtime;			/* accumulated time in the run */
	int t_left = utime;
	int done = 0;
	int i = 0;				/* candidate start microframe */
	int j;
	int ret = -1;

	while (!done) {
		/* Skip microframes with no remaining budget at all */
		if (hsotg->frame_usecs[i] <= 0) {
			i++;
			if (i == 8) {
				ret = -1;
				done = 1;
			}
			continue;
		}

		/*
		 * we need n consecutive slots so use j as a start slot
		 * j plus j+1 must be enough time (for now)
		 */
		xtime = hsotg->frame_usecs[i];
		for (j = i + 1; j < 8; j++) {
			/*
			 * if we add this frame remaining time to xtime we may
			 * be OK, if not we need to test j for a complete frame
			 */
			if (xtime + hsotg->frame_usecs[j] < utime) {
				/* Still short: frame j must be untouched */
				if (hsotg->frame_usecs[j] <
						max_uframe_usecs[j]) {
					ret = -1;
					break;
				}
			}
			if (xtime >= utime) {
				ret = i;	/* run starting at i fits */
				break;
			}
			/* add the frame time to x time */
			xtime += hsotg->frame_usecs[j];
			/* we must have a fully available next frame or break */
			if (xtime < utime &&
			    hsotg->frame_usecs[j] == max_uframe_usecs[j]) {
				ret = -1;
				break;
			}
		}
		if (ret >= 0) {
			/*
			 * A fitting run was found: walk it again and move
			 * the time from the global pool into this QH,
			 * draining whole microframes until t_left runs out.
			 */
			t_left = utime;
			for (j = i; t_left > 0 && j < 8; j++) {
				t_left -= hsotg->frame_usecs[j];
				if (t_left <= 0) {
					/* Last microframe: take only what's
					 * needed, leave -t_left behind */
					qh->frame_usecs[j] +=
						hsotg->frame_usecs[j] + t_left;
					hsotg->frame_usecs[j] = -t_left;
					ret = i;
					done = 1;
				} else {
					qh->frame_usecs[j] +=
						hsotg->frame_usecs[j];
					hsotg->frame_usecs[j] = 0;
				}
			}
		} else {
			/* No run from i; try the next start microframe */
			i++;
			if (i == 8) {
				ret = -1;
				done = 1;
			}
		}
	}

	return ret;
}
451
452 static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
453 {
454 int ret;
455
456 if (qh->dev_speed == USB_SPEED_HIGH) {
457 /* if this is a hs transaction we need a full frame */
458 ret = dwc2_find_single_uframe(hsotg, qh);
459 } else {
460 /*
461 * if this is a fs transaction we may need a sequence
462 * of frames
463 */
464 ret = dwc2_find_multi_uframe(hsotg, qh);
465 }
466 return ret;
467 }
468
469 /**
470 * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
471 * host channel is large enough to handle the maximum data transfer in a single
472 * (micro)frame for a periodic transfer
473 *
474 * @hsotg: The HCD state structure for the DWC OTG controller
475 * @qh: QH for a periodic endpoint
476 *
477 * Return: 0 if successful, negative error code otherwise
478 */
479 static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
480 struct dwc2_qh *qh)
481 {
482 u32 max_xfer_size;
483 u32 max_channel_xfer_size;
484 int status = 0;
485
486 max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
487 max_channel_xfer_size = hsotg->core_params->max_transfer_size;
488
489 if (max_xfer_size > max_channel_xfer_size) {
490 dev_err(hsotg->dev,
491 "%s: Periodic xfer length %d > max xfer length for channel %d\n",
492 __func__, max_xfer_size, max_channel_xfer_size);
493 status = -ENOSPC;
494 }
495
496 return status;
497 }
498
499 /**
500 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
501 * the periodic schedule
502 *
503 * @hsotg: The HCD state structure for the DWC OTG controller
504 * @qh: QH for the periodic transfer. The QH should already contain the
505 * scheduling information.
506 *
507 * Return: 0 if successful, negative error code otherwise
508 */
509 static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
510 {
511 int status;
512
513 if (hsotg->core_params->uframe_sched > 0) {
514 int frame = -1;
515
516 status = dwc2_find_uframe(hsotg, qh);
517 if (status == 0)
518 frame = 7;
519 else if (status > 0)
520 frame = status - 1;
521
522 /* Set the new frame up */
523 if (frame > -1) {
524 qh->sched_frame &= ~0x7;
525 qh->sched_frame |= (frame & 7);
526 }
527
528 if (status != -1)
529 status = 0;
530 } else {
531 status = dwc2_periodic_channel_available(hsotg);
532 if (status) {
533 dev_info(hsotg->dev,
534 "%s: No host channel available for periodic transfer\n",
535 __func__);
536 return status;
537 }
538
539 status = dwc2_check_periodic_bandwidth(hsotg, qh);
540 }
541
542 if (status) {
543 dev_dbg(hsotg->dev,
544 "%s: Insufficient periodic bandwidth for periodic transfer\n",
545 __func__);
546 return status;
547 }
548
549 status = dwc2_check_max_xfer_size(hsotg, qh);
550 if (status) {
551 dev_dbg(hsotg->dev,
552 "%s: Channel max transfer size too small for periodic transfer\n",
553 __func__);
554 return status;
555 }
556
557 if (hsotg->core_params->dma_desc_enable > 0) {
558 /* Don't rely on SOF and start in ready schedule */
559 list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
560 } else {
561 if (list_empty(&hsotg->periodic_sched_inactive) ||
562 dwc2_frame_num_le(qh->sched_frame, hsotg->next_sched_frame))
563 hsotg->next_sched_frame = qh->sched_frame;
564
565 /* Always start in inactive schedule */
566 list_add_tail(&qh->qh_list_entry,
567 &hsotg->periodic_sched_inactive);
568 }
569
570 if (hsotg->core_params->uframe_sched <= 0)
571 /* Reserve periodic channel */
572 hsotg->periodic_channels++;
573
574 /* Update claimed usecs per (micro)frame */
575 hsotg->periodic_usecs += qh->usecs;
576
577 return status;
578 }
579
580 /**
581 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
582 * from the periodic schedule
583 *
584 * @hsotg: The HCD state structure for the DWC OTG controller
585 * @qh: QH for the periodic transfer
586 */
587 static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
588 struct dwc2_qh *qh)
589 {
590 int i;
591
592 list_del_init(&qh->qh_list_entry);
593
594 /* Update claimed usecs per (micro)frame */
595 hsotg->periodic_usecs -= qh->usecs;
596
597 if (hsotg->core_params->uframe_sched > 0) {
598 for (i = 0; i < 8; i++) {
599 hsotg->frame_usecs[i] += qh->frame_usecs[i];
600 qh->frame_usecs[i] = 0;
601 }
602 } else {
603 /* Release periodic channel reservation */
604 hsotg->periodic_channels--;
605 }
606 }
607
608 /**
609 * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
610 * schedule if it is not already in the schedule. If the QH is already in
611 * the schedule, no action is taken.
612 *
613 * @hsotg: The HCD state structure for the DWC OTG controller
614 * @qh: The QH to add
615 *
616 * Return: 0 if successful, negative error code otherwise
617 */
618 int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
619 {
620 int status = 0;
621 u32 intr_mask;
622
623 if (dbg_qh(qh))
624 dev_vdbg(hsotg->dev, "%s()\n", __func__);
625
626 if (!list_empty(&qh->qh_list_entry))
627 /* QH already in a schedule */
628 return status;
629
630 /* Add the new QH to the appropriate schedule */
631 if (dwc2_qh_is_non_per(qh)) {
632 /* Always start in inactive schedule */
633 list_add_tail(&qh->qh_list_entry,
634 &hsotg->non_periodic_sched_inactive);
635 } else {
636 status = dwc2_schedule_periodic(hsotg, qh);
637 if (status == 0) {
638 if (!hsotg->periodic_qh_count) {
639 intr_mask = readl(hsotg->regs + GINTMSK);
640 intr_mask |= GINTSTS_SOF;
641 writel(intr_mask, hsotg->regs + GINTMSK);
642 }
643 hsotg->periodic_qh_count++;
644 }
645 }
646
647 return status;
648 }
649
650 /**
651 * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
652 * schedule. Memory is not freed.
653 *
654 * @hsotg: The HCD state structure
655 * @qh: QH to remove from schedule
656 */
657 void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
658 {
659 u32 intr_mask;
660
661 dev_vdbg(hsotg->dev, "%s()\n", __func__);
662
663 if (list_empty(&qh->qh_list_entry))
664 /* QH is not in a schedule */
665 return;
666
667 if (dwc2_qh_is_non_per(qh)) {
668 if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
669 hsotg->non_periodic_qh_ptr =
670 hsotg->non_periodic_qh_ptr->next;
671 list_del_init(&qh->qh_list_entry);
672 } else {
673 dwc2_deschedule_periodic(hsotg, qh);
674 hsotg->periodic_qh_count--;
675 if (!hsotg->periodic_qh_count) {
676 intr_mask = readl(hsotg->regs + GINTMSK);
677 intr_mask &= ~GINTSTS_SOF;
678 writel(intr_mask, hsotg->regs + GINTMSK);
679 }
680 }
681 }
682
/*
 * Schedule the next continuing periodic split transfer
 *
 * @hsotg:                     The HCD state structure
 * @qh:                        QH for the split transfer being rescheduled
 * @frame_number:              Current (micro)frame number
 * @sched_next_periodic_split: Nonzero to schedule the complete-split phase,
 *                             zero to schedule the next start split
 */
static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh, u16 frame_number,
				      int sched_next_periodic_split)
{
	u16 incr;

	if (sched_next_periodic_split) {
		/* Complete split: run in the current (micro)frame... */
		qh->sched_frame = frame_number;
		incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
		if (dwc2_frame_num_le(frame_number, incr)) {
			/*
			 * Allow one frame to elapse after start split
			 * microframe before scheduling complete split, but
			 * DON'T if we are doing the next start split in the
			 * same frame for an ISOC out
			 */
			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
			    qh->ep_is_in != 0) {
				qh->sched_frame =
					dwc2_frame_num_inc(qh->sched_frame, 1);
			}
		}
	} else {
		/*
		 * Next start split: one full interval past the previous
		 * start split, but never behind the current frame.
		 */
		qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
						     qh->interval);
		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
			qh->sched_frame = frame_number;
		/* Align start splits to microframe 7 within the frame */
		qh->sched_frame |= 0x7;
		qh->start_split_frame = qh->sched_frame;
	}
}
717
/*
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			    int sched_next_periodic_split)
{
	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (dwc2_qh_is_non_per(qh)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		if (!list_empty(&qh->qtd_list))
			/* Add back to inactive non-periodic schedule */
			dwc2_hcd_qh_add(hsotg, qh);
	} else {
		u16 frame_number = dwc2_hcd_get_frame_number(hsotg);

		/* Compute the QH's next scheduled frame */
		if (qh->do_split) {
			dwc2_sched_periodic_split(hsotg, qh, frame_number,
						  sched_next_periodic_split);
		} else {
			/* Advance one interval, but never behind "now" */
			qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
							     qh->interval);
			if (dwc2_frame_num_le(qh->sched_frame, frame_number))
				qh->sched_frame = frame_number;
		}

		if (list_empty(&qh->qtd_list)) {
			/* No work left: drop out of the schedule entirely */
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/*
			 * Remove from periodic_sched_queued and move to
			 * appropriate queue
			 */
			if ((hsotg->core_params->uframe_sched > 0 &&
			     dwc2_frame_num_le(qh->sched_frame, frame_number))
			    || (hsotg->core_params->uframe_sched <= 0 &&
				qh->sched_frame == frame_number)) {
				/* Due already: straight to the ready queue */
				list_move(&qh->qh_list_entry,
					  &hsotg->periodic_sched_ready);
			} else {
				/* Not due yet: park in the inactive queue and
				 * pull next_sched_frame earlier if needed */
				if (!dwc2_frame_num_le(hsotg->next_sched_frame,
						       qh->sched_frame))
					hsotg->next_sched_frame =
							qh->sched_frame;
				list_move(&qh->qh_list_entry,
					  &hsotg->periodic_sched_inactive);
			}
		}
	}
}
779
780 /**
781 * dwc2_hcd_qtd_init() - Initializes a QTD structure
782 *
783 * @qtd: The QTD to initialize
784 * @urb: The associated URB
785 */
786 void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
787 {
788 qtd->urb = urb;
789 if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
790 USB_ENDPOINT_XFER_CONTROL) {
791 /*
792 * The only time the QTD data toggle is used is on the data
793 * phase of control transfers. This phase always starts with
794 * DATA1.
795 */
796 qtd->data_toggle = DWC2_HC_PID_DATA1;
797 qtd->control_phase = DWC2_CONTROL_SETUP;
798 }
799
800 /* Start split */
801 qtd->complete_split = 0;
802 qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
803 qtd->isoc_split_offset = 0;
804 qtd->in_process = 0;
805
806 /* Store the qtd ptr in the urb to reference the QTD */
807 urb->qtd = qtd;
808 }
809
/**
 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
 *
 * @hsotg:     The DWC HCD structure
 * @qtd:       The QTD to add
 * @qh:        In/out parameter: the QH to add to, or NULL to have one
 *             created; on success points at the QH used
 * @mem_flags: GFP flags for any QH allocation
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Finds the correct QH to place the QTD into. If it does not find a QH, it
 * will create a new QH. If the QH to which the QTD is added is not currently
 * scheduled, it is placed into the proper schedule based on its EP type.
 */
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
		     struct dwc2_qh **qh, gfp_t mem_flags)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	unsigned long flags;
	int allocated = 0;	/* whether *qh was created by this call */
	int retval;

	/*
	 * Get the QH which holds the QTD-list to insert to. Create QH if it
	 * doesn't exist.
	 */
	if (*qh == NULL) {
		/* Allocated outside the lock; dwc2_hcd_qh_create may sleep */
		*qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
		if (*qh == NULL)
			return -ENOMEM;
		allocated = 1;
	}

	spin_lock_irqsave(&hsotg->lock, flags);

	retval = dwc2_hcd_qh_add(hsotg, *qh);
	if (retval)
		goto fail;

	qtd->qh = *qh;
	list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return 0;

fail:
	/* Only tear down a QH this call created; a caller-supplied QH
	 * remains the caller's responsibility */
	if (allocated) {
		struct dwc2_qtd *qtd2, *qtd2_tmp;
		struct dwc2_qh *qh_tmp = *qh;

		*qh = NULL;
		dwc2_hcd_qh_unlink(hsotg, qh_tmp);

		/* Free each QTD in the QH's QTD list */
		list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list,
					 qtd_list_entry)
			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);

		/* Drop the lock: dwc2_hcd_qh_free must not be called with
		 * the spinlock held (see its header comment) */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		dwc2_hcd_qh_free(hsotg, qh_tmp);
	} else {
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	return retval;
}
876