dwc2_hcdintr.c revision 1.14 1 /* $NetBSD: dwc2_hcdintr.c,v 1.14 2018/08/08 07:20:44 simonb Exp $ */
2
3 /*
4 * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
5 *
6 * Copyright (C) 2004-2013 Synopsys, Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. The names of the above-listed copyright holders may not be used
18 * to endorse or promote products derived from this software without
19 * specific prior written permission.
20 *
21 * ALTERNATIVELY, this software may be distributed under the terms of the
22 * GNU General Public License ("GPL") as published by the Free Software
23 * Foundation; either version 2 of the License, or (at your option) any
24 * later version.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * This file contains the interrupt handlers for Host mode
41 */
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: dwc2_hcdintr.c,v 1.14 2018/08/08 07:20:44 simonb Exp $");
44
45 #include <sys/types.h>
46 #include <sys/pool.h>
47
48 #include <dev/usb/usb.h>
49 #include <dev/usb/usbdi.h>
50 #include <dev/usb/usbdivar.h>
51 #include <dev/usb/usb_mem.h>
52
53 #include <machine/param.h>
54
55 #include <linux/kernel.h>
56
57 #include <dwc2/dwc2.h>
58 #include <dwc2/dwc2var.h>
59
60 #include "dwc2_core.h"
61 #include "dwc2_hcd.h"
62
/*
 * If we get this many NAKs on a split transaction we'll slow down
 * retransmission. A 1 here means delay after the first NAK.
 */
#define DWC2_NAKS_BEFORE_DELAY	3
/*
 * Run-time copy of DWC2_NAKS_BEFORE_DELAY; kept as a mutable global so
 * the threshold can be patched without rebuilding (e.g. from a debugger)
 * — TODO confirm intended tuning mechanism.
 */
int dwc2_naks_before_delay = DWC2_NAKS_BEFORE_DELAY;
69
/*
 * Debug-only helper: records (current, previous) frame-number pairs
 * whenever a start-of-frame interrupt arrives with a non-consecutive
 * frame number, and dumps the collected table once it is full.
 * Compiled out unless CONFIG_USB_DWC2_TRACK_MISSED_SOFS is defined.
 */
static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
	u16 frnum = hsotg->frame_number;

	if (hsotg->frame_num_idx >= FRAME_NUM_ARRAY_SIZE) {
		/* Table full: dump it exactly once */
		if (!hsotg->dumped_frame_num_array) {
			int idx;

			dev_info(hsotg->dev, "Frame Last Frame\n");
			dev_info(hsotg->dev, "----- ----------\n");
			for (idx = 0; idx < FRAME_NUM_ARRAY_SIZE; idx++)
				dev_info(hsotg->dev, "0x%04x 0x%04x\n",
					 hsotg->frame_num_array[idx],
					 hsotg->last_frame_num_array[idx]);
			hsotg->dumped_frame_num_array = 1;
		}
	} else if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) != frnum) {
		/* Frame number jumped: log the discontinuity */
		hsotg->frame_num_array[hsotg->frame_num_idx] = frnum;
		hsotg->last_frame_num_array[hsotg->frame_num_idx] =
			hsotg->last_frame_num;
		hsotg->frame_num_idx++;
	}
	hsotg->last_frame_num = frnum;
#endif
}
100
/*
 * TT (transaction translator) buffer bookkeeping after a transaction to
 * a non-high-speed device ends with an error.  No-op when there is no
 * QH or URB to act on, or for high-speed devices (which do not go
 * through a hub's TT).
 */
static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd)
{
//	struct urb *usb_urb;

	if (!chan->qh)
		return;

	/* High-speed endpoints never sit behind a hub TT */
	if (chan->qh->dev_speed == USB_SPEED_HIGH)
		return;

	if (!qtd->urb)
		return;


	if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
		/*
		 * NOTE(review): the flag is set and then immediately
		 * cleared.  In the Linux driver this code brackets a
		 * usb_hub_clear_tt_buffer() request (the flag stays set
		 * only if that request is pending); the hub call appears
		 * to be stubbed out in this port — confirm against the
		 * upstream hcd_intr.c before relying on tt_buffer_dirty.
		 */
		chan->qh->tt_buffer_dirty = 1;
		chan->qh->tt_buffer_dirty = 0;
	}
}
122
/*
 * Handles the start-of-frame interrupt in host mode. Non-periodic
 * transactions may be queued to the DWC_otg controller for the current
 * (micro)frame. Periodic transactions may be queued to the controller
 * for the next (micro)frame.
 */
static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
{
	struct list_head *qh_entry;
	struct dwc2_qh *qh;
	enum dwc2_transaction_type tr_type;

	/* Clear interrupt (GINTSTS bits are write-1-to-clear) */
	DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_SOF);

#ifdef DEBUG_SOF
	dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
#endif

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	dwc2_track_missed_sofs(hsotg);

	/* Determine whether any periodic QHs should be executed */
	qh_entry = hsotg->periodic_sched_inactive.next;
	while (qh_entry != &hsotg->periodic_sched_inactive) {
		qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
		/*
		 * Advance the cursor now: the list_move() below would
		 * unlink the current entry from this list.
		 */
		qh_entry = qh_entry->next;
		if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number))
			/*
			 * Move QH to the ready list to be executed next
			 * (micro)frame
			 */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_ready);
	}
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);
}
163
/*
 * Handles the Rx FIFO Level Interrupt, which indicates that there is
 * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
 * memory if the DWC_otg controller is operating in Slave mode.
 */
static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
{
	u32 grxsts, chnum, bcnt, pktsts;
	struct dwc2_host_chan *chan;

	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");

	/* Read (and pop) the top entry of the Rx status queue */
	grxsts = DWC2_READ_4(hsotg, GRXSTSP);
	chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
	chan = hsotg->hc_ptr_array[chnum];
	if (!chan) {
		dev_err(hsotg->dev, "Unable to get corresponding channel\n");
		return;
	}

	bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
	pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;

	/* Packet Status */
	if (dbg_perio()) {
		dev_vdbg(hsotg->dev, " Ch num = %d\n", chnum);
		dev_vdbg(hsotg->dev, " Count = %d\n", bcnt);
		dev_vdbg(hsotg->dev, " DPID = %d, chan.dpid = %d\n",
			 (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT,
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, " PStatus = %d\n", pktsts);
	}

	switch (pktsts) {
	case GRXSTS_PKTSTS_HCHIN:
		/* Read the data into the host buffer */
		if (bcnt > 0) {
			dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);

			/* Update the HC fields for the next packet received */
			chan->xfer_count += bcnt;
			chan->xfer_buf += bcnt;
		}
		break;
	case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
	case GRXSTS_PKTSTS_DATATOGGLEERR:
	case GRXSTS_PKTSTS_HCHHALTED:
		/* Handled in interrupt, just ignore data */
		break;
	default:
		dev_err(hsotg->dev,
			"RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
		break;
	}
}
220
/*
 * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
 * data packets may be written to the FIFO for OUT transfers. More requests
 * may be written to the non-periodic request queue for IN transfers. This
 * interrupt is enabled only in Slave mode.
 */
static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
	/* Re-run the non-periodic schedule now that there is FIFO space */
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
}
232
/*
 * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
 * packets may be written to the FIFO for OUT transfers. More requests may be
 * written to the periodic request queue for IN transfers. This interrupt is
 * enabled only in Slave mode.
 */
static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
	/* Re-run the periodic schedule now that there is FIFO space */
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
}
245
/*
 * Called from the port interrupt handler when HPRT0.PrtEna transitions
 * to 1.  Recomputes the frame interval and, when low-power FS/LS PHY
 * clocking is supported, selects the matching PHY clock — resetting the
 * port again if any clock setting had to change.
 *
 * hsotg:        the controller state
 * hprt0:        HPRT0 value read by the caller
 * hprt0_modify: caller's copy of HPRT0 with the write-1-to-clear change
 *               bits masked off; HPRT0_RST may be or'ed into it here
 */
static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
			      u32 *hprt0_modify)
{
	struct dwc2_core_params *params = hsotg->core_params;
	int do_reset = 0;
	u32 usbcfg;
	u32 prtspd;
	u32 hcfg;
	u32 fslspclksel;
	u32 hfir;

	dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Every time when port enables calculate HFIR.FrInterval */
	hfir = DWC2_READ_4(hsotg, HFIR);
	hfir &= ~HFIR_FRINT_MASK;
	hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
		HFIR_FRINT_MASK;
	DWC2_WRITE_4(hsotg, HFIR, hfir);

	/* Check if we need to adjust the PHY clock speed for low power */
	if (!params->host_support_fs_ls_low_power) {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;

		/* Propagate the status change to the root-hub code */
		dwc2_root_intr(hsotg->hsotg_sc);
		return;
	}

	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
	prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;

	if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
		/* Low power */
		if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
			/* Set PHY low power clock select for FS/LS devices */
			usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
			do_reset = 1;
		}

		hcfg = DWC2_READ_4(hsotg, HCFG);
		fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
			      HCFG_FSLSPCLKSEL_SHIFT;

		if (prtspd == HPRT0_SPD_LOW_SPEED &&
		    params->host_ls_low_power_phy_clk ==
		    DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
			/* 6 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 6 MHz\n");
			/* Only reprogram (and reset) if the clock differs */
			if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				DWC2_WRITE_4(hsotg, HCFG, hcfg);
				do_reset = 1;
			}
		} else {
			/* 48 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 48 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				DWC2_WRITE_4(hsotg, HCFG, hcfg);
				do_reset = 1;
			}
		}
	} else {
		/* Not low power */
		if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
			usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
			do_reset = 1;
		}
	}

	if (do_reset) {
		/*
		 * A clock setting changed: reset the port again and let
		 * the deferred reset work finish the sequence ~60 ms
		 * from now.
		 */
		*hprt0_modify |= HPRT0_RST;
		DWC2_WRITE_4(hsotg, HPRT0, *hprt0_modify);
		queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
				   msecs_to_jiffies(60));
	} else {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;
		dwc2_root_intr(hsotg->hsotg_sc);

	}
}
337
/*
 * There are multiple conditions that can cause a port interrupt. This function
 * determines which interrupt conditions have occurred and handles them
 * appropriately.
 */
static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
{
	u32 hprt0;
	u32 hprt0_modify;

	dev_vdbg(hsotg->dev, "--Port Interrupt--\n");

	hprt0 = DWC2_READ_4(hsotg, HPRT0);
	hprt0_modify = hprt0;

	/*
	 * Clear appropriate bits in HPRT0 to clear the interrupt bit in
	 * GINTSTS.  The change bits are write-1-to-clear, so they are
	 * masked out of hprt0_modify and only or'ed back in one at a
	 * time below, as each condition is handled.
	 */
	hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
			  HPRT0_OVRCURRCHG);

	/*
	 * Port Connect Detected
	 * Set flag and clear if detected
	 */
	if (hprt0 & HPRT0_CONNDET) {
		DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify | HPRT0_CONNDET);

		dev_vdbg(hsotg->dev,
			 "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
			 hprt0);
		dwc2_hcd_connect(hsotg);

		/*
		 * The Hub driver asserts a reset when it sees port connect
		 * status change flag
		 */
	}

	/*
	 * Port Enable Changed
	 * Clear if detected - Set internal flag if disabled
	 */
	if (hprt0 & HPRT0_ENACHG) {
		DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify | HPRT0_ENACHG);
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
			 hprt0, !!(hprt0 & HPRT0_ENA));
		if (hprt0 & HPRT0_ENA) {
			hsotg->new_connection = true;
			dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
		} else {
			hsotg->flags.b.port_enable_change = 1;
			if (hsotg->core_params->dma_desc_fs_enable) {
				u32 hcfg;

				/*
				 * Port was disabled: fall back from FS
				 * descriptor DMA until the next connect.
				 */
				hsotg->core_params->dma_desc_enable = 0;
				hsotg->new_connection = false;
				hcfg = DWC2_READ_4(hsotg, HCFG);
				hcfg &= ~HCFG_DESCDMA;
				DWC2_WRITE_4(hsotg, HCFG, hcfg);
			}
		}
	}

	/* Overcurrent Change Interrupt */
	if (hprt0 & HPRT0_OVRCURRCHG) {
		DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify | HPRT0_OVRCURRCHG);
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
			 hprt0);
		hsotg->flags.b.port_over_current_change = 1;
	}

	/* Notify the root-hub code only if something actually changed */
	if (hsotg->flags.b.port_connect_status_change ||
	    hsotg->flags.b.port_enable_change ||
	    hsotg->flags.b.port_over_current_change)
		dwc2_root_intr(hsotg->hsotg_sc);
}
418
419 /*
420 * Gets the actual length of a transfer after the transfer halts. halt_status
421 * holds the reason for the halt.
422 *
423 * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
424 * is set to 1 upon return if less than the requested number of bytes were
425 * transferred. short_read may also be NULL on entry, in which case it remains
426 * unchanged.
427 */
428 static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
429 struct dwc2_host_chan *chan, int chnum,
430 struct dwc2_qtd *qtd,
431 enum dwc2_halt_status halt_status,
432 int *short_read)
433 {
434 u32 hctsiz, count, length;
435
436 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
437
438 if (halt_status == DWC2_HC_XFER_COMPLETE) {
439 if (chan->ep_is_in) {
440 count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
441 TSIZ_XFERSIZE_SHIFT;
442 length = chan->xfer_len - count;
443 if (short_read != NULL)
444 *short_read = (count != 0);
445 } else if (chan->qh->do_split) {
446 length = qtd->ssplit_out_xfer_count;
447 } else {
448 length = chan->xfer_len;
449 }
450 } else {
451 /*
452 * Must use the hctsiz.pktcnt field to determine how much data
453 * has been transferred. This field reflects the number of
454 * packets that have been transferred via the USB. This is
455 * always an integral number of packets if the transfer was
456 * halted before its normal completion. (Can't use the
457 * hctsiz.xfersize field because that reflects the number of
458 * bytes transferred via the AHB, not the USB).
459 */
460 count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
461 length = (chan->start_pkt_count - count) * chan->max_packet;
462 }
463
464 return length;
465 }
466
/**
 * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
 * Complete interrupt on the host channel. Updates the actual_length field
 * of the URB based on the number of bytes transferred via the host channel.
 * Sets the URB status if the data transfer is finished.
 *
 * Return: 1 if the data transfer specified by the URB is completely finished,
 * 0 otherwise
 */
static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 struct dwc2_hcd_urb *urb,
				 struct dwc2_qtd *qtd)
{
	int xfer_done = 0;
	int short_read = 0;
	int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
						      DWC2_HC_XFER_COMPLETE,
						      &short_read);

	/* Never let actual_length exceed the URB's buffer length */
	if (urb->actual_length + xfer_length > urb->length) {
		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
		xfer_length = urb->length - urb->actual_length;
	}

	/* Non DWORD-aligned buffer case handling */
	if (chan->align_buf && xfer_length) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		/*
		 * Sync the bounce buffer for CPU access, copy the received
		 * data back to the caller's buffer (IN only), then return
		 * it to device ownership.
		 */
		usb_syncmem(urb->usbdma, 0, chan->qh->dw_align_buf_size,
		    chan->ep_is_in ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		if (chan->ep_is_in)
			memcpy(urb->buf + urb->actual_length,
			       chan->qh->dw_align_buf, xfer_length);
		usb_syncmem(urb->usbdma, 0, chan->qh->dw_align_buf_size,
		    chan->ep_is_in ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}

	dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
		 urb->actual_length, xfer_length);
	urb->actual_length += xfer_length;

	/*
	 * A bulk OUT URB flagged for zero-packet termination that ended
	 * exactly on a max-packet boundary is not finished yet: a ZLP
	 * must still be sent.
	 */
	if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
	    (urb->flags & URB_SEND_ZERO_PACKET) &&
	    urb->actual_length >= urb->length &&
	    !(urb->length % chan->max_packet)) {
		xfer_done = 0;
	} else if (short_read || urb->actual_length >= urb->length) {
		xfer_done = 1;
		urb->status = 0;
	}

	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
	dev_vdbg(hsotg->dev, "  chan->xfer_len %d\n", chan->xfer_len);
	dev_vdbg(hsotg->dev, "  hctsiz.xfersize %d\n",
		 (DWC2_READ_4(hsotg, HCTSIZ(chnum)) & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n", urb->length);
	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n", urb->actual_length);
	dev_vdbg(hsotg->dev, "  short_read %d, xfer_done %d\n", short_read,
		 xfer_done);

	return xfer_done;
}
532
533 /*
534 * Save the starting data toggle for the next transfer. The data toggle is
535 * saved in the QH for non-control transfers and it's saved in the QTD for
536 * control transfers.
537 */
538 void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
539 struct dwc2_host_chan *chan, int chnum,
540 struct dwc2_qtd *qtd)
541 {
542 u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
543 u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
544
545 if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
546 if (pid == TSIZ_SC_MC_PID_DATA0)
547 chan->qh->data_toggle = DWC2_HC_PID_DATA0;
548 else
549 chan->qh->data_toggle = DWC2_HC_PID_DATA1;
550 } else {
551 if (pid == TSIZ_SC_MC_PID_DATA0)
552 qtd->data_toggle = DWC2_HC_PID_DATA0;
553 else
554 qtd->data_toggle = DWC2_HC_PID_DATA1;
555 }
556 }
557
/**
 * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
 * the transfer is stopped for any reason. The fields of the current entry in
 * the frame descriptor array are set based on the transfer state and the input
 * halt_status. Completes the Isochronous URB if all the URB frames have been
 * completed.
 *
 * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
 * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
 */
static enum dwc2_halt_status dwc2_update_isoc_urb_state(
		struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		int chnum, struct dwc2_qtd *qtd,
		enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_hcd_urb *urb = qtd->urb;

	if (!urb)
		return DWC2_HC_XFER_NO_HALT_STATUS;

	frame_desc = &urb->iso_descs[qtd->isoc_frame_index];

	switch (halt_status) {
	case DWC2_HC_XFER_COMPLETE:
		/* Frame finished normally: record its actual length */
		frame_desc->status = 0;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		if (chan->align_buf && frame_desc->actual_length) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			usb_dma_t *ud = &chan->qh->dw_align_buf_usbdma;

			/*
			 * Sync the bounce buffer for CPU access, copy the
			 * data back into the URB (IN only), then return the
			 * buffer to device ownership.
			 */
			usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
			    chan->ep_is_in ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			if (chan->ep_is_in)
				memcpy(urb->buf + frame_desc->offset +
				       qtd->isoc_split_offset,
				       chan->qh->dw_align_buf,
				       frame_desc->actual_length);
			usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
			    chan->ep_is_in ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
		break;
	case DWC2_HC_XFER_FRAME_OVERRUN:
		/* Missed the (micro)frame: no data was moved */
		urb->error_count++;
		if (chan->ep_is_in)
			frame_desc->status = -ENOSR;
		else
			frame_desc->status = -ECOMM;
		frame_desc->actual_length = 0;
		break;
	case DWC2_HC_XFER_BABBLE_ERR:
		urb->error_count++;
		frame_desc->status = -EOVERFLOW;
		/* Don't need to update actual_length in this case */
		break;
	case DWC2_HC_XFER_XACT_ERR:
		/* Transaction error: record whatever did transfer */
		urb->error_count++;
		frame_desc->status = -EPROTO;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		if (chan->align_buf && frame_desc->actual_length) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			usb_dma_t *ud = &chan->qh->dw_align_buf_usbdma;

			usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
			    chan->ep_is_in ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			if (chan->ep_is_in)
				memcpy(urb->buf + frame_desc->offset +
				       qtd->isoc_split_offset,
				       chan->qh->dw_align_buf,
				       frame_desc->actual_length);
			usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
			    chan->ep_is_in ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}

		/* Skip whole frame */
		if (chan->qh->do_split &&
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
		}

		break;
	default:
		dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
			halt_status);
		break;
	}

	if (++qtd->isoc_frame_index == urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers. The individual
		 * frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		halt_status = DWC2_HC_XFER_URB_COMPLETE;
	} else {
		halt_status = DWC2_HC_XFER_COMPLETE;
	}

	return halt_status;
}
672
/*
 * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
 * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
 * still linked to the QH, the QH is added to the end of the inactive
 * non-periodic schedule. For periodic QHs, removes the QH from the periodic
 * schedule if no more QTDs are linked to the QH.
 */
static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			       int free_qtd)
{
	int continue_split = 0;
	struct dwc2_qtd *qtd;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "  %s(%p,%p,%d)\n", __func__,
			 hsotg, qh, free_qtd);

	if (list_empty(&qh->qtd_list)) {
		dev_dbg(hsotg->dev, "## QTD list empty ##\n");
		goto no_qtd;
	}

	qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);

	/*
	 * A split transaction that is mid-stream (complete-split pending,
	 * or an isoc split positioned MID/END) must be continued when the
	 * QH is rescheduled.
	 */
	if (qtd->complete_split)
		continue_split = 1;
	else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
		 qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
		continue_split = 1;

	if (free_qtd) {
		/* Freeing the QTD means there is no split left to continue */
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		continue_split = 0;
	}

no_qtd:
	/* Detach the channel from the QH before rescheduling it */
	if (qh->channel)
		qh->channel->align_buf = 0;
	qh->channel = NULL;
	dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
}
714
/**
 * dwc2_release_channel() - Releases a host channel for use by other transfers
 *
 * @hsotg:       The HCD state structure
 * @chan:        The host channel to release
 * @qtd:         The QTD associated with the host channel. This QTD may be
 *               freed if the transfer is complete or an error has occurred.
 * @halt_status: Reason the channel is being released. This status
 *               determines the actions taken by this function.
 *
 * Also attempts to select and queue more transactions since at least one host
 * channel is available.
 */
static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan,
				 struct dwc2_qtd *qtd,
				 enum dwc2_halt_status halt_status)
{
	enum dwc2_transaction_type tr_type;
	u32 haintmsk;
	int free_qtd = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "  %s: channel %d, halt_status %d\n",
			 __func__, chan->hc_num, halt_status);

	/* Decide whether the first QTD should be freed with the QH */
	switch (halt_status) {
	case DWC2_HC_XFER_URB_COMPLETE:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_AHB_ERR:
	case DWC2_HC_XFER_STALL:
	case DWC2_HC_XFER_BABBLE_ERR:
		/* Fatal per-transfer errors: drop the QTD */
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_XACT_ERR:
		/* Give up after three consecutive transaction errors */
		if (qtd && qtd->error_count >= 3) {
			dev_vdbg(hsotg->dev,
				 "  Complete URB with transaction error\n");
			free_qtd = 1;
			dwc2_host_complete(hsotg, qtd, -EPROTO);
		}
		break;
	case DWC2_HC_XFER_URB_DEQUEUE:
		/*
		 * The QTD has already been removed and the QH has been
		 * deactivated. Don't want to do anything except release the
		 * host channel and try to queue more transfers.
		 */
		goto cleanup;
	case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
		dev_vdbg(hsotg->dev, "  Complete URB with I/O error\n");
		free_qtd = 1;
		dwc2_host_complete(hsotg, qtd, -EIO);
		break;
	case DWC2_HC_XFER_NO_HALT_STATUS:
	default:
		break;
	}

	dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);

cleanup:
	/*
	 * Release the host channel for use by other transfers. The cleanup
	 * function clears the channel interrupt enables and conditions, so
	 * there's no need to clear the Channel Halted interrupt separately.
	 */
	if (!list_empty(&chan->hc_list_entry))
		list_del(&chan->hc_list_entry);
	dwc2_hc_cleanup(hsotg, chan);
	list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);

	/* Return the channel's reservation to the relevant scheduler pool */
	if (hsotg->core_params->uframe_sched > 0) {
		hsotg->available_host_channels++;
	} else {
		switch (chan->ep_type) {
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			hsotg->non_periodic_channels--;
			break;
		default:
			/*
			 * Don't release reservations for periodic channels
			 * here. That's done when a periodic transfer is
			 * descheduled (i.e. when the QH is removed from the
			 * periodic schedule).
			 */
			break;
		}
	}

	/* Mask further interrupts from this (now idle) channel */
	haintmsk = DWC2_READ_4(hsotg, HAINTMSK);
	haintmsk &= ~(1 << chan->hc_num);
	DWC2_WRITE_4(hsotg, HAINTMSK, haintmsk);

	/* Try to queue more transfers now that there's a free channel */
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);
}
816
/*
 * Halts a host channel. If the channel cannot be halted immediately because
 * the request queue is full, this function ensures that the FIFO empty
 * interrupt for the appropriate queue is enabled so that the halt request can
 * be queued when there is space in the request queue.
 *
 * This function may also be called in DMA mode. In that case, the channel is
 * simply released since the core always halts the channel automatically in
 * DMA mode.
 */
static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
			      enum dwc2_halt_status halt_status)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (hsotg->core_params->dma_enable > 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
		return;
	}

	/* Slave mode processing */
	dwc2_hc_halt(hsotg, chan, halt_status);

	if (chan->halt_on_queue) {
		u32 gintmsk;

		dev_vdbg(hsotg->dev, "Halt on queue\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			/*
			 * Make sure the Non-periodic Tx FIFO empty interrupt
			 * is enabled so that the non-periodic schedule will
			 * be processed
			 */
			gintmsk = DWC2_READ_4(hsotg, GINTMSK);
			gintmsk |= GINTSTS_NPTXFEMP;
			DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
		} else {
			dev_vdbg(hsotg->dev, "isoc/intr\n");
			/*
			 * Move the QH from the periodic queued schedule to
			 * the periodic assigned schedule. This allows the
			 * halt to be queued when the periodic schedule is
			 * processed.
			 */
			list_move(&chan->qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);

			/*
			 * Make sure the Periodic Tx FIFO Empty interrupt is
			 * enabled so that the periodic schedule will be
			 * processed
			 */
			gintmsk = DWC2_READ_4(hsotg, GINTMSK);
			gintmsk |= GINTSTS_PTXFEMP;
			DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
		}
	}
}
881
/*
 * Performs common cleanup for non-periodic transfers after a Transfer
 * Complete interrupt. This function should be called after any endpoint type
 * specific handling is finished to release the host channel.
 */
static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
					    struct dwc2_host_chan *chan,
					    int chnum, struct dwc2_qtd *qtd,
					    enum dwc2_halt_status halt_status)
{
	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Successful completion resets the consecutive-error counter */
	qtd->error_count = 0;

	if (chan->hcint & HCINTMSK_NYET) {
		/*
		 * Got a NYET on the last transaction of the transfer. This
		 * means that the endpoint should be in the PING state at the
		 * beginning of the next transfer.
		 */
		dev_vdbg(hsotg->dev, "got NYET\n");
		chan->qh->ping_state = 1;
	}

	/*
	 * Always halt and release the host channel to make it available for
	 * more transfers. There may still be more phases for a control
	 * transfer or more data packets for a bulk transfer at this point,
	 * but the host channel is still halted. A channel will be reassigned
	 * to the transfer when the non-periodic schedule is processed after
	 * the channel is released. This allows transactions to be queued
	 * properly via dwc2_hcd_queue_transactions, which also enables the
	 * Tx FIFO Empty interrupt if necessary.
	 */
	if (chan->ep_is_in) {
		/*
		 * IN transfers in Slave mode require an explicit disable to
		 * halt the channel. (In DMA mode, this call simply releases
		 * the channel.)
		 */
		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
	} else {
		/*
		 * The channel is automatically disabled by the core for OUT
		 * transfers in Slave mode
		 */
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
	}
}
931
932 /*
933 * Performs common cleanup for periodic transfers after a Transfer Complete
934 * interrupt. This function should be called after any endpoint type specific
935 * handling is finished to release the host channel.
936 */
937 static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
938 struct dwc2_host_chan *chan, int chnum,
939 struct dwc2_qtd *qtd,
940 enum dwc2_halt_status halt_status)
941 {
942 u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
943
944 qtd->error_count = 0;
945
946 if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
947 /* Core halts channel in these cases */
948 dwc2_release_channel(hsotg, chan, qtd, halt_status);
949 else
950 /* Flush any outstanding requests from the Tx queue */
951 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
952 }
953
/*
 * Handles transfer completion for one complete-split of an isochronous
 * IN split transfer.  Accumulates the received length into the current
 * frame descriptor and advances/releases the channel as appropriate.
 *
 * Return: 1 if the host channel was released, 0 otherwise.
 */
static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u32 len;

	if (!qtd->urb)
		return 0;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
					  DWC2_HC_XFER_COMPLETE, NULL);
	if (!len) {
		/* Zero-length CSPLIT: restart the split from the top */
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
		return 0;
	}

	frame_desc->actual_length += len;

	if (chan->align_buf) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		/*
		 * Copy this chunk out of the bounce buffer into the URB at
		 * the current split offset, syncing around the CPU access.
		 */
		usb_syncmem(qtd->urb->usbdma, chan->qh->dw_align_buf_dma,
		    chan->qh->dw_align_buf_size, BUS_DMASYNC_POSTREAD);
		memcpy(qtd->urb->buf + frame_desc->offset +
		       qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
		usb_syncmem(qtd->urb->usbdma, chan->qh->dw_align_buf_dma,
		    chan->qh->dw_align_buf_size, BUS_DMASYNC_PREREAD);
	}

	qtd->isoc_split_offset += len;

	if (frame_desc->actual_length >= frame_desc->length) {
		/* This frame is done: move on to the next one */
		frame_desc->status = 0;
		qtd->isoc_frame_index++;
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
	}

	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
		/* All frames done: complete the URB */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_URB_COMPLETE);
	} else {
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_NO_HALT_STATUS);
	}

	return 1;	/* Indicates that channel released */
}
1005
1006 /*
1007 * Handles a host channel Transfer Complete interrupt. This handler may be
1008 * called in either DMA mode or Slave mode.
1009 */
1010 static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
1011 struct dwc2_host_chan *chan, int chnum,
1012 struct dwc2_qtd *qtd)
1013 {
1014 struct dwc2_hcd_urb *urb = qtd->urb;
1015 enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
1016 int pipe_type;
1017 int urb_xfer_done;
1018
1019 if (dbg_hc(chan))
1020 dev_vdbg(hsotg->dev,
1021 "--Host Channel %d Interrupt: Transfer Complete--\n",
1022 chnum);
1023
1024 if (!urb)
1025 goto handle_xfercomp_done;
1026
1027 pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1028
1029 if (hsotg->core_params->dma_desc_enable > 0) {
1030 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
1031 if (pipe_type == USB_ENDPOINT_XFER_ISOC)
1032 /* Do not disable the interrupt, just clear it */
1033 return;
1034 goto handle_xfercomp_done;
1035 }
1036
1037 /* Handle xfer complete on CSPLIT */
1038 if (chan->qh->do_split) {
1039 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
1040 hsotg->core_params->dma_enable > 0) {
1041 if (qtd->complete_split &&
1042 dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
1043 qtd))
1044 goto handle_xfercomp_done;
1045 } else {
1046 qtd->complete_split = 0;
1047 }
1048 }
1049
1050 /* Update the QTD and URB states */
1051 switch (pipe_type) {
1052 case USB_ENDPOINT_XFER_CONTROL:
1053 switch (qtd->control_phase) {
1054 case DWC2_CONTROL_SETUP:
1055 if (urb->length > 0)
1056 qtd->control_phase = DWC2_CONTROL_DATA;
1057 else
1058 qtd->control_phase = DWC2_CONTROL_STATUS;
1059 dev_vdbg(hsotg->dev,
1060 " Control setup transaction done\n");
1061 halt_status = DWC2_HC_XFER_COMPLETE;
1062 break;
1063 case DWC2_CONTROL_DATA:
1064 urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
1065 chnum, urb, qtd);
1066 if (urb_xfer_done) {
1067 qtd->control_phase = DWC2_CONTROL_STATUS;
1068 dev_vdbg(hsotg->dev,
1069 " Control data transfer done\n");
1070 } else {
1071 dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1072 qtd);
1073 }
1074 halt_status = DWC2_HC_XFER_COMPLETE;
1075 break;
1076 case DWC2_CONTROL_STATUS:
1077 dev_vdbg(hsotg->dev, " Control transfer complete\n");
1078 if (urb->status == -EINPROGRESS)
1079 urb->status = 0;
1080 dwc2_host_complete(hsotg, qtd, urb->status);
1081 halt_status = DWC2_HC_XFER_URB_COMPLETE;
1082 break;
1083 }
1084
1085 dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1086 halt_status);
1087 break;
1088 case USB_ENDPOINT_XFER_BULK:
1089 dev_vdbg(hsotg->dev, " Bulk transfer complete\n");
1090 urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1091 qtd);
1092 if (urb_xfer_done) {
1093 dwc2_host_complete(hsotg, qtd, urb->status);
1094 halt_status = DWC2_HC_XFER_URB_COMPLETE;
1095 } else {
1096 halt_status = DWC2_HC_XFER_COMPLETE;
1097 }
1098
1099 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1100 dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1101 halt_status);
1102 break;
1103 case USB_ENDPOINT_XFER_INT:
1104 dev_vdbg(hsotg->dev, " Interrupt transfer complete\n");
1105 urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1106 qtd);
1107
1108 /*
1109 * Interrupt URB is done on the first transfer complete
1110 * interrupt
1111 */
1112 if (urb_xfer_done) {
1113 dwc2_host_complete(hsotg, qtd, urb->status);
1114 halt_status = DWC2_HC_XFER_URB_COMPLETE;
1115 } else {
1116 halt_status = DWC2_HC_XFER_COMPLETE;
1117 }
1118
1119 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1120 dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1121 halt_status);
1122 break;
1123 case USB_ENDPOINT_XFER_ISOC:
1124 if (dbg_perio())
1125 dev_vdbg(hsotg->dev, " Isochronous transfer complete\n");
1126 if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
1127 halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1128 chnum, qtd, DWC2_HC_XFER_COMPLETE);
1129 dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1130 halt_status);
1131 break;
1132 }
1133
1134 handle_xfercomp_done:
1135 disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
1136 }
1137
1138 /*
1139 * Handles a host channel STALL interrupt. This handler may be called in
1140 * either DMA mode or Slave mode.
1141 */
1142 static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
1143 struct dwc2_host_chan *chan, int chnum,
1144 struct dwc2_qtd *qtd)
1145 {
1146 struct dwc2_hcd_urb *urb = qtd->urb;
1147 int pipe_type;
1148
1149 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
1150 chnum);
1151
1152 if (hsotg->core_params->dma_desc_enable > 0) {
1153 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1154 DWC2_HC_XFER_STALL);
1155 goto handle_stall_done;
1156 }
1157
1158 if (!urb)
1159 goto handle_stall_halt;
1160
1161 pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1162
1163 if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
1164 dwc2_host_complete(hsotg, qtd, -EPIPE);
1165
1166 if (pipe_type == USB_ENDPOINT_XFER_BULK ||
1167 pipe_type == USB_ENDPOINT_XFER_INT) {
1168 dwc2_host_complete(hsotg, qtd, -EPIPE);
1169 /*
1170 * USB protocol requires resetting the data toggle for bulk
1171 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1172 * setup command is issued to the endpoint. Anticipate the
1173 * CLEAR_FEATURE command since a STALL has occurred and reset
1174 * the data toggle now.
1175 */
1176 chan->qh->data_toggle = 0;
1177 }
1178
1179 handle_stall_halt:
1180 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
1181
1182 handle_stall_done:
1183 disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
1184 }
1185
1186 /*
1187 * Updates the state of the URB when a transfer has been stopped due to an
1188 * abnormal condition before the transfer completes. Modifies the
1189 * actual_length field of the URB to reflect the number of bytes that have
1190 * actually been transferred via the host channel.
1191 */
1192 static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
1193 struct dwc2_host_chan *chan, int chnum,
1194 struct dwc2_hcd_urb *urb,
1195 struct dwc2_qtd *qtd,
1196 enum dwc2_halt_status halt_status)
1197 {
1198 u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
1199 qtd, halt_status, NULL);
1200
1201 if (urb->actual_length + xfer_length > urb->length) {
1202 dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
1203 xfer_length = urb->length - urb->actual_length;
1204 }
1205
1206 /* Non DWORD-aligned buffer case handling */
1207 if (chan->align_buf && xfer_length && chan->ep_is_in) {
1208 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
1209
1210 usb_dma_t *ud = &chan->qh->dw_align_buf_usbdma;
1211
1212 usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
1213 chan->ep_is_in ?
1214 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1215 if (chan->ep_is_in)
1216 memcpy(urb->buf + urb->actual_length,
1217 chan->qh->dw_align_buf,
1218 xfer_length);
1219 usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
1220 chan->ep_is_in ?
1221 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1222 }
1223
1224 urb->actual_length += xfer_length;
1225
1226 dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
1227 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
1228 dev_vdbg(hsotg->dev, " chan->start_pkt_count %d\n",
1229 chan->start_pkt_count);
1230 dev_vdbg(hsotg->dev, " hctsiz.pktcnt %d\n",
1231 (DWC2_READ_4(hsotg, HCTSIZ(chnum)) & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
1232 dev_vdbg(hsotg->dev, " chan->max_packet %d\n", chan->max_packet);
1233 dev_vdbg(hsotg->dev, " bytes_transferred %d\n",
1234 xfer_length);
1235 dev_vdbg(hsotg->dev, " urb->actual_length %d\n",
1236 urb->actual_length);
1237 dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n",
1238 urb->length);
1239 }
1240
1241 /*
1242 * Handles a host channel NAK interrupt. This handler may be called in either
1243 * DMA mode or Slave mode.
1244 */
1245 static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1246 struct dwc2_host_chan *chan, int chnum,
1247 struct dwc2_qtd *qtd)
1248 {
1249 if (!qtd) {
1250 dev_dbg(hsotg->dev, "%s: qtd is NULL\n", __func__);
1251 return;
1252 }
1253
1254 if (!qtd->urb) {
1255 dev_dbg(hsotg->dev, "%s: qtd->urb is NULL\n", __func__);
1256 return;
1257 }
1258
1259 if (dbg_hc(chan))
1260 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
1261 chnum);
1262
1263 /*
1264 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
1265 * interrupt. Re-start the SSPLIT transfer.
1266 *
1267 * Normally for non-periodic transfers we'll retry right away, but to
1268 * avoid interrupt storms we'll wait before retrying if we've got
1269 * several NAKs. If we didn't do this we'd retry directly from the
1270 * interrupt handler and could end up quickly getting another
1271 * interrupt (another NAK), which we'd retry.
1272 *
1273 * Note that in DMA mode software only gets involved to re-send NAKed
1274 * transfers for split transactions unless the core is missing OUT NAK
1275 * enhancement.
1276 */
1277 if (chan->do_split) {
1278 /*
1279 * When we get control/bulk NAKs then remember this so we holdoff on
1280 * this qh until the beginning of the next frame
1281 */
1282 switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1283 case USB_ENDPOINT_XFER_CONTROL:
1284 case USB_ENDPOINT_XFER_BULK:
1285 chan->qh->nak_frame = dwc2_hcd_get_frame_number(hsotg);
1286 break;
1287 }
1288
1289 if (chan->complete_split)
1290 qtd->error_count = 0;
1291 qtd->complete_split = 0;
1292 qtd->num_naks++;
1293 qtd->qh->want_wait = qtd->num_naks >= dwc2_naks_before_delay;
1294 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1295 goto handle_nak_done;
1296 }
1297
1298 switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1299 case USB_ENDPOINT_XFER_CONTROL:
1300 case USB_ENDPOINT_XFER_BULK:
1301 if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
1302 /*
1303 * NAK interrupts are enabled on bulk/control IN
1304 * transfers in DMA mode for the sole purpose of
1305 * resetting the error count after a transaction error
1306 * occurs. The core will continue transferring data.
1307 */
1308 qtd->error_count = 0;
1309 break;
1310 }
1311
1312 /*
1313 * NAK interrupts normally occur during OUT transfers in DMA
1314 * or Slave mode. For IN transfers, more requests will be
1315 * queued as request queue space is available.
1316 */
1317 qtd->error_count = 0;
1318
1319 if (hsotg->core_params->dma_enable > 0 && !chan->ep_is_in) {
1320 /*
1321 * Avoid interrupt storms.
1322 */
1323 qtd->qh->want_wait = 1;
1324 } else if (!chan->qh->ping_state) {
1325 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1326 qtd, DWC2_HC_XFER_NAK);
1327 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1328
1329 if (chan->speed == USB_SPEED_HIGH)
1330 chan->qh->ping_state = 1;
1331 }
1332
1333 /*
1334 * Halt the channel so the transfer can be re-started from
1335 * the appropriate point or the PING protocol will
1336 * start/continue
1337 */
1338 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1339 break;
1340 case USB_ENDPOINT_XFER_INT:
1341 qtd->error_count = 0;
1342 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1343 break;
1344 case USB_ENDPOINT_XFER_ISOC:
1345 /* Should never get called for isochronous transfers */
1346 dev_err(hsotg->dev, "NACK interrupt for ISOC transfer\n");
1347 break;
1348 }
1349
1350 handle_nak_done:
1351 disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
1352 }
1353
1354 /*
1355 * Handles a host channel ACK interrupt. This interrupt is enabled when
1356 * performing the PING protocol in Slave mode, when errors occur during
1357 * either Slave mode or DMA mode, and during Start Split transactions.
1358 */
1359 static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
1360 struct dwc2_host_chan *chan, int chnum,
1361 struct dwc2_qtd *qtd)
1362 {
1363 struct dwc2_hcd_iso_packet_desc *frame_desc;
1364
1365 if (dbg_hc(chan))
1366 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
1367 chnum);
1368
1369 if (chan->do_split) {
1370 /* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
1371 if (!chan->ep_is_in &&
1372 chan->data_pid_start != DWC2_HC_PID_SETUP)
1373 qtd->ssplit_out_xfer_count = chan->xfer_len;
1374
1375 if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
1376 qtd->complete_split = 1;
1377 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1378 } else {
1379 /* ISOC OUT */
1380 switch (chan->xact_pos) {
1381 case DWC2_HCSPLT_XACTPOS_ALL:
1382 break;
1383 case DWC2_HCSPLT_XACTPOS_END:
1384 qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
1385 qtd->isoc_split_offset = 0;
1386 break;
1387 case DWC2_HCSPLT_XACTPOS_BEGIN:
1388 case DWC2_HCSPLT_XACTPOS_MID:
1389 /*
1390 * For BEGIN or MID, calculate the length for
1391 * the next microframe to determine the correct
1392 * SSPLIT token, either MID or END
1393 */
1394 frame_desc = &qtd->urb->iso_descs[
1395 qtd->isoc_frame_index];
1396 qtd->isoc_split_offset += 188;
1397
1398 if (frame_desc->length - qtd->isoc_split_offset
1399 <= 188)
1400 qtd->isoc_split_pos =
1401 DWC2_HCSPLT_XACTPOS_END;
1402 else
1403 qtd->isoc_split_pos =
1404 DWC2_HCSPLT_XACTPOS_MID;
1405 break;
1406 }
1407 }
1408 } else {
1409 qtd->error_count = 0;
1410
1411 if (chan->qh->ping_state) {
1412 chan->qh->ping_state = 0;
1413 /*
1414 * Halt the channel so the transfer can be re-started
1415 * from the appropriate point. This only happens in
1416 * Slave mode. In DMA mode, the ping_state is cleared
1417 * when the transfer is started because the core
1418 * automatically executes the PING, then the transfer.
1419 */
1420 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1421 }
1422 }
1423
1424 /*
1425 * If the ACK occurred when _not_ in the PING state, let the channel
1426 * continue transferring data after clearing the error count
1427 */
1428 disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
1429 }
1430
1431 /*
1432 * Handles a host channel NYET interrupt. This interrupt should only occur on
1433 * Bulk and Control OUT endpoints and for complete split transactions. If a
1434 * NYET occurs at the same time as a Transfer Complete interrupt, it is
1435 * handled in the xfercomp interrupt handler, not here. This handler may be
1436 * called in either DMA mode or Slave mode.
1437 */
1438 static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
1439 struct dwc2_host_chan *chan, int chnum,
1440 struct dwc2_qtd *qtd)
1441 {
1442 if (dbg_hc(chan))
1443 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
1444 chnum);
1445
1446 /*
1447 * NYET on CSPLIT
1448 * re-do the CSPLIT immediately on non-periodic
1449 */
1450 if (chan->do_split && chan->complete_split) {
1451 if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
1452 hsotg->core_params->dma_enable > 0) {
1453 qtd->complete_split = 0;
1454 qtd->isoc_split_offset = 0;
1455 qtd->isoc_frame_index++;
1456 if (qtd->urb &&
1457 qtd->isoc_frame_index == qtd->urb->packet_count) {
1458 dwc2_host_complete(hsotg, qtd, 0);
1459 dwc2_release_channel(hsotg, chan, qtd,
1460 DWC2_HC_XFER_URB_COMPLETE);
1461 } else {
1462 dwc2_release_channel(hsotg, chan, qtd,
1463 DWC2_HC_XFER_NO_HALT_STATUS);
1464 }
1465 goto handle_nyet_done;
1466 }
1467
1468 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1469 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1470 int frnum = dwc2_hcd_get_frame_number(hsotg);
1471
1472 if (dwc2_full_frame_num(frnum) !=
1473 dwc2_full_frame_num(chan->qh->sched_frame)) {
1474 /*
1475 * No longer in the same full speed frame.
1476 * Treat this as a transaction error.
1477 */
1478 #if 0
1479 /*
1480 * Todo: Fix system performance so this can
1481 * be treated as an error. Right now complete
1482 * splits cannot be scheduled precisely enough
1483 * due to other system activity, so this error
1484 * occurs regularly in Slave mode.
1485 */
1486 qtd->error_count++;
1487 #endif
1488 qtd->complete_split = 0;
1489 dwc2_halt_channel(hsotg, chan, qtd,
1490 DWC2_HC_XFER_XACT_ERR);
1491 /* Todo: add support for isoc release */
1492 goto handle_nyet_done;
1493 }
1494 }
1495
1496 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1497 goto handle_nyet_done;
1498 }
1499
1500 chan->qh->ping_state = 1;
1501 qtd->error_count = 0;
1502
1503 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
1504 DWC2_HC_XFER_NYET);
1505 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1506
1507 /*
1508 * Halt the channel and re-start the transfer so the PING protocol
1509 * will start
1510 */
1511 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1512
1513 handle_nyet_done:
1514 disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
1515 }
1516
1517 /*
1518 * Handles a host channel babble interrupt. This handler may be called in
1519 * either DMA mode or Slave mode.
1520 */
1521 static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1522 struct dwc2_host_chan *chan, int chnum,
1523 struct dwc2_qtd *qtd)
1524 {
1525 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1526 chnum);
1527
1528 // dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1529
1530 if (hsotg->core_params->dma_desc_enable > 0) {
1531 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1532 DWC2_HC_XFER_BABBLE_ERR);
1533 goto disable_int;
1534 }
1535
1536 if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1537 dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
1538 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1539 } else {
1540 enum dwc2_halt_status halt_status;
1541
1542 halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1543 qtd, DWC2_HC_XFER_BABBLE_ERR);
1544 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1545 }
1546
1547 disable_int:
1548 disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1549 }
1550
1551 /*
1552 * Handles a host channel AHB error interrupt. This handler is only called in
1553 * DMA mode.
1554 */
1555 static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
1556 struct dwc2_host_chan *chan, int chnum,
1557 struct dwc2_qtd *qtd)
1558 {
1559 struct dwc2_hcd_urb *urb = qtd->urb;
1560
1561 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
1562 chnum);
1563
1564 if (!urb)
1565 goto handle_ahberr_halt;
1566
1567 // dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1568
1569 #ifdef DWC2_DEBUG
1570 const char *pipetype, *speed;
1571
1572 u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
1573 u32 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum));
1574 u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
1575 u32 hc_dma = DWC2_READ_4(hsotg, HCDMA(chnum));
1576
1577 dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
1578 dev_err(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
1579 dev_err(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
1580 dev_err(hsotg->dev, " Device address: %d\n",
1581 dwc2_hcd_get_dev_addr(&urb->pipe_info));
1582 dev_err(hsotg->dev, " Endpoint: %d, %s\n",
1583 dwc2_hcd_get_ep_num(&urb->pipe_info),
1584 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
1585
1586 switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
1587 case USB_ENDPOINT_XFER_CONTROL:
1588 pipetype = "CONTROL";
1589 break;
1590 case USB_ENDPOINT_XFER_BULK:
1591 pipetype = "BULK";
1592 break;
1593 case USB_ENDPOINT_XFER_INT:
1594 pipetype = "INTERRUPT";
1595 break;
1596 case USB_ENDPOINT_XFER_ISOC:
1597 pipetype = "ISOCHRONOUS";
1598 break;
1599 default:
1600 pipetype = "UNKNOWN";
1601 break;
1602 }
1603
1604 dev_err(hsotg->dev, " Endpoint type: %s\n", pipetype);
1605
1606 switch (chan->speed) {
1607 case USB_SPEED_HIGH:
1608 speed = "HIGH";
1609 break;
1610 case USB_SPEED_FULL:
1611 speed = "FULL";
1612 break;
1613 case USB_SPEED_LOW:
1614 speed = "LOW";
1615 break;
1616 default:
1617 speed = "UNKNOWN";
1618 break;
1619 }
1620
1621 dev_err(hsotg->dev, " Speed: %s\n", speed);
1622
1623 dev_err(hsotg->dev, " Max packet size: %d\n",
1624 dwc2_hcd_get_mps(&urb->pipe_info));
1625 dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length);
1626 dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
1627 urb->buf, (unsigned long)urb->dma);
1628 dev_err(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n",
1629 urb->setup_packet, (unsigned long)urb->setup_dma);
1630 dev_err(hsotg->dev, " Interval: %d\n", urb->interval);
1631 #endif
1632
1633 /* Core halts the channel for Descriptor DMA mode */
1634 if (hsotg->core_params->dma_desc_enable > 0) {
1635 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1636 DWC2_HC_XFER_AHB_ERR);
1637 goto handle_ahberr_done;
1638 }
1639
1640 dwc2_host_complete(hsotg, qtd, -EIO);
1641
1642 handle_ahberr_halt:
1643 /*
1644 * Force a channel halt. Don't call dwc2_halt_channel because that won't
1645 * write to the HCCHARn register in DMA mode to force the halt.
1646 */
1647 dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
1648
1649 handle_ahberr_done:
1650 disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
1651 }
1652
1653 /*
1654 * Handles a host channel transaction error interrupt. This handler may be
1655 * called in either DMA mode or Slave mode.
1656 */
1657 static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
1658 struct dwc2_host_chan *chan, int chnum,
1659 struct dwc2_qtd *qtd)
1660 {
1661 dev_dbg(hsotg->dev,
1662 "--Host Channel %d Interrupt: Transaction Error--\n", chnum);
1663
1664 // dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1665
1666 if (hsotg->core_params->dma_desc_enable > 0) {
1667 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1668 DWC2_HC_XFER_XACT_ERR);
1669 goto handle_xacterr_done;
1670 }
1671
1672 switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1673 case USB_ENDPOINT_XFER_CONTROL:
1674 case USB_ENDPOINT_XFER_BULK:
1675 qtd->error_count++;
1676 if (!chan->qh->ping_state) {
1677
1678 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1679 qtd, DWC2_HC_XFER_XACT_ERR);
1680 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1681 if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
1682 chan->qh->ping_state = 1;
1683 }
1684
1685 /*
1686 * Halt the channel so the transfer can be re-started from
1687 * the appropriate point or the PING protocol will start
1688 */
1689 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1690 break;
1691 case USB_ENDPOINT_XFER_INT:
1692 qtd->error_count++;
1693 if (chan->do_split && chan->complete_split)
1694 qtd->complete_split = 0;
1695 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1696 break;
1697 case USB_ENDPOINT_XFER_ISOC:
1698 {
1699 enum dwc2_halt_status halt_status;
1700
1701 halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1702 chnum, qtd, DWC2_HC_XFER_XACT_ERR);
1703 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1704 }
1705 break;
1706 }
1707
1708 handle_xacterr_done:
1709 disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
1710 }
1711
1712 /*
1713 * Handles a host channel frame overrun interrupt. This handler may be called
1714 * in either DMA mode or Slave mode.
1715 */
1716 static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
1717 struct dwc2_host_chan *chan, int chnum,
1718 struct dwc2_qtd *qtd)
1719 {
1720 enum dwc2_halt_status halt_status;
1721
1722 if (dbg_hc(chan))
1723 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
1724 chnum);
1725
1726 dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1727
1728 switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1729 case USB_ENDPOINT_XFER_CONTROL:
1730 case USB_ENDPOINT_XFER_BULK:
1731 break;
1732 case USB_ENDPOINT_XFER_INT:
1733 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1734 break;
1735 case USB_ENDPOINT_XFER_ISOC:
1736 halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1737 qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1738 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1739 break;
1740 }
1741
1742 disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
1743 }
1744
1745 /*
1746 * Handles a host channel data toggle error interrupt. This handler may be
1747 * called in either DMA mode or Slave mode.
1748 */
1749 static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
1750 struct dwc2_host_chan *chan, int chnum,
1751 struct dwc2_qtd *qtd)
1752 {
1753 dev_dbg(hsotg->dev,
1754 "--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
1755
1756 if (chan->ep_is_in)
1757 qtd->error_count = 0;
1758 else
1759 dev_err(hsotg->dev,
1760 "Data Toggle Error on OUT transfer, channel %d\n",
1761 chnum);
1762
1763 // dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1764 disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
1765 }
1766
1767 /*
1768 * For debug only. It checks that a valid halt status is set and that
1769 * HCCHARn.chdis is clear. If there's a problem, corrective action is
1770 * taken and a warning is issued.
1771 *
1772 * Return: true if halt status is ok, false otherwise
1773 */
1774 static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
1775 struct dwc2_host_chan *chan, int chnum,
1776 struct dwc2_qtd *qtd)
1777 {
1778 #ifdef DWC2_DEBUG
1779 u32 hcchar;
1780 u32 hctsiz;
1781 u32 hcintmsk;
1782 u32 hcsplt;
1783
1784 if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
1785 /*
1786 * This code is here only as a check. This condition should
1787 * never happen. Ignore the halt if it does occur.
1788 */
1789 hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
1790 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
1791 hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
1792 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum));
1793 dev_dbg(hsotg->dev,
1794 "%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
1795 __func__);
1796 dev_dbg(hsotg->dev,
1797 "channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
1798 chnum, hcchar, hctsiz);
1799 dev_dbg(hsotg->dev,
1800 "hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
1801 chan->hcint, hcintmsk, hcsplt);
1802 if (qtd)
1803 dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
1804 qtd->complete_split);
1805 dev_warn(hsotg->dev,
1806 "%s: no halt status, channel %d, ignoring interrupt\n",
1807 __func__, chnum);
1808 return false;
1809 }
1810
1811 /*
1812 * This code is here only as a check. hcchar.chdis should never be set
1813 * when the halt interrupt occurs. Halt the channel again if it does
1814 * occur.
1815 */
1816 hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
1817 if (hcchar & HCCHAR_CHDIS) {
1818 dev_warn(hsotg->dev,
1819 "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
1820 __func__, hcchar);
1821 chan->halt_pending = 0;
1822 dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
1823 return false;
1824 }
1825 #endif
1826
1827 return true;
1828 }
1829
1830 /*
1831 * Handles a host Channel Halted interrupt in DMA mode. This handler
1832 * determines the reason the channel halted and proceeds accordingly.
1833 */
1834 static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
1835 struct dwc2_host_chan *chan, int chnum,
1836 struct dwc2_qtd *qtd)
1837 {
1838 u32 hcintmsk;
1839 int out_nak_enh = 0;
1840
1841 if (dbg_hc(chan))
1842 dev_vdbg(hsotg->dev,
1843 "--Host Channel %d Interrupt: DMA Channel Halted--\n",
1844 chnum);
1845
1846 /*
1847 * For core with OUT NAK enhancement, the flow for high-speed
1848 * CONTROL/BULK OUT is handled a little differently
1849 */
1850 if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
1851 if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
1852 (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1853 chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
1854 out_nak_enh = 1;
1855 }
1856 }
1857
1858 if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1859 (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
1860 hsotg->core_params->dma_desc_enable <= 0)) {
1861 if (hsotg->core_params->dma_desc_enable > 0)
1862 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1863 chan->halt_status);
1864 else
1865 /*
1866 * Just release the channel. A dequeue can happen on a
1867 * transfer timeout. In the case of an AHB Error, the
1868 * channel was forced to halt because there's no way to
1869 * gracefully recover.
1870 */
1871 dwc2_release_channel(hsotg, chan, qtd,
1872 chan->halt_status);
1873 return;
1874 }
1875
1876 hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
1877
1878 if (chan->hcint & HCINTMSK_XFERCOMPL) {
1879 /*
1880 * Todo: This is here because of a possible hardware bug. Spec
1881 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
1882 * interrupt w/ACK bit set should occur, but I only see the
1883 * XFERCOMP bit, even with it masked out. This is a workaround
1884 * for that behavior. Should fix this when hardware is fixed.
1885 */
1886 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
1887 dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1888 dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
1889 } else if (chan->hcint & HCINTMSK_STALL) {
1890 dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
1891 } else if ((chan->hcint & HCINTMSK_XACTERR) &&
1892 hsotg->core_params->dma_desc_enable <= 0) {
1893 if (out_nak_enh) {
1894 if (chan->hcint &
1895 (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
1896 dev_vdbg(hsotg->dev,
1897 "XactErr with NYET/NAK/ACK\n");
1898 qtd->error_count = 0;
1899 } else {
1900 dev_vdbg(hsotg->dev,
1901 "XactErr without NYET/NAK/ACK\n");
1902 }
1903 }
1904
1905 /*
1906 * Must handle xacterr before nak or ack. Could get a xacterr
1907 * at the same time as either of these on a BULK/CONTROL OUT
1908 * that started with a PING. The xacterr takes precedence.
1909 */
1910 dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1911 } else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
1912 hsotg->core_params->dma_desc_enable > 0) {
1913 dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1914 } else if ((chan->hcint & HCINTMSK_AHBERR) &&
1915 hsotg->core_params->dma_desc_enable > 0) {
1916 dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
1917 } else if (chan->hcint & HCINTMSK_BBLERR) {
1918 dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
1919 } else if (chan->hcint & HCINTMSK_FRMOVRUN) {
1920 dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
1921 } else if (!out_nak_enh) {
1922 if (chan->hcint & HCINTMSK_NYET) {
1923 /*
1924 * Must handle nyet before nak or ack. Could get a nyet
1925 * at the same time as either of those on a BULK/CONTROL
1926 * OUT that started with a PING. The nyet takes
1927 * precedence.
1928 */
1929 dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
1930 } else if ((chan->hcint & HCINTMSK_NAK) &&
1931 !(hcintmsk & HCINTMSK_NAK)) {
1932 /*
1933 * If nak is not masked, it's because a non-split IN
1934 * transfer is in an error state. In that case, the nak
1935 * is handled by the nak interrupt handler, not here.
1936 * Handle nak here for BULK/CONTROL OUT transfers, which
1937 * halt on a NAK to allow rewinding the buffer pointer.
1938 */
1939 dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
1940 } else if ((chan->hcint & HCINTMSK_ACK) &&
1941 !(hcintmsk & HCINTMSK_ACK)) {
1942 /*
1943 * If ack is not masked, it's because a non-split IN
1944 * transfer is in an error state. In that case, the ack
1945 * is handled by the ack interrupt handler, not here.
1946 * Handle ack here for split transfers. Start splits
1947 * halt on ACK.
1948 */
1949 dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1950 } else {
1951 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1952 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1953 /*
1954 * A periodic transfer halted with no other
1955 * channel interrupts set. Assume it was halted
1956 * by the core because it could not be completed
1957 * in its scheduled (micro)frame.
1958 */
1959 dev_dbg(hsotg->dev,
1960 "%s: Halt channel %d (assume incomplete periodic transfer)\n",
1961 __func__, chnum);
1962 dwc2_halt_channel(hsotg, chan, qtd,
1963 DWC2_HC_XFER_PERIODIC_INCOMPLETE);
1964 } else {
1965 dev_err(hsotg->dev,
1966 "%s: Channel %d - ChHltd set, but reason is unknown\n",
1967 __func__, chnum);
1968 dev_err(hsotg->dev,
1969 "hcint 0x%08x, intsts 0x%08x\n",
1970 chan->hcint,
1971 DWC2_READ_4(hsotg, GINTSTS));
1972 goto error;
1973 }
1974 }
1975 } else {
1976 dev_info(hsotg->dev,
1977 "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
1978 chan->hcint);
1979 error:
		/* Fallthrough: use 3-strikes rule */
1981 qtd->error_count++;
1982 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1983 qtd, DWC2_HC_XFER_XACT_ERR);
1984 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1985 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1986 }
1987 }
1988
1989 /*
1990 * Handles a host channel Channel Halted interrupt
1991 *
1992 * In slave mode, this handler is called only when the driver specifically
1993 * requests a halt. This occurs during handling other host channel interrupts
1994 * (e.g. nak, xacterr, stall, nyet, etc.).
1995 *
1996 * In DMA mode, this is the interrupt that occurs when the core has finished
1997 * processing a transfer on a channel. Other host channel interrupts (except
1998 * ahberr) are disabled in DMA mode.
1999 */
2000 static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
2001 struct dwc2_host_chan *chan, int chnum,
2002 struct dwc2_qtd *qtd)
2003 {
2004 if (dbg_hc(chan))
2005 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
2006 chnum);
2007
2008 if (hsotg->core_params->dma_enable > 0) {
2009 dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
2010 } else {
2011 if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
2012 return;
2013 dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
2014 }
2015 }
2016
2017 /*
2018 * Check if the given qtd is still the top of the list (and thus valid).
2019 *
2020 * If dwc2_hcd_qtd_unlink_and_free() has been called since we grabbed
2021 * the qtd from the top of the list, this will return false (otherwise true).
2022 */
2023 static bool dwc2_check_qtd_still_ok(struct dwc2_qtd *qtd, struct dwc2_qh *qh)
2024 {
2025 struct dwc2_qtd *cur_head;
2026
2027 if (qh == NULL)
2028 return false;
2029
2030 cur_head = list_first_entry(&qh->qtd_list, struct dwc2_qtd,
2031 qtd_list_entry);
2032 return (cur_head == qtd);
2033 }
2034
/*
 * Handles interrupt for a specific Host Channel
 *
 * Reads and acks the channel's HCINT status, then dispatches each unmasked
 * interrupt bit to its dedicated handler. After every handler that may
 * retire the qtd, the qtd at the head of the channel's qh is re-validated
 * before any further handlers run.
 */
static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan;
	u32 hcint, hcintmsk;

	chan = hsotg->hc_ptr_array[chnum];

	hcint = DWC2_READ_4(hsotg, HCINT(chnum));
	hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
	if (!chan) {
		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
		/* Ack the raw status anyway so the interrupt stops firing */
		DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
		return;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
			 chnum);
		dev_vdbg(hsotg->dev,
			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			 hcint, hcintmsk, hcint & hcintmsk);
	}

	/*
	 * Ack all raw status bits (write-1-to-clear); keep the raw value in
	 * chan->hcint for the handlers, then work only on the unmasked bits.
	 */
	DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
	chan->hcint = hcint;
	hcint &= hcintmsk;

	/*
	 * If the channel was halted due to a dequeue, the qtd list might
	 * be empty or at least the first entry will not be the active qtd.
	 * In this case, take a shortcut and just release the channel.
	 */
	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		/*
		 * If the channel was halted, this should be the only
		 * interrupt unmasked
		 */
		WARN_ON(hcint != HCINTMSK_CHHLTD);
		if (hsotg->core_params->dma_desc_enable > 0)
			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
						    chan->halt_status);
		else
			dwc2_release_channel(hsotg, chan, NULL,
					     chan->halt_status);
		return;
	}

	if (list_empty(&chan->qh->qtd_list)) {
		/*
		 * TODO: Will this ever happen with the
		 * DWC2_HC_XFER_URB_DEQUEUE handling above?
		 */
		dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
			chnum);
		dev_dbg(hsotg->dev,
			"  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			chan->hcint, hcintmsk, hcint);
		chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
		disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
		chan->hcint = 0;
		return;
	}

	qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
			       qtd_list_entry);

	/*
	 * In slave mode, when CHHLTD arrives together with other status
	 * bits, drop CHHLTD here and service the accompanying conditions
	 * instead; presumably their handlers take care of completing the
	 * halt (CHHLTD alone still reaches the chhltd handler below) --
	 * NOTE(review): confirm against the per-condition handlers.
	 */
	if (hsotg->core_params->dma_enable <= 0) {
		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
			hcint &= ~HCINTMSK_CHHLTD;
	}

	if (hcint & HCINTMSK_XFERCOMPL) {
		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
		/*
		 * If NYET occurred at same time as Xfer Complete, the NYET is
		 * handled by the Xfer Complete interrupt handler. Don't want
		 * to call the NYET interrupt handler in this case.
		 */
		hcint &= ~HCINTMSK_NYET;
	}

	/*
	 * Each handler below may unlink and free the qtd; bail out as soon
	 * as the head of the qtd list no longer matches the qtd in hand.
	 */
	if (hcint & HCINTMSK_CHHLTD) {
		dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_AHBERR) {
		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_STALL) {
		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_NAK) {
		dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_ACK) {
		dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_NYET) {
		dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_XACTERR) {
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_BBLERR) {
		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_FRMOVRUN) {
		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_DATATGLERR) {
		dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}

exit:
	chan->hcint = 0;
}
2172
2173 /*
2174 * This interrupt indicates that one or more host channels has a pending
2175 * interrupt. There are multiple conditions that can cause each host channel
2176 * interrupt. This function determines which conditions have occurred for each
2177 * host channel interrupt and handles them appropriately.
2178 */
2179 static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2180 {
2181 u32 haint;
2182 int i;
2183
2184 haint = DWC2_READ_4(hsotg, HAINT);
2185 if (dbg_perio()) {
2186 dev_vdbg(hsotg->dev, "%s()\n", __func__);
2187
2188 dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2189 }
2190
2191 for (i = 0; i < hsotg->core_params->host_channels; i++) {
2192 if (haint & (1 << i))
2193 dwc2_hc_n_intr(hsotg, i);
2194 }
2195 }
2196
2197 /* This function handles interrupts for the HCD */
2198 irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
2199 {
2200 u32 gintsts, dbg_gintsts;
2201 irqreturn_t retval = IRQ_NONE;
2202
2203 if (!dwc2_is_controller_alive(hsotg)) {
2204 dev_warn(hsotg->dev, "Controller is dead\n");
2205 return retval;
2206 }
2207
2208 KASSERT(mutex_owned(&hsotg->lock));
2209
2210 /* Check if HOST Mode */
2211 if (dwc2_is_host_mode(hsotg)) {
2212 gintsts = dwc2_read_core_intr(hsotg);
2213 if (!gintsts) {
2214 return retval;
2215 }
2216
2217 retval = IRQ_HANDLED;
2218
2219 dbg_gintsts = gintsts;
2220 #ifndef DEBUG_SOF
2221 dbg_gintsts &= ~GINTSTS_SOF;
2222 #endif
2223 if (!dbg_perio())
2224 dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
2225 GINTSTS_PTXFEMP);
2226
2227 /* Only print if there are any non-suppressed interrupts left */
2228 if (dbg_gintsts)
2229 dev_vdbg(hsotg->dev,
2230 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
2231 gintsts);
2232
2233 if (gintsts & GINTSTS_SOF)
2234 dwc2_sof_intr(hsotg);
2235 if (gintsts & GINTSTS_RXFLVL)
2236 dwc2_rx_fifo_level_intr(hsotg);
2237 if (gintsts & GINTSTS_NPTXFEMP)
2238 dwc2_np_tx_fifo_empty_intr(hsotg);
2239 if (gintsts & GINTSTS_PRTINT)
2240 dwc2_port_intr(hsotg);
2241 if (gintsts & GINTSTS_HCHINT)
2242 dwc2_hc_intr(hsotg);
2243 if (gintsts & GINTSTS_PTXFEMP)
2244 dwc2_perio_tx_fifo_empty_intr(hsotg);
2245
2246 if (dbg_gintsts) {
2247 dev_vdbg(hsotg->dev,
2248 "DWC OTG HCD Finished Servicing Interrupts\n");
2249 dev_vdbg(hsotg->dev,
2250 "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
2251 DWC2_READ_4(hsotg, GINTSTS),
2252 DWC2_READ_4(hsotg, GINTMSK));
2253 }
2254 }
2255
2256 return retval;
2257 }
2258