dwc2_hcdintr.c revision 1.14

     1 1.14 simonb /* $NetBSD: dwc2_hcdintr.c,v 1.14 2018/08/08 07:20:44 simonb Exp $ */
2 1.1 skrll
3 1.1 skrll /*
4 1.1 skrll * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
5 1.1 skrll *
6 1.1 skrll * Copyright (C) 2004-2013 Synopsys, Inc.
7 1.1 skrll *
8 1.1 skrll * Redistribution and use in source and binary forms, with or without
9 1.1 skrll * modification, are permitted provided that the following conditions
10 1.1 skrll * are met:
11 1.1 skrll * 1. Redistributions of source code must retain the above copyright
12 1.1 skrll * notice, this list of conditions, and the following disclaimer,
13 1.1 skrll * without modification.
14 1.1 skrll * 2. Redistributions in binary form must reproduce the above copyright
15 1.1 skrll * notice, this list of conditions and the following disclaimer in the
16 1.1 skrll * documentation and/or other materials provided with the distribution.
17 1.1 skrll * 3. The names of the above-listed copyright holders may not be used
18 1.1 skrll * to endorse or promote products derived from this software without
19 1.1 skrll * specific prior written permission.
20 1.1 skrll *
21 1.1 skrll * ALTERNATIVELY, this software may be distributed under the terms of the
22 1.1 skrll * GNU General Public License ("GPL") as published by the Free Software
23 1.1 skrll * Foundation; either version 2 of the License, or (at your option) any
24 1.1 skrll * later version.
25 1.1 skrll *
26 1.1 skrll * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 1.1 skrll * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 1.1 skrll * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.1 skrll * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 1.1 skrll * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 1.1 skrll * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 1.1 skrll * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 1.1 skrll * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 1.1 skrll * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 1.1 skrll * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 1.1 skrll * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 1.1 skrll */
38 1.1 skrll
39 1.1 skrll /*
40 1.1 skrll * This file contains the interrupt handlers for Host mode
41 1.1 skrll */
42 1.2 skrll #include <sys/cdefs.h>
43 1.14 simonb __KERNEL_RCSID(0, "$NetBSD: dwc2_hcdintr.c,v 1.14 2018/08/08 07:20:44 simonb Exp $");
44 1.2 skrll
45 1.2 skrll #include <sys/types.h>
46 1.2 skrll #include <sys/pool.h>
47 1.2 skrll
48 1.2 skrll #include <dev/usb/usb.h>
49 1.2 skrll #include <dev/usb/usbdi.h>
50 1.2 skrll #include <dev/usb/usbdivar.h>
51 1.2 skrll #include <dev/usb/usb_mem.h>
52 1.2 skrll
53 1.2 skrll #include <machine/param.h>
54 1.2 skrll
55 1.1 skrll #include <linux/kernel.h>
56 1.1 skrll
57 1.2 skrll #include <dwc2/dwc2.h>
58 1.2 skrll #include <dwc2/dwc2var.h>
59 1.1 skrll
60 1.2 skrll #include "dwc2_core.h"
61 1.2 skrll #include "dwc2_hcd.h"
62 1.1 skrll
63 1.14 simonb /*
64 1.14 simonb * If we get this many NAKs on a split transaction we'll slow down
65 1.14 simonb * retransmission. A 1 here means delay after the first NAK.
66 1.14 simonb */
67 1.14 simonb #define DWC2_NAKS_BEFORE_DELAY 3
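/*
 * Run-time copy of the threshold above; presumably kept in a plain
 * variable so the value can be tuned without rebuilding the kernel.
 */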
68 1.14 simonb int dwc2_naks_before_delay = DWC2_NAKS_BEFORE_DELAY;
69 1.14 simonb
70 1.1 skrll /* This function is for debug only */
71 1.1 skrll static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
72 1.1 skrll {
73 1.1 skrll #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
74 1.1 skrll u16 curr_frame_number = hsotg->frame_number;
75 1.1 skrll
76 1.1 skrll if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
77 1.1 skrll if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) !=
78 1.1 skrll curr_frame_number) {
79 1.1 skrll hsotg->frame_num_array[hsotg->frame_num_idx] =
80 1.1 skrll curr_frame_number;
81 1.1 skrll hsotg->last_frame_num_array[hsotg->frame_num_idx] =
82 1.1 skrll hsotg->last_frame_num;
83 1.1 skrll hsotg->frame_num_idx++;
84 1.1 skrll }
85 1.1 skrll } else if (!hsotg->dumped_frame_num_array) {
86 1.1 skrll int i;
87 1.1 skrll
88 1.1 skrll dev_info(hsotg->dev, "Frame Last Frame\n");
89 1.1 skrll dev_info(hsotg->dev, "----- ----------\n");
90 1.1 skrll for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
91 1.1 skrll dev_info(hsotg->dev, "0x%04x 0x%04x\n",
92 1.1 skrll hsotg->frame_num_array[i],
93 1.1 skrll hsotg->last_frame_num_array[i]);
94 1.1 skrll }
95 1.1 skrll hsotg->dumped_frame_num_array = 1;
96 1.1 skrll }
97 1.1 skrll hsotg->last_frame_num = curr_frame_number;
98 1.1 skrll #endif
99 1.1 skrll }
100 1.1 skrll
101 1.1 skrll static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
102 1.1 skrll struct dwc2_host_chan *chan,
103 1.1 skrll struct dwc2_qtd *qtd)
104 1.1 skrll {
105 1.2 skrll // struct urb *usb_urb;
106 1.1 skrll
107 1.1 skrll if (!chan->qh)
108 1.1 skrll return;
109 1.1 skrll
110 1.1 skrll if (chan->qh->dev_speed == USB_SPEED_HIGH)
111 1.1 skrll return;
112 1.1 skrll
113 1.1 skrll if (!qtd->urb)
114 1.1 skrll return;
115 1.1 skrll
116 1.1 skrll
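	/*
	 * The Linux driver calls usb_hub_clear_tt_buffer() on the URB here;
	 * this port appears to have no equivalent hub call, so the dirty
	 * flag is simply set and then cleared again as a placeholder.
	 */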
117 1.1 skrll if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
118 1.1 skrll chan->qh->tt_buffer_dirty = 1;
119 1.1 skrll chan->qh->tt_buffer_dirty = 0;
120 1.1 skrll }
121 1.1 skrll }
122 1.1 skrll
123 1.1 skrll /*
124 1.1 skrll * Handles the start-of-frame interrupt in host mode. Non-periodic
125 1.1 skrll * transactions may be queued to the DWC_otg controller for the current
126 1.1 skrll * (micro)frame. Periodic transactions may be queued to the controller
127 1.1 skrll * for the next (micro)frame.
128 1.1 skrll */
129 1.1 skrll static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
130 1.1 skrll {
131 1.1 skrll struct list_head *qh_entry;
132 1.1 skrll struct dwc2_qh *qh;
133 1.3 skrll enum dwc2_transaction_type tr_type;
134 1.1 skrll
135 1.13 skrll /* Clear interrupt */
136 1.13 skrll DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_SOF);
137 1.13 skrll
138 1.1 skrll #ifdef DEBUG_SOF
139 1.1 skrll dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
140 1.1 skrll #endif
141 1.1 skrll
142 1.1 skrll hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
143 1.1 skrll
144 1.1 skrll dwc2_track_missed_sofs(hsotg);
145 1.1 skrll
146 1.1 skrll /* Determine whether any periodic QHs should be executed */
147 1.1 skrll qh_entry = hsotg->periodic_sched_inactive.next;
148 1.1 skrll while (qh_entry != &hsotg->periodic_sched_inactive) {
149 1.1 skrll qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
150 1.1 skrll qh_entry = qh_entry->next;
151 1.3 skrll if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number))
152 1.1 skrll /*
153 1.1 skrll * Move QH to the ready list to be executed next
154 1.1 skrll * (micro)frame
155 1.1 skrll */
156 1.1 skrll list_move(&qh->qh_list_entry,
157 1.1 skrll &hsotg->periodic_sched_ready);
158 1.1 skrll }
159 1.1 skrll tr_type = dwc2_hcd_select_transactions(hsotg);
160 1.1 skrll if (tr_type != DWC2_TRANSACTION_NONE)
161 1.1 skrll dwc2_hcd_queue_transactions(hsotg, tr_type);
162 1.1 skrll }
163 1.1 skrll
164 1.1 skrll /*
165 1.1 skrll * Handles the Rx FIFO Level Interrupt, which indicates that there is
166 1.1 skrll * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
167 1.1 skrll * memory if the DWC_otg controller is operating in Slave mode.
168 1.1 skrll */
169 1.1 skrll static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
170 1.1 skrll {
171 1.7 skrll u32 grxsts, chnum, bcnt, pktsts;
172 1.1 skrll struct dwc2_host_chan *chan;
173 1.1 skrll
174 1.1 skrll if (dbg_perio())
175 1.1 skrll dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");
176 1.1 skrll
177 1.2 skrll grxsts = DWC2_READ_4(hsotg, GRXSTSP);
178 1.3 skrll chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
179 1.1 skrll chan = hsotg->hc_ptr_array[chnum];
180 1.1 skrll if (!chan) {
181 1.1 skrll dev_err(hsotg->dev, "Unable to get corresponding channel\n");
182 1.1 skrll return;
183 1.1 skrll }
184 1.1 skrll
185 1.3 skrll bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
186 1.3 skrll pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;
187 1.1 skrll
188 1.1 skrll /* Packet Status */
189 1.1 skrll if (dbg_perio()) {
190 1.1 skrll dev_vdbg(hsotg->dev, " Ch num = %d\n", chnum);
191 1.1 skrll dev_vdbg(hsotg->dev, " Count = %d\n", bcnt);
192 1.7 skrll dev_vdbg(hsotg->dev, " DPID = %d, chan.dpid = %d\n",
193 1.7 skrll (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT,
194 1.1 skrll chan->data_pid_start);
195 1.3 skrll dev_vdbg(hsotg->dev, " PStatus = %d\n", pktsts);
196 1.1 skrll }
197 1.1 skrll
198 1.1 skrll switch (pktsts) {
199 1.1 skrll case GRXSTS_PKTSTS_HCHIN:
200 1.1 skrll /* Read the data into the host buffer */
201 1.1 skrll if (bcnt > 0) {
202 1.1 skrll dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);
203 1.1 skrll
204 1.1 skrll /* Update the HC fields for the next packet received */
205 1.1 skrll chan->xfer_count += bcnt;
206 1.1 skrll chan->xfer_buf += bcnt;
207 1.1 skrll }
208 1.1 skrll break;
209 1.1 skrll case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
210 1.1 skrll case GRXSTS_PKTSTS_DATATOGGLEERR:
211 1.1 skrll case GRXSTS_PKTSTS_HCHHALTED:
212 1.1 skrll /* Handled in interrupt, just ignore data */
213 1.1 skrll break;
214 1.1 skrll default:
215 1.1 skrll dev_err(hsotg->dev,
216 1.1 skrll "RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
217 1.1 skrll break;
218 1.1 skrll }
219 1.1 skrll }
220 1.1 skrll
221 1.1 skrll /*
222 1.1 skrll * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
223 1.1 skrll * data packets may be written to the FIFO for OUT transfers. More requests
224 1.1 skrll * may be written to the non-periodic request queue for IN transfers. This
225 1.1 skrll * interrupt is enabled only in Slave mode.
226 1.1 skrll */
227 1.1 skrll static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
228 1.1 skrll {
229 1.1 skrll dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
230 1.1 skrll dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
231 1.1 skrll }
232 1.1 skrll
233 1.1 skrll /*
234 1.1 skrll * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
235 1.1 skrll * packets may be written to the FIFO for OUT transfers. More requests may be
236 1.1 skrll * written to the periodic request queue for IN transfers. This interrupt is
237 1.1 skrll * enabled only in Slave mode.
238 1.1 skrll */
239 1.1 skrll static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
240 1.1 skrll {
241 1.1 skrll if (dbg_perio())
242 1.1 skrll dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
243 1.1 skrll dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
244 1.1 skrll }
245 1.1 skrll
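/*
 * Handles the port transitioning to the enabled state. Recalculates
 * HFIR.FrInterval and, when FS/LS low-power PHY support is configured,
 * adjusts the PHY clock selection in GUSBCFG/HCFG for the detected port
 * speed. If the PHY clock was changed, a port reset is scheduled;
 * otherwise the port reset change flag is set and the root hub is
 * notified.
 */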
246 1.1 skrll static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
247 1.1 skrll u32 *hprt0_modify)
248 1.1 skrll {
249 1.1 skrll struct dwc2_core_params *params = hsotg->core_params;
250 1.1 skrll int do_reset = 0;
251 1.1 skrll u32 usbcfg;
252 1.1 skrll u32 prtspd;
253 1.1 skrll u32 hcfg;
254 1.1 skrll u32 fslspclksel;
255 1.1 skrll u32 hfir;
256 1.1 skrll
257 1.1 skrll dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
258 1.1 skrll
259 1.1  skrll  	/* Every time the port is enabled, recalculate HFIR.FrInterval */
260 1.2 skrll hfir = DWC2_READ_4(hsotg, HFIR);
261 1.1 skrll hfir &= ~HFIR_FRINT_MASK;
262 1.1 skrll hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
263 1.1 skrll HFIR_FRINT_MASK;
264 1.2 skrll DWC2_WRITE_4(hsotg, HFIR, hfir);
265 1.1 skrll
266 1.1 skrll /* Check if we need to adjust the PHY clock speed for low power */
267 1.1 skrll if (!params->host_support_fs_ls_low_power) {
268 1.1 skrll /* Port has been enabled, set the reset change flag */
269 1.1 skrll hsotg->flags.b.port_reset_change = 1;
270 1.2 skrll
271 1.2 skrll dwc2_root_intr(hsotg->hsotg_sc);
272 1.1 skrll return;
273 1.1 skrll }
274 1.1 skrll
275 1.2 skrll usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
276 1.3 skrll prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
277 1.1 skrll
278 1.1 skrll if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
279 1.1 skrll /* Low power */
280 1.1 skrll if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
281 1.1 skrll /* Set PHY low power clock select for FS/LS devices */
282 1.1 skrll usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
283 1.2 skrll DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
284 1.1 skrll do_reset = 1;
285 1.1 skrll }
286 1.1 skrll
287 1.2 skrll hcfg = DWC2_READ_4(hsotg, HCFG);
288 1.3 skrll fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
289 1.3 skrll HCFG_FSLSPCLKSEL_SHIFT;
290 1.1 skrll
291 1.1 skrll if (prtspd == HPRT0_SPD_LOW_SPEED &&
292 1.1 skrll params->host_ls_low_power_phy_clk ==
293 1.1 skrll DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
294 1.1 skrll /* 6 MHZ */
295 1.1 skrll dev_vdbg(hsotg->dev,
296 1.1 skrll "FS_PHY programming HCFG to 6 MHz\n");
297 1.1 skrll if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
298 1.3 skrll fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
299 1.1 skrll hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
300 1.3 skrll hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
301 1.2 skrll DWC2_WRITE_4(hsotg, HCFG, hcfg);
302 1.1 skrll do_reset = 1;
303 1.1 skrll }
304 1.1 skrll } else {
305 1.1 skrll /* 48 MHZ */
306 1.1 skrll dev_vdbg(hsotg->dev,
307 1.1 skrll "FS_PHY programming HCFG to 48 MHz\n");
308 1.1 skrll if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
309 1.3 skrll fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
310 1.1 skrll hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
311 1.3 skrll hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
312 1.2 skrll DWC2_WRITE_4(hsotg, HCFG, hcfg);
313 1.1 skrll do_reset = 1;
314 1.1 skrll }
315 1.1 skrll }
316 1.1 skrll } else {
317 1.1 skrll /* Not low power */
318 1.1 skrll if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
319 1.1 skrll usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
320 1.2 skrll DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
321 1.1 skrll do_reset = 1;
322 1.1 skrll }
323 1.1 skrll }
324 1.1 skrll
325 1.1 skrll if (do_reset) {
326 1.1 skrll *hprt0_modify |= HPRT0_RST;
327 1.13 skrll DWC2_WRITE_4(hsotg, HPRT0, *hprt0_modify);
328 1.1 skrll queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
329 1.1 skrll msecs_to_jiffies(60));
330 1.1 skrll } else {
331 1.1 skrll /* Port has been enabled, set the reset change flag */
332 1.1 skrll hsotg->flags.b.port_reset_change = 1;
333 1.2 skrll dwc2_root_intr(hsotg->hsotg_sc);
334 1.2 skrll
335 1.1 skrll }
336 1.1 skrll }
337 1.1 skrll
338 1.1 skrll /*
339 1.1 skrll * There are multiple conditions that can cause a port interrupt. This function
340 1.1 skrll * determines which interrupt conditions have occurred and handles them
341 1.1 skrll * appropriately.
342 1.1 skrll */
343 1.1 skrll static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
344 1.1 skrll {
345 1.1 skrll u32 hprt0;
346 1.1 skrll u32 hprt0_modify;
347 1.1 skrll
348 1.1 skrll dev_vdbg(hsotg->dev, "--Port Interrupt--\n");
349 1.1 skrll
350 1.2 skrll hprt0 = DWC2_READ_4(hsotg, HPRT0);
351 1.1 skrll hprt0_modify = hprt0;
352 1.1 skrll
353 1.1 skrll /*
354 1.1 skrll * Clear appropriate bits in HPRT0 to clear the interrupt bit in
355 1.1 skrll * GINTSTS
356 1.1 skrll */
357 1.1 skrll hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
358 1.1 skrll HPRT0_OVRCURRCHG);
359 1.1 skrll
360 1.1 skrll /*
361 1.1 skrll * Port Connect Detected
362 1.1 skrll * Set flag and clear if detected
363 1.1 skrll */
364 1.1 skrll if (hprt0 & HPRT0_CONNDET) {
365 1.13 skrll DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify | HPRT0_CONNDET);
366 1.13 skrll
367 1.1 skrll dev_vdbg(hsotg->dev,
368 1.1 skrll "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
369 1.1 skrll hprt0);
370 1.13 skrll dwc2_hcd_connect(hsotg);
371 1.1 skrll
372 1.1 skrll /*
373 1.1 skrll * The Hub driver asserts a reset when it sees port connect
374 1.1 skrll * status change flag
375 1.1 skrll */
376 1.1 skrll }
377 1.1 skrll
378 1.1 skrll /*
379 1.1 skrll * Port Enable Changed
380 1.1 skrll * Clear if detected - Set internal flag if disabled
381 1.1 skrll */
382 1.1 skrll if (hprt0 & HPRT0_ENACHG) {
383 1.13 skrll DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify | HPRT0_ENACHG);
384 1.1 skrll dev_vdbg(hsotg->dev,
385 1.1 skrll " --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
386 1.1 skrll hprt0, !!(hprt0 & HPRT0_ENA));
387 1.13 skrll if (hprt0 & HPRT0_ENA) {
388 1.13 skrll hsotg->new_connection = true;
389 1.1 skrll dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
390 1.13 skrll } else {
391 1.1 skrll hsotg->flags.b.port_enable_change = 1;
392 1.13 skrll if (hsotg->core_params->dma_desc_fs_enable) {
393 1.13 skrll u32 hcfg;
394 1.13 skrll
395 1.13 skrll hsotg->core_params->dma_desc_enable = 0;
396 1.13 skrll hsotg->new_connection = false;
397 1.13 skrll hcfg = DWC2_READ_4(hsotg, HCFG);
398 1.13 skrll hcfg &= ~HCFG_DESCDMA;
399 1.13 skrll DWC2_WRITE_4(hsotg, HCFG, hcfg);
400 1.13 skrll }
401 1.13 skrll }
402 1.1 skrll }
403 1.1 skrll
404 1.1 skrll /* Overcurrent Change Interrupt */
405 1.1 skrll if (hprt0 & HPRT0_OVRCURRCHG) {
406 1.13 skrll DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify | HPRT0_OVRCURRCHG);
407 1.1 skrll dev_vdbg(hsotg->dev,
408 1.1 skrll " --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
409 1.1 skrll hprt0);
410 1.1 skrll hsotg->flags.b.port_over_current_change = 1;
411 1.1 skrll }
412 1.1 skrll
413 1.2 skrll if (hsotg->flags.b.port_connect_status_change ||
414 1.2 skrll hsotg->flags.b.port_enable_change ||
415 1.2 skrll hsotg->flags.b.port_over_current_change)
416 1.2 skrll dwc2_root_intr(hsotg->hsotg_sc);
417 1.1 skrll }
418 1.1 skrll
419 1.1 skrll /*
420 1.1 skrll * Gets the actual length of a transfer after the transfer halts. halt_status
421 1.1 skrll * holds the reason for the halt.
422 1.1 skrll *
423 1.1 skrll * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
424 1.1 skrll * is set to 1 upon return if less than the requested number of bytes were
425 1.1 skrll * transferred. short_read may also be NULL on entry, in which case it remains
426 1.1 skrll * unchanged.
427 1.1 skrll */
428 1.1 skrll static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
429 1.1 skrll struct dwc2_host_chan *chan, int chnum,
430 1.1 skrll struct dwc2_qtd *qtd,
431 1.1 skrll enum dwc2_halt_status halt_status,
432 1.1 skrll int *short_read)
433 1.1 skrll {
434 1.1 skrll u32 hctsiz, count, length;
435 1.1 skrll
436 1.2 skrll hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
437 1.1 skrll
438 1.1 skrll if (halt_status == DWC2_HC_XFER_COMPLETE) {
439 1.1 skrll if (chan->ep_is_in) {
440 1.3 skrll count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
441 1.3 skrll TSIZ_XFERSIZE_SHIFT;
442 1.1 skrll length = chan->xfer_len - count;
443 1.1 skrll if (short_read != NULL)
444 1.1 skrll *short_read = (count != 0);
445 1.1 skrll } else if (chan->qh->do_split) {
446 1.1 skrll length = qtd->ssplit_out_xfer_count;
447 1.1 skrll } else {
448 1.1 skrll length = chan->xfer_len;
449 1.1 skrll }
450 1.1 skrll } else {
451 1.1 skrll /*
452 1.1 skrll * Must use the hctsiz.pktcnt field to determine how much data
453 1.1 skrll * has been transferred. This field reflects the number of
454 1.1 skrll * packets that have been transferred via the USB. This is
455 1.1 skrll * always an integral number of packets if the transfer was
456 1.1 skrll * halted before its normal completion. (Can't use the
457 1.1 skrll * hctsiz.xfersize field because that reflects the number of
458 1.1 skrll * bytes transferred via the AHB, not the USB).
459 1.1 skrll */
460 1.3 skrll count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
461 1.1 skrll length = (chan->start_pkt_count - count) * chan->max_packet;
462 1.1 skrll }
463 1.1 skrll
464 1.1 skrll return length;
465 1.1 skrll }
466 1.1 skrll
467 1.1 skrll /**
468 1.1 skrll * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
469 1.1 skrll * Complete interrupt on the host channel. Updates the actual_length field
470 1.1 skrll * of the URB based on the number of bytes transferred via the host channel.
471 1.1 skrll * Sets the URB status if the data transfer is finished.
472 1.1 skrll *
473 1.1 skrll * Return: 1 if the data transfer specified by the URB is completely finished,
474 1.1 skrll * 0 otherwise
475 1.1 skrll */
476 1.1 skrll static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
477 1.1 skrll struct dwc2_host_chan *chan, int chnum,
478 1.1 skrll struct dwc2_hcd_urb *urb,
479 1.1 skrll struct dwc2_qtd *qtd)
480 1.1 skrll {
481 1.1 skrll int xfer_done = 0;
482 1.1 skrll int short_read = 0;
483 1.1 skrll int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
484 1.1 skrll DWC2_HC_XFER_COMPLETE,
485 1.1 skrll &short_read);
486 1.1 skrll
487 1.1 skrll if (urb->actual_length + xfer_length > urb->length) {
488 1.1 skrll dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
489 1.1 skrll xfer_length = urb->length - urb->actual_length;
490 1.1 skrll }
491 1.1 skrll
492 1.1 skrll /* Non DWORD-aligned buffer case handling */
493 1.12 skrll if (chan->align_buf && xfer_length) {
494 1.1 skrll dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
495 1.12 skrll usb_syncmem(urb->usbdma, 0, chan->qh->dw_align_buf_size,
496 1.12 skrll chan->ep_is_in ?
497 1.12 skrll BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
498 1.12 skrll if (chan->ep_is_in)
499 1.12 skrll memcpy(urb->buf + urb->actual_length,
500 1.12 skrll chan->qh->dw_align_buf, xfer_length);
501 1.12 skrll usb_syncmem(urb->usbdma, 0, chan->qh->dw_align_buf_size,
502 1.12 skrll chan->ep_is_in ?
503 1.12 skrll BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
504 1.1 skrll }
505 1.1 skrll
506 1.1 skrll dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
507 1.1 skrll urb->actual_length, xfer_length);
508 1.1 skrll urb->actual_length += xfer_length;
509 1.1 skrll
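	/*
	 * A bulk OUT transfer that ends exactly on a max-packet boundary
	 * with URB_SEND_ZERO_PACKET requested is not finished yet; leave
	 * xfer_done clear so the trailing zero-length packet is still sent.
	 */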
510 1.1 skrll if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
511 1.1 skrll (urb->flags & URB_SEND_ZERO_PACKET) &&
512 1.1 skrll urb->actual_length >= urb->length &&
513 1.1 skrll !(urb->length % chan->max_packet)) {
514 1.1 skrll xfer_done = 0;
515 1.1 skrll } else if (short_read || urb->actual_length >= urb->length) {
516 1.1 skrll xfer_done = 1;
517 1.1 skrll urb->status = 0;
518 1.1 skrll }
519 1.1 skrll
520 1.1 skrll dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
521 1.1 skrll __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
522 1.1 skrll dev_vdbg(hsotg->dev, " chan->xfer_len %d\n", chan->xfer_len);
523 1.1 skrll dev_vdbg(hsotg->dev, " hctsiz.xfersize %d\n",
524 1.7 skrll (DWC2_READ_4(hsotg, HCTSIZ(chnum)) & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
525 1.1 skrll dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n", urb->length);
526 1.1 skrll dev_vdbg(hsotg->dev, " urb->actual_length %d\n", urb->actual_length);
527 1.1 skrll dev_vdbg(hsotg->dev, " short_read %d, xfer_done %d\n", short_read,
528 1.1 skrll xfer_done);
529 1.1 skrll
530 1.1 skrll return xfer_done;
531 1.1 skrll }
532 1.1 skrll
533 1.1 skrll /*
534 1.1 skrll * Save the starting data toggle for the next transfer. The data toggle is
535 1.1 skrll * saved in the QH for non-control transfers and it's saved in the QTD for
536 1.1 skrll * control transfers.
537 1.1 skrll */
538 1.1 skrll void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
539 1.1 skrll struct dwc2_host_chan *chan, int chnum,
540 1.1 skrll struct dwc2_qtd *qtd)
541 1.1 skrll {
542 1.2 skrll u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
543 1.3 skrll u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
544 1.1 skrll
545 1.1 skrll if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
546 1.1 skrll if (pid == TSIZ_SC_MC_PID_DATA0)
547 1.1 skrll chan->qh->data_toggle = DWC2_HC_PID_DATA0;
548 1.1 skrll else
549 1.1 skrll chan->qh->data_toggle = DWC2_HC_PID_DATA1;
550 1.1 skrll } else {
551 1.1 skrll if (pid == TSIZ_SC_MC_PID_DATA0)
552 1.1 skrll qtd->data_toggle = DWC2_HC_PID_DATA0;
553 1.1 skrll else
554 1.1 skrll qtd->data_toggle = DWC2_HC_PID_DATA1;
555 1.1 skrll }
556 1.1 skrll }
557 1.1 skrll
558 1.1 skrll /**
559 1.1 skrll * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
560 1.1 skrll * the transfer is stopped for any reason. The fields of the current entry in
561 1.1 skrll * the frame descriptor array are set based on the transfer state and the input
562 1.1 skrll * halt_status. Completes the Isochronous URB if all the URB frames have been
563 1.1 skrll * completed.
564 1.1 skrll *
565 1.1 skrll * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
566 1.1 skrll * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
567 1.1 skrll */
568 1.1 skrll static enum dwc2_halt_status dwc2_update_isoc_urb_state(
569 1.1 skrll struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
570 1.1 skrll int chnum, struct dwc2_qtd *qtd,
571 1.1 skrll enum dwc2_halt_status halt_status)
572 1.1 skrll {
573 1.1 skrll struct dwc2_hcd_iso_packet_desc *frame_desc;
574 1.1 skrll struct dwc2_hcd_urb *urb = qtd->urb;
575 1.1 skrll
576 1.1 skrll if (!urb)
577 1.1 skrll return DWC2_HC_XFER_NO_HALT_STATUS;
578 1.1 skrll
579 1.1 skrll frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
580 1.1 skrll
581 1.1 skrll switch (halt_status) {
582 1.1 skrll case DWC2_HC_XFER_COMPLETE:
583 1.1 skrll frame_desc->status = 0;
584 1.1 skrll frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
585 1.1 skrll chan, chnum, qtd, halt_status, NULL);
586 1.1 skrll
587 1.1 skrll /* Non DWORD-aligned buffer case handling */
588 1.12 skrll if (chan->align_buf && frame_desc->actual_length) {
589 1.1 skrll dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
590 1.1 skrll __func__);
591 1.12 skrll usb_dma_t *ud = &chan->qh->dw_align_buf_usbdma;
592 1.12 skrll
593 1.12 skrll usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
594 1.12 skrll chan->ep_is_in ?
595 1.12 skrll BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
596 1.12 skrll if (chan->ep_is_in)
597 1.12 skrll memcpy(urb->buf + frame_desc->offset +
598 1.12 skrll qtd->isoc_split_offset,
599 1.12 skrll chan->qh->dw_align_buf,
600 1.12 skrll frame_desc->actual_length);
601 1.12 skrll usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
602 1.12 skrll chan->ep_is_in ?
603 1.12 skrll BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
604 1.1 skrll }
605 1.1 skrll break;
606 1.1 skrll case DWC2_HC_XFER_FRAME_OVERRUN:
607 1.1 skrll urb->error_count++;
608 1.1 skrll if (chan->ep_is_in)
609 1.1 skrll frame_desc->status = -ENOSR;
610 1.1 skrll else
611 1.1 skrll frame_desc->status = -ECOMM;
612 1.1 skrll frame_desc->actual_length = 0;
613 1.1 skrll break;
614 1.1 skrll case DWC2_HC_XFER_BABBLE_ERR:
615 1.1 skrll urb->error_count++;
616 1.1 skrll frame_desc->status = -EOVERFLOW;
617 1.1 skrll /* Don't need to update actual_length in this case */
618 1.1 skrll break;
619 1.1 skrll case DWC2_HC_XFER_XACT_ERR:
620 1.1 skrll urb->error_count++;
621 1.1 skrll frame_desc->status = -EPROTO;
622 1.1 skrll frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
623 1.1 skrll chan, chnum, qtd, halt_status, NULL);
624 1.1 skrll
625 1.1 skrll /* Non DWORD-aligned buffer case handling */
626 1.12 skrll if (chan->align_buf && frame_desc->actual_length) {
627 1.1 skrll dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
628 1.1 skrll __func__);
629 1.12 skrll usb_dma_t *ud = &chan->qh->dw_align_buf_usbdma;
630 1.12 skrll
631 1.12 skrll usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
632 1.12 skrll chan->ep_is_in ?
633 1.12 skrll BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
634 1.12 skrll if (chan->ep_is_in)
635 1.12 skrll memcpy(urb->buf + frame_desc->offset +
636 1.12 skrll qtd->isoc_split_offset,
637 1.12 skrll chan->qh->dw_align_buf,
638 1.12 skrll frame_desc->actual_length);
639 1.12 skrll usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
640 1.12 skrll chan->ep_is_in ?
641 1.12 skrll BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
642 1.1 skrll }
643 1.1 skrll
644 1.1 skrll /* Skip whole frame */
645 1.1 skrll if (chan->qh->do_split &&
646 1.1 skrll chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
647 1.1 skrll hsotg->core_params->dma_enable > 0) {
648 1.1 skrll qtd->complete_split = 0;
649 1.1 skrll qtd->isoc_split_offset = 0;
650 1.1 skrll }
651 1.1 skrll
652 1.1 skrll break;
653 1.1 skrll default:
654 1.1 skrll dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
655 1.1 skrll halt_status);
656 1.1 skrll break;
657 1.1 skrll }
658 1.1 skrll
659 1.1 skrll if (++qtd->isoc_frame_index == urb->packet_count) {
660 1.1 skrll /*
661 1.1 skrll * urb->status is not used for isoc transfers. The individual
662 1.1 skrll * frame_desc statuses are used instead.
663 1.1 skrll */
664 1.1 skrll dwc2_host_complete(hsotg, qtd, 0);
665 1.1 skrll halt_status = DWC2_HC_XFER_URB_COMPLETE;
666 1.1 skrll } else {
667 1.1 skrll halt_status = DWC2_HC_XFER_COMPLETE;
668 1.1 skrll }
669 1.1 skrll
670 1.1 skrll return halt_status;
671 1.1 skrll }
672 1.1 skrll
673 1.1 skrll /*
674 1.1 skrll * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
675 1.1 skrll * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
676 1.1 skrll * still linked to the QH, the QH is added to the end of the inactive
677 1.1 skrll * non-periodic schedule. For periodic QHs, removes the QH from the periodic
678 1.1 skrll * schedule if no more QTDs are linked to the QH.
679 1.1 skrll */
680 1.1 skrll static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
681 1.1 skrll int free_qtd)
682 1.1 skrll {
683 1.1 skrll int continue_split = 0;
684 1.1 skrll struct dwc2_qtd *qtd;
685 1.1 skrll
686 1.1 skrll if (dbg_qh(qh))
687 1.1 skrll dev_vdbg(hsotg->dev, " %s(%p,%p,%d)\n", __func__,
688 1.1 skrll hsotg, qh, free_qtd);
689 1.1 skrll
690 1.1 skrll if (list_empty(&qh->qtd_list)) {
691 1.1 skrll dev_dbg(hsotg->dev, "## QTD list empty ##\n");
692 1.1 skrll goto no_qtd;
693 1.1 skrll }
694 1.1 skrll
695 1.1 skrll qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
696 1.1 skrll
697 1.1 skrll if (qtd->complete_split)
698 1.1 skrll continue_split = 1;
699 1.1 skrll else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
700 1.1 skrll qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
701 1.1 skrll continue_split = 1;
702 1.1 skrll
703 1.1 skrll if (free_qtd) {
704 1.1 skrll dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
705 1.1 skrll continue_split = 0;
706 1.1 skrll }
707 1.1 skrll
708 1.1 skrll no_qtd:
709 1.1 skrll if (qh->channel)
710 1.1 skrll qh->channel->align_buf = 0;
711 1.1 skrll qh->channel = NULL;
712 1.1 skrll dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
713 1.1 skrll }
714 1.1 skrll
715 1.1 skrll /**
716 1.1 skrll * dwc2_release_channel() - Releases a host channel for use by other transfers
717 1.1 skrll *
718 1.1 skrll * @hsotg: The HCD state structure
719 1.1 skrll * @chan: The host channel to release
720 1.1 skrll * @qtd: The QTD associated with the host channel. This QTD may be
721 1.1 skrll * freed if the transfer is complete or an error has occurred.
722 1.1 skrll * @halt_status: Reason the channel is being released. This status
723 1.1 skrll * determines the actions taken by this function.
724 1.1 skrll *
725 1.1 skrll * Also attempts to select and queue more transactions since at least one host
726 1.1 skrll * channel is available.
727 1.1 skrll */
728 1.1 skrll static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
729 1.1 skrll struct dwc2_host_chan *chan,
730 1.1 skrll struct dwc2_qtd *qtd,
731 1.1 skrll enum dwc2_halt_status halt_status)
732 1.1 skrll {
733 1.1 skrll enum dwc2_transaction_type tr_type;
734 1.1 skrll u32 haintmsk;
735 1.1 skrll int free_qtd = 0;
736 1.1 skrll
737 1.1 skrll if (dbg_hc(chan))
738 1.1 skrll dev_vdbg(hsotg->dev, " %s: channel %d, halt_status %d\n",
739 1.1 skrll __func__, chan->hc_num, halt_status);
740 1.1 skrll
741 1.1 skrll switch (halt_status) {
742 1.1 skrll case DWC2_HC_XFER_URB_COMPLETE:
743 1.1 skrll free_qtd = 1;
744 1.1 skrll break;
745 1.1 skrll case DWC2_HC_XFER_AHB_ERR:
746 1.1 skrll case DWC2_HC_XFER_STALL:
747 1.1 skrll case DWC2_HC_XFER_BABBLE_ERR:
748 1.1 skrll free_qtd = 1;
749 1.1 skrll break;
750 1.1 skrll case DWC2_HC_XFER_XACT_ERR:
751 1.1 skrll if (qtd && qtd->error_count >= 3) {
752 1.1 skrll dev_vdbg(hsotg->dev,
753 1.1 skrll " Complete URB with transaction error\n");
754 1.1 skrll free_qtd = 1;
755 1.1 skrll dwc2_host_complete(hsotg, qtd, -EPROTO);
756 1.1 skrll }
757 1.1 skrll break;
758 1.1 skrll case DWC2_HC_XFER_URB_DEQUEUE:
759 1.1 skrll /*
760 1.1 skrll * The QTD has already been removed and the QH has been
761 1.1 skrll * deactivated. Don't want to do anything except release the
762 1.1 skrll * host channel and try to queue more transfers.
763 1.1 skrll */
764 1.1 skrll goto cleanup;
765 1.1 skrll case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
766 1.1 skrll dev_vdbg(hsotg->dev, " Complete URB with I/O error\n");
767 1.1 skrll free_qtd = 1;
768 1.1 skrll dwc2_host_complete(hsotg, qtd, -EIO);
769 1.1 skrll break;
770 1.1 skrll case DWC2_HC_XFER_NO_HALT_STATUS:
771 1.1 skrll default:
772 1.1 skrll break;
773 1.1 skrll }
774 1.1 skrll
775 1.1 skrll dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);
776 1.1 skrll
777 1.1 skrll cleanup:
778 1.1 skrll /*
779 1.1 skrll * Release the host channel for use by other transfers. The cleanup
780 1.1 skrll * function clears the channel interrupt enables and conditions, so
781 1.1 skrll * there's no need to clear the Channel Halted interrupt separately.
782 1.1 skrll */
783 1.1 skrll if (!list_empty(&chan->hc_list_entry))
784 1.1 skrll list_del(&chan->hc_list_entry);
785 1.1 skrll dwc2_hc_cleanup(hsotg, chan);
786 1.1 skrll list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
787 1.1 skrll
788 1.1 skrll if (hsotg->core_params->uframe_sched > 0) {
789 1.1 skrll hsotg->available_host_channels++;
790 1.1 skrll } else {
791 1.1 skrll switch (chan->ep_type) {
792 1.1 skrll case USB_ENDPOINT_XFER_CONTROL:
793 1.1 skrll case USB_ENDPOINT_XFER_BULK:
794 1.1 skrll hsotg->non_periodic_channels--;
795 1.1 skrll break;
796 1.1 skrll default:
797 1.1 skrll /*
798 1.1 skrll * Don't release reservations for periodic channels
799 1.1 skrll * here. That's done when a periodic transfer is
800 1.1 skrll * descheduled (i.e. when the QH is removed from the
801 1.1 skrll * periodic schedule).
802 1.1 skrll */
803 1.1 skrll break;
804 1.1 skrll }
805 1.1 skrll }
806 1.1 skrll
807 1.2 skrll haintmsk = DWC2_READ_4(hsotg, HAINTMSK);
808 1.1 skrll haintmsk &= ~(1 << chan->hc_num);
809 1.2 skrll DWC2_WRITE_4(hsotg, HAINTMSK, haintmsk);
810 1.1 skrll
811 1.1 skrll /* Try to queue more transfers now that there's a free channel */
812 1.1 skrll tr_type = dwc2_hcd_select_transactions(hsotg);
813 1.1 skrll if (tr_type != DWC2_TRANSACTION_NONE)
814 1.1 skrll dwc2_hcd_queue_transactions(hsotg, tr_type);
815 1.1 skrll }
816 1.1 skrll
817 1.1 skrll /*
818 1.1 skrll * Halts a host channel. If the channel cannot be halted immediately because
819 1.1 skrll * the request queue is full, this function ensures that the FIFO empty
820 1.1 skrll * interrupt for the appropriate queue is enabled so that the halt request can
821 1.1 skrll * be queued when there is space in the request queue.
822 1.1 skrll *
823 1.1 skrll * This function may also be called in DMA mode. In that case, the channel is
824 1.1 skrll * simply released since the core always halts the channel automatically in
825 1.1 skrll * DMA mode.
826 1.1 skrll */
827 1.1 skrll static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
828 1.1 skrll struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
829 1.1 skrll enum dwc2_halt_status halt_status)
830 1.1 skrll {
831 1.1 skrll if (dbg_hc(chan))
832 1.1 skrll dev_vdbg(hsotg->dev, "%s()\n", __func__);
833 1.1 skrll
834 1.1 skrll if (hsotg->core_params->dma_enable > 0) {
835 1.1 skrll if (dbg_hc(chan))
836 1.1 skrll dev_vdbg(hsotg->dev, "DMA enabled\n");
837 1.1 skrll dwc2_release_channel(hsotg, chan, qtd, halt_status);
838 1.1 skrll return;
839 1.1 skrll }
840 1.1 skrll
841 1.1 skrll /* Slave mode processing */
842 1.1 skrll dwc2_hc_halt(hsotg, chan, halt_status);
843 1.1 skrll
844 1.1 skrll if (chan->halt_on_queue) {
845 1.1 skrll u32 gintmsk;
846 1.1 skrll
847 1.1 skrll dev_vdbg(hsotg->dev, "Halt on queue\n");
848 1.1 skrll if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
849 1.1 skrll chan->ep_type == USB_ENDPOINT_XFER_BULK) {
850 1.1 skrll dev_vdbg(hsotg->dev, "control/bulk\n");
851 1.1 skrll /*
852 1.1 skrll * Make sure the Non-periodic Tx FIFO empty interrupt
853 1.1 skrll * is enabled so that the non-periodic schedule will
854 1.1 skrll * be processed
855 1.1 skrll */
856 1.2 skrll gintmsk = DWC2_READ_4(hsotg, GINTMSK);
857 1.1 skrll gintmsk |= GINTSTS_NPTXFEMP;
858 1.2 skrll DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
859 1.1 skrll } else {
860 1.1 skrll dev_vdbg(hsotg->dev, "isoc/intr\n");
861 1.1 skrll /*
862 1.1 skrll * Move the QH from the periodic queued schedule to
863 1.1 skrll * the periodic assigned schedule. This allows the
864 1.1 skrll * halt to be queued when the periodic schedule is
865 1.1 skrll * processed.
866 1.1 skrll */
867 1.1 skrll list_move(&chan->qh->qh_list_entry,
868 1.1 skrll &hsotg->periodic_sched_assigned);
869 1.1 skrll
870 1.1 skrll /*
871 1.1 skrll * Make sure the Periodic Tx FIFO Empty interrupt is
872 1.1 skrll * enabled so that the periodic schedule will be
873 1.1 skrll * processed
874 1.1 skrll */
875 1.2 skrll gintmsk = DWC2_READ_4(hsotg, GINTMSK);
876 1.1 skrll gintmsk |= GINTSTS_PTXFEMP;
877 1.2 skrll DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
878 1.1 skrll }
879 1.1 skrll }
880 1.1 skrll }
881 1.1 skrll
882 1.1 skrll /*
883 1.1 skrll * Performs common cleanup for non-periodic transfers after a Transfer
884 1.1 skrll * Complete interrupt. This function should be called after any endpoint type
885 1.1 skrll * specific handling is finished to release the host channel.
886 1.1 skrll */
887 1.1 skrll static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
888 1.1 skrll struct dwc2_host_chan *chan,
889 1.1 skrll int chnum, struct dwc2_qtd *qtd,
890 1.1 skrll enum dwc2_halt_status halt_status)
891 1.1 skrll {
892 1.1 skrll dev_vdbg(hsotg->dev, "%s()\n", __func__);
893 1.1 skrll
894 1.1 skrll qtd->error_count = 0;
895 1.1 skrll
896 1.1 skrll if (chan->hcint & HCINTMSK_NYET) {
897 1.1 skrll /*
898 1.1 skrll * Got a NYET on the last transaction of the transfer. This
899 1.1 skrll * means that the endpoint should be in the PING state at the
900 1.1 skrll * beginning of the next transfer.
901 1.1 skrll */
902 1.1 skrll dev_vdbg(hsotg->dev, "got NYET\n");
903 1.1 skrll chan->qh->ping_state = 1;
904 1.1 skrll }
905 1.1 skrll
906 1.1 skrll /*
907 1.1 skrll * Always halt and release the host channel to make it available for
908 1.1 skrll * more transfers. There may still be more phases for a control
909 1.1 skrll * transfer or more data packets for a bulk transfer at this point,
910 1.1 skrll * but the host channel is still halted. A channel will be reassigned
911 1.1 skrll * to the transfer when the non-periodic schedule is processed after
912 1.1 skrll * the channel is released. This allows transactions to be queued
913 1.1 skrll * properly via dwc2_hcd_queue_transactions, which also enables the
914 1.1 skrll * Tx FIFO Empty interrupt if necessary.
915 1.1 skrll */
916 1.1 skrll if (chan->ep_is_in) {
917 1.1 skrll /*
918 1.1 skrll * IN transfers in Slave mode require an explicit disable to
919 1.1 skrll * halt the channel. (In DMA mode, this call simply releases
920 1.1 skrll * the channel.)
921 1.1 skrll */
922 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, halt_status);
923 1.1 skrll } else {
924 1.1 skrll /*
925 1.1 skrll * The channel is automatically disabled by the core for OUT
926 1.1 skrll * transfers in Slave mode
927 1.1 skrll */
928 1.1 skrll dwc2_release_channel(hsotg, chan, qtd, halt_status);
929 1.1 skrll }
930 1.1 skrll }
931 1.1 skrll
932 1.1 skrll /*
933 1.1 skrll * Performs common cleanup for periodic transfers after a Transfer Complete
934 1.1 skrll * interrupt. This function should be called after any endpoint type specific
935 1.1 skrll * handling is finished to release the host channel.
936 1.1 skrll */
937 1.1 skrll static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
938 1.1 skrll struct dwc2_host_chan *chan, int chnum,
939 1.1 skrll struct dwc2_qtd *qtd,
940 1.1 skrll enum dwc2_halt_status halt_status)
941 1.1 skrll {
942 1.2 skrll u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
943 1.1 skrll
944 1.1 skrll qtd->error_count = 0;
945 1.1 skrll
946 1.1 skrll if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
947 1.1 skrll /* Core halts channel in these cases */
948 1.1 skrll dwc2_release_channel(hsotg, chan, qtd, halt_status);
949 1.1 skrll else
950 1.1 skrll /* Flush any outstanding requests from the Tx queue */
951 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, halt_status);
952 1.1 skrll }
953 1.1 skrll
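/*
 * Handles Transfer Complete on the CSPLIT of an isochronous split IN.
 * Accumulates the data received this microframe into the current frame
 * descriptor. If nothing was received, the split state is reset and 0 is
 * returned; otherwise the channel is released (completing the URB once
 * all frames are done) and 1 is returned.
 */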
954 1.1 skrll static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
955 1.1 skrll struct dwc2_host_chan *chan, int chnum,
956 1.1 skrll struct dwc2_qtd *qtd)
957 1.1 skrll {
958 1.1 skrll struct dwc2_hcd_iso_packet_desc *frame_desc;
959 1.1 skrll u32 len;
960 1.1 skrll
961 1.1 skrll if (!qtd->urb)
962 1.1 skrll return 0;
963 1.1 skrll
964 1.1 skrll frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
965 1.1 skrll len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
966 1.1 skrll DWC2_HC_XFER_COMPLETE, NULL);
967 1.1 skrll if (!len) {
968 1.1 skrll qtd->complete_split = 0;
969 1.1 skrll qtd->isoc_split_offset = 0;
970 1.1 skrll return 0;
971 1.1 skrll }
972 1.1 skrll
973 1.1 skrll frame_desc->actual_length += len;
974 1.1 skrll
975 1.8 skrll if (chan->align_buf) {
976 1.1 skrll dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
977 1.12 skrll usb_syncmem(qtd->urb->usbdma, chan->qh->dw_align_buf_dma,
978 1.12 skrll chan->qh->dw_align_buf_size, BUS_DMASYNC_POSTREAD);
979 1.1 skrll memcpy(qtd->urb->buf + frame_desc->offset +
980 1.1 skrll qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
981 1.12 skrll usb_syncmem(qtd->urb->usbdma, chan->qh->dw_align_buf_dma,
982 1.12 skrll chan->qh->dw_align_buf_size, BUS_DMASYNC_PREREAD);
983 1.1 skrll }
984 1.1 skrll
985 1.1 skrll qtd->isoc_split_offset += len;
986 1.1 skrll
987 1.1 skrll if (frame_desc->actual_length >= frame_desc->length) {
988 1.1 skrll frame_desc->status = 0;
989 1.1 skrll qtd->isoc_frame_index++;
990 1.1 skrll qtd->complete_split = 0;
991 1.1 skrll qtd->isoc_split_offset = 0;
992 1.1 skrll }
993 1.1 skrll
994 1.1 skrll if (qtd->isoc_frame_index == qtd->urb->packet_count) {
995 1.1 skrll dwc2_host_complete(hsotg, qtd, 0);
996 1.1 skrll dwc2_release_channel(hsotg, chan, qtd,
997 1.1 skrll DWC2_HC_XFER_URB_COMPLETE);
998 1.1 skrll } else {
999 1.1 skrll dwc2_release_channel(hsotg, chan, qtd,
1000 1.1 skrll DWC2_HC_XFER_NO_HALT_STATUS);
1001 1.1 skrll }
1002 1.1 skrll
1003 1.1 skrll return 1; /* Indicates that channel released */
1004 1.1 skrll }
1005 1.1 skrll
1006 1.1 skrll /*
1007 1.1 skrll * Handles a host channel Transfer Complete interrupt. This handler may be
1008 1.1 skrll * called in either DMA mode or Slave mode.
1009 1.1 skrll */
1010 1.1 skrll static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
1011 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1012 1.1 skrll struct dwc2_qtd *qtd)
1013 1.1 skrll {
1014 1.1 skrll struct dwc2_hcd_urb *urb = qtd->urb;
1015 1.1 skrll enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
1016 1.8 skrll int pipe_type;
1017 1.1 skrll int urb_xfer_done;
1018 1.1 skrll
1019 1.1 skrll if (dbg_hc(chan))
1020 1.1 skrll dev_vdbg(hsotg->dev,
1021 1.1 skrll "--Host Channel %d Interrupt: Transfer Complete--\n",
1022 1.1 skrll chnum);
1023 1.1 skrll
1024 1.8 skrll if (!urb)
1025 1.8 skrll goto handle_xfercomp_done;
1026 1.8 skrll
1027 1.8 skrll pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1028 1.8 skrll
1029 1.1 skrll if (hsotg->core_params->dma_desc_enable > 0) {
1030 1.1 skrll dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
1031 1.1 skrll if (pipe_type == USB_ENDPOINT_XFER_ISOC)
1032 1.1 skrll /* Do not disable the interrupt, just clear it */
1033 1.1 skrll return;
1034 1.1 skrll goto handle_xfercomp_done;
1035 1.1 skrll }
1036 1.1 skrll
1037 1.1 skrll /* Handle xfer complete on CSPLIT */
1038 1.1 skrll if (chan->qh->do_split) {
1039 1.1 skrll if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
1040 1.1 skrll hsotg->core_params->dma_enable > 0) {
1041 1.1 skrll if (qtd->complete_split &&
1042 1.1 skrll dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
1043 1.1 skrll qtd))
1044 1.1 skrll goto handle_xfercomp_done;
1045 1.1 skrll } else {
1046 1.1 skrll qtd->complete_split = 0;
1047 1.1 skrll }
1048 1.1 skrll }
1049 1.1 skrll
1050 1.1 skrll /* Update the QTD and URB states */
1051 1.1 skrll switch (pipe_type) {
1052 1.1 skrll case USB_ENDPOINT_XFER_CONTROL:
1053 1.1 skrll switch (qtd->control_phase) {
1054 1.1 skrll case DWC2_CONTROL_SETUP:
1055 1.1 skrll if (urb->length > 0)
1056 1.1 skrll qtd->control_phase = DWC2_CONTROL_DATA;
1057 1.1 skrll else
1058 1.1 skrll qtd->control_phase = DWC2_CONTROL_STATUS;
1059 1.1 skrll dev_vdbg(hsotg->dev,
1060 1.1 skrll " Control setup transaction done\n");
1061 1.1 skrll halt_status = DWC2_HC_XFER_COMPLETE;
1062 1.1 skrll break;
1063 1.1 skrll case DWC2_CONTROL_DATA:
1064 1.1 skrll urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
1065 1.1 skrll chnum, urb, qtd);
1066 1.1 skrll if (urb_xfer_done) {
1067 1.1 skrll qtd->control_phase = DWC2_CONTROL_STATUS;
1068 1.1 skrll dev_vdbg(hsotg->dev,
1069 1.1 skrll " Control data transfer done\n");
1070 1.1 skrll } else {
1071 1.1 skrll dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1072 1.1 skrll qtd);
1073 1.1 skrll }
1074 1.1 skrll halt_status = DWC2_HC_XFER_COMPLETE;
1075 1.1 skrll break;
1076 1.1 skrll case DWC2_CONTROL_STATUS:
1077 1.1 skrll dev_vdbg(hsotg->dev, " Control transfer complete\n");
1078 1.1 skrll if (urb->status == -EINPROGRESS)
1079 1.1 skrll urb->status = 0;
1080 1.1 skrll dwc2_host_complete(hsotg, qtd, urb->status);
1081 1.1 skrll halt_status = DWC2_HC_XFER_URB_COMPLETE;
1082 1.1 skrll break;
1083 1.1 skrll }
1084 1.1 skrll
1085 1.1 skrll dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1086 1.1 skrll halt_status);
1087 1.1 skrll break;
1088 1.1 skrll case USB_ENDPOINT_XFER_BULK:
1089 1.1 skrll dev_vdbg(hsotg->dev, " Bulk transfer complete\n");
1090 1.1 skrll urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1091 1.1 skrll qtd);
1092 1.1 skrll if (urb_xfer_done) {
1093 1.1 skrll dwc2_host_complete(hsotg, qtd, urb->status);
1094 1.1 skrll halt_status = DWC2_HC_XFER_URB_COMPLETE;
1095 1.1 skrll } else {
1096 1.1 skrll halt_status = DWC2_HC_XFER_COMPLETE;
1097 1.1 skrll }
1098 1.1 skrll
1099 1.1 skrll dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1100 1.1 skrll dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1101 1.1 skrll halt_status);
1102 1.1 skrll break;
1103 1.1 skrll case USB_ENDPOINT_XFER_INT:
1104 1.1 skrll dev_vdbg(hsotg->dev, " Interrupt transfer complete\n");
1105 1.1 skrll urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1106 1.1 skrll qtd);
1107 1.1 skrll
1108 1.1 skrll /*
1109 1.1 skrll * Interrupt URB is done on the first transfer complete
1110 1.1 skrll * interrupt
1111 1.1 skrll */
1112 1.1 skrll if (urb_xfer_done) {
1113 1.1 skrll dwc2_host_complete(hsotg, qtd, urb->status);
1114 1.1 skrll halt_status = DWC2_HC_XFER_URB_COMPLETE;
1115 1.1 skrll } else {
1116 1.1 skrll halt_status = DWC2_HC_XFER_COMPLETE;
1117 1.1 skrll }
1118 1.1 skrll
1119 1.1 skrll dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1120 1.1 skrll dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1121 1.1 skrll halt_status);
1122 1.1 skrll break;
1123 1.1 skrll case USB_ENDPOINT_XFER_ISOC:
1124 1.1 skrll if (dbg_perio())
1125 1.1 skrll dev_vdbg(hsotg->dev, " Isochronous transfer complete\n");
1126 1.1 skrll if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
1127 1.1 skrll halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1128 1.1 skrll chnum, qtd, DWC2_HC_XFER_COMPLETE);
1129 1.1 skrll dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1130 1.1 skrll halt_status);
1131 1.1 skrll break;
1132 1.1 skrll }
1133 1.1 skrll
1134 1.1 skrll handle_xfercomp_done:
1135 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
1136 1.1 skrll }
1137 1.1 skrll
1138 1.1 skrll /*
1139 1.1 skrll * Handles a host channel STALL interrupt. This handler may be called in
1140 1.1 skrll * either DMA mode or Slave mode.
1141 1.1 skrll */
1142 1.1 skrll static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
1143 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1144 1.1 skrll struct dwc2_qtd *qtd)
1145 1.1 skrll {
1146 1.1 skrll struct dwc2_hcd_urb *urb = qtd->urb;
1147 1.8 skrll int pipe_type;
1148 1.1 skrll
1149 1.1 skrll dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
1150 1.1 skrll chnum);
1151 1.1 skrll
1152 1.1 skrll if (hsotg->core_params->dma_desc_enable > 0) {
1153 1.1 skrll dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1154 1.1 skrll DWC2_HC_XFER_STALL);
1155 1.1 skrll goto handle_stall_done;
1156 1.1 skrll }
1157 1.1 skrll
1158 1.1 skrll if (!urb)
1159 1.1 skrll goto handle_stall_halt;
1160 1.1 skrll
1161 1.8 skrll pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1162 1.8 skrll
1163 1.1 skrll if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
1164 1.1 skrll dwc2_host_complete(hsotg, qtd, -EPIPE);
1165 1.1 skrll
1166 1.1 skrll if (pipe_type == USB_ENDPOINT_XFER_BULK ||
1167 1.1 skrll pipe_type == USB_ENDPOINT_XFER_INT) {
1168 1.1 skrll dwc2_host_complete(hsotg, qtd, -EPIPE);
1169 1.1 skrll /*
1170 1.1 skrll * USB protocol requires resetting the data toggle for bulk
1171 1.1 skrll * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1172 1.1 skrll * setup command is issued to the endpoint. Anticipate the
1173 1.1 skrll * CLEAR_FEATURE command since a STALL has occurred and reset
1174 1.1 skrll * the data toggle now.
1175 1.1 skrll */
1176 1.1 skrll chan->qh->data_toggle = 0;
1177 1.1 skrll }
1178 1.1 skrll
1179 1.1 skrll handle_stall_halt:
1180 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
1181 1.1 skrll
1182 1.1 skrll handle_stall_done:
1183 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
1184 1.1 skrll }
1185 1.1 skrll
1186 1.1 skrll /*
1187 1.1 skrll * Updates the state of the URB when a transfer has been stopped due to an
1188 1.1 skrll * abnormal condition before the transfer completes. Modifies the
1189 1.1 skrll * actual_length field of the URB to reflect the number of bytes that have
1190 1.1 skrll * actually been transferred via the host channel.
1191 1.1 skrll */
1192 1.1 skrll static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
1193 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1194 1.1 skrll struct dwc2_hcd_urb *urb,
1195 1.1 skrll struct dwc2_qtd *qtd,
1196 1.1 skrll enum dwc2_halt_status halt_status)
1197 1.1 skrll {
1198 1.1 skrll u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
1199 1.1 skrll qtd, halt_status, NULL);
1200 1.1 skrll
1201 1.1 skrll if (urb->actual_length + xfer_length > urb->length) {
1202 1.1 skrll dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
1203 1.1 skrll xfer_length = urb->length - urb->actual_length;
1204 1.1 skrll }
1205 1.1 skrll
1206 1.1 skrll /* Non DWORD-aligned buffer case handling */
1207 1.1 skrll if (chan->align_buf && xfer_length && chan->ep_is_in) {
1208 1.1 skrll dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
1209 1.12 skrll
1210 1.12 skrll usb_dma_t *ud = &chan->qh->dw_align_buf_usbdma;
1211 1.12 skrll
1212 1.12 skrll usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
1213 1.12 skrll chan->ep_is_in ?
1214 1.12 skrll BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1215 1.12 skrll if (chan->ep_is_in)
1216 1.12 skrll memcpy(urb->buf + urb->actual_length,
1217 1.12 skrll chan->qh->dw_align_buf,
1218 1.12 skrll xfer_length);
1219 1.12 skrll usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
1220 1.12 skrll chan->ep_is_in ?
1221 1.12 skrll BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1222 1.1 skrll }
1223 1.1 skrll
1224 1.1 skrll urb->actual_length += xfer_length;
1225 1.1 skrll
1226 1.1 skrll dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
1227 1.1 skrll __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
1228 1.1 skrll dev_vdbg(hsotg->dev, " chan->start_pkt_count %d\n",
1229 1.1 skrll chan->start_pkt_count);
1230 1.1 skrll dev_vdbg(hsotg->dev, " hctsiz.pktcnt %d\n",
1231 1.7 skrll (DWC2_READ_4(hsotg, HCTSIZ(chnum)) & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
1232 1.1 skrll dev_vdbg(hsotg->dev, " chan->max_packet %d\n", chan->max_packet);
1233 1.1 skrll dev_vdbg(hsotg->dev, " bytes_transferred %d\n",
1234 1.1 skrll xfer_length);
1235 1.1 skrll dev_vdbg(hsotg->dev, " urb->actual_length %d\n",
1236 1.1 skrll urb->actual_length);
1237 1.1 skrll dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n",
1238 1.1 skrll urb->length);
1239 1.1 skrll }
1240 1.1 skrll
1241 1.1 skrll /*
1242 1.1 skrll * Handles a host channel NAK interrupt. This handler may be called in either
1243 1.1 skrll * DMA mode or Slave mode.
1244 1.1 skrll */
1245 1.1 skrll static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1246 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1247 1.1 skrll struct dwc2_qtd *qtd)
1248 1.1 skrll {
1249 1.12 skrll if (!qtd) {
1250 1.12 skrll dev_dbg(hsotg->dev, "%s: qtd is NULL\n", __func__);
1251 1.12 skrll return;
1252 1.12 skrll }
1253 1.12 skrll
1254 1.12 skrll if (!qtd->urb) {
1255 1.12 skrll dev_dbg(hsotg->dev, "%s: qtd->urb is NULL\n", __func__);
1256 1.12 skrll return;
1257 1.12 skrll }
1258 1.12 skrll
1259 1.1 skrll if (dbg_hc(chan))
1260 1.1 skrll dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
1261 1.1 skrll chnum);
1262 1.1 skrll
1263 1.1 skrll /*
1264 1.1 skrll * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
1265 1.1 skrll * interrupt. Re-start the SSPLIT transfer.
1266 1.14 simonb *
1267 1.14 simonb * Normally for non-periodic transfers we'll retry right away, but to
1268 1.14 simonb * avoid interrupt storms we'll wait before retrying if we've got
1269 1.14 simonb * several NAKs. If we didn't do this we'd retry directly from the
1270 1.14 simonb * interrupt handler and could end up quickly getting another
1271 1.14 simonb * interrupt (another NAK), which we'd retry.
1272 1.14 simonb *
1273 1.14 simonb * Note that in DMA mode software only gets involved to re-send NAKed
1274 1.14 simonb * transfers for split transactions unless the core is missing OUT NAK
1275 1.14 simonb * enhancement.
1276 1.1 skrll */
1277 1.1 skrll if (chan->do_split) {
1278 1.11 skrll /*
1279 1.11  skrll 		 * When we get control/bulk NAKs, remember this so we hold off on
1280 1.11  skrll 		 * this qh until the beginning of the next frame
1281 1.11 skrll */
1282 1.11 skrll switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1283 1.11 skrll case USB_ENDPOINT_XFER_CONTROL:
1284 1.11 skrll case USB_ENDPOINT_XFER_BULK:
1285 1.11 skrll chan->qh->nak_frame = dwc2_hcd_get_frame_number(hsotg);
1286 1.11 skrll break;
1287 1.11 skrll }
1288 1.11 skrll
1289 1.1 skrll if (chan->complete_split)
1290 1.1 skrll qtd->error_count = 0;
1291 1.1 skrll qtd->complete_split = 0;
1292 1.14 simonb qtd->num_naks++;
1293 1.14 simonb qtd->qh->want_wait = qtd->num_naks >= dwc2_naks_before_delay;
1294 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1295 1.1 skrll goto handle_nak_done;
1296 1.1 skrll }
1297 1.1 skrll
1298 1.1 skrll switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1299 1.1 skrll case USB_ENDPOINT_XFER_CONTROL:
1300 1.1 skrll case USB_ENDPOINT_XFER_BULK:
1301 1.1 skrll if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
1302 1.1 skrll /*
1303 1.1 skrll * NAK interrupts are enabled on bulk/control IN
1304 1.1 skrll * transfers in DMA mode for the sole purpose of
1305 1.1 skrll * resetting the error count after a transaction error
1306 1.1 skrll * occurs. The core will continue transferring data.
1307 1.1 skrll */
1308 1.1 skrll qtd->error_count = 0;
1309 1.1 skrll break;
1310 1.1 skrll }
1311 1.1 skrll
1312 1.1 skrll /*
1313 1.1 skrll * NAK interrupts normally occur during OUT transfers in DMA
1314 1.1 skrll * or Slave mode. For IN transfers, more requests will be
1315 1.1 skrll * queued as request queue space is available.
1316 1.1 skrll */
1317 1.1 skrll qtd->error_count = 0;
1318 1.1 skrll
1319 1.14 simonb if (hsotg->core_params->dma_enable > 0 && !chan->ep_is_in) {
1320 1.14 simonb /*
1321 1.14 simonb * Avoid interrupt storms.
1322 1.14 simonb */
1323 1.14 simonb qtd->qh->want_wait = 1;
1324 1.14 simonb } else if (!chan->qh->ping_state) {
1325 1.1 skrll dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1326 1.1 skrll qtd, DWC2_HC_XFER_NAK);
1327 1.1 skrll dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1328 1.1 skrll
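			/*
			 * A high-speed endpoint that NAKed should be polled
			 * with PING before the next OUT data packet.
			 */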
1329 1.1 skrll if (chan->speed == USB_SPEED_HIGH)
1330 1.1 skrll chan->qh->ping_state = 1;
1331 1.1 skrll }
1332 1.1 skrll
1333 1.1 skrll /*
1334 1.1 skrll * Halt the channel so the transfer can be re-started from
1335 1.1 skrll * the appropriate point or the PING protocol will
1336 1.1 skrll * start/continue
1337 1.1 skrll */
1338 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1339 1.1 skrll break;
1340 1.1 skrll case USB_ENDPOINT_XFER_INT:
1341 1.1 skrll qtd->error_count = 0;
1342 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1343 1.1 skrll break;
1344 1.1 skrll case USB_ENDPOINT_XFER_ISOC:
1345 1.1 skrll /* Should never get called for isochronous transfers */
1346 1.1 skrll 		dev_err(hsotg->dev, "NAK interrupt for ISOC transfer\n");
1347 1.1 skrll break;
1348 1.1 skrll }
1349 1.1 skrll
1350 1.1 skrll handle_nak_done:
1351 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
1352 1.1 skrll }
1353 1.1 skrll
1354 1.1 skrll /*
1355 1.1 skrll * Handles a host channel ACK interrupt. This interrupt is enabled when
1356 1.1 skrll * performing the PING protocol in Slave mode, when errors occur during
1357 1.1 skrll * either Slave mode or DMA mode, and during Start Split transactions.
1358 1.1 skrll */
1359 1.1 skrll static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
1360 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1361 1.1 skrll struct dwc2_qtd *qtd)
1362 1.1 skrll {
1363 1.1 skrll struct dwc2_hcd_iso_packet_desc *frame_desc;
1364 1.1 skrll
1365 1.1 skrll if (dbg_hc(chan))
1366 1.1 skrll dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
1367 1.1 skrll chnum);
1368 1.1 skrll
1369 1.1 skrll if (chan->do_split) {
1370 1.1 skrll /* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
1371 1.1 skrll if (!chan->ep_is_in &&
1372 1.1 skrll chan->data_pid_start != DWC2_HC_PID_SETUP)
1373 1.1 skrll qtd->ssplit_out_xfer_count = chan->xfer_len;
1374 1.1 skrll
1375 1.1 skrll if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
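			/* The SSPLIT was accepted, so a CSPLIT must follow */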
1376 1.1 skrll qtd->complete_split = 1;
1377 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1378 1.1 skrll } else {
1379 1.1 skrll /* ISOC OUT */
1380 1.1 skrll switch (chan->xact_pos) {
1381 1.1 skrll case DWC2_HCSPLT_XACTPOS_ALL:
1382 1.1 skrll break;
1383 1.1 skrll case DWC2_HCSPLT_XACTPOS_END:
1384 1.1 skrll qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
1385 1.1 skrll qtd->isoc_split_offset = 0;
1386 1.1 skrll break;
1387 1.1 skrll case DWC2_HCSPLT_XACTPOS_BEGIN:
1388 1.1 skrll case DWC2_HCSPLT_XACTPOS_MID:
1389 1.1 skrll /*
1390 1.1 skrll * For BEGIN or MID, calculate the length for
1391 1.1 skrll * the next microframe to determine the correct
1392 1.1 skrll * SSPLIT token, either MID or END
1393 1.1 skrll */
1394 1.1 skrll frame_desc = &qtd->urb->iso_descs[
1395 1.1 skrll qtd->isoc_frame_index];
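				/*
				 * At most 188 bytes of isochronous OUT data
				 * fit in one start-split, so advance the
				 * split offset by one such chunk.
				 */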
1396 1.1 skrll qtd->isoc_split_offset += 188;
1397 1.1 skrll
1398 1.1 skrll if (frame_desc->length - qtd->isoc_split_offset
1399 1.1 skrll <= 188)
1400 1.1 skrll qtd->isoc_split_pos =
1401 1.1 skrll DWC2_HCSPLT_XACTPOS_END;
1402 1.1 skrll else
1403 1.1 skrll qtd->isoc_split_pos =
1404 1.1 skrll DWC2_HCSPLT_XACTPOS_MID;
1405 1.1 skrll break;
1406 1.1 skrll }
1407 1.1 skrll }
1408 1.1 skrll } else {
1409 1.1 skrll qtd->error_count = 0;
1410 1.1 skrll
1411 1.1 skrll if (chan->qh->ping_state) {
1412 1.1 skrll chan->qh->ping_state = 0;
1413 1.1 skrll /*
1414 1.1 skrll * Halt the channel so the transfer can be re-started
1415 1.1 skrll * from the appropriate point. This only happens in
1416 1.1 skrll * Slave mode. In DMA mode, the ping_state is cleared
1417 1.1 skrll * when the transfer is started because the core
1418 1.1 skrll * automatically executes the PING, then the transfer.
1419 1.1 skrll */
1420 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1421 1.1 skrll }
1422 1.1 skrll }
1423 1.1 skrll
1424 1.1 skrll /*
1425 1.1 skrll * If the ACK occurred when _not_ in the PING state, let the channel
1426 1.1 skrll * continue transferring data after clearing the error count
1427 1.1 skrll */
1428 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
1429 1.1 skrll }
1430 1.1 skrll
1431 1.1 skrll /*
1432 1.1 skrll * Handles a host channel NYET interrupt. This interrupt should only occur on
1433 1.1 skrll * Bulk and Control OUT endpoints and for complete split transactions. If a
1434 1.1 skrll * NYET occurs at the same time as a Transfer Complete interrupt, it is
1435 1.1 skrll * handled in the xfercomp interrupt handler, not here. This handler may be
1436 1.1 skrll * called in either DMA mode or Slave mode.
1437 1.1 skrll */
1438 1.1 skrll static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
1439 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1440 1.1 skrll struct dwc2_qtd *qtd)
1441 1.1 skrll {
1442 1.1 skrll if (dbg_hc(chan))
1443 1.1 skrll dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
1444 1.1 skrll chnum);
1445 1.1 skrll
1446 1.1 skrll /*
1447 1.1 skrll 	 * NYET on a CSPLIT: the TT has not completed the transaction yet.
1448 1.1 skrll 	 * Re-do the CSPLIT immediately for non-periodic transfers.
1449 1.1 skrll */
1450 1.1 skrll if (chan->do_split && chan->complete_split) {
1451 1.1 skrll if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
1452 1.1 skrll hsotg->core_params->dma_enable > 0) {
1453 1.1 skrll qtd->complete_split = 0;
1454 1.1 skrll qtd->isoc_split_offset = 0;
1455 1.1 skrll qtd->isoc_frame_index++;
1456 1.1 skrll if (qtd->urb &&
1457 1.1 skrll qtd->isoc_frame_index == qtd->urb->packet_count) {
1458 1.1 skrll dwc2_host_complete(hsotg, qtd, 0);
1459 1.1 skrll dwc2_release_channel(hsotg, chan, qtd,
1460 1.1 skrll DWC2_HC_XFER_URB_COMPLETE);
1461 1.1 skrll } else {
1462 1.1 skrll dwc2_release_channel(hsotg, chan, qtd,
1463 1.1 skrll DWC2_HC_XFER_NO_HALT_STATUS);
1464 1.1 skrll }
1465 1.1 skrll goto handle_nyet_done;
1466 1.1 skrll }
1467 1.1 skrll
1468 1.1 skrll if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1469 1.1 skrll chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1470 1.1 skrll int frnum = dwc2_hcd_get_frame_number(hsotg);
1471 1.1 skrll
1472 1.1 skrll if (dwc2_full_frame_num(frnum) !=
1473 1.1 skrll dwc2_full_frame_num(chan->qh->sched_frame)) {
1474 1.1 skrll /*
1475 1.1 skrll * No longer in the same full speed frame.
1476 1.1 skrll * Treat this as a transaction error.
1477 1.1 skrll */
1478 1.1 skrll #if 0
1479 1.1 skrll /*
1480 1.1 skrll * Todo: Fix system performance so this can
1481 1.1 skrll * be treated as an error. Right now complete
1482 1.1 skrll * splits cannot be scheduled precisely enough
1483 1.1 skrll * due to other system activity, so this error
1484 1.1 skrll * occurs regularly in Slave mode.
1485 1.1 skrll */
1486 1.1 skrll qtd->error_count++;
1487 1.1 skrll #endif
1488 1.1 skrll qtd->complete_split = 0;
1489 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd,
1490 1.1 skrll DWC2_HC_XFER_XACT_ERR);
1491 1.1 skrll /* Todo: add support for isoc release */
1492 1.1 skrll goto handle_nyet_done;
1493 1.1 skrll }
1494 1.1 skrll }
1495 1.1 skrll
1496 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1497 1.1 skrll goto handle_nyet_done;
1498 1.1 skrll }
1499 1.1 skrll
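	/*
	 * NYET on a plain (non-split) high-speed BULK/CONTROL OUT: the data
	 * was accepted but the endpoint has no room for more, so switch to
	 * the PING protocol.
	 */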
1500 1.1 skrll chan->qh->ping_state = 1;
1501 1.1 skrll qtd->error_count = 0;
1502 1.1 skrll
1503 1.1 skrll dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
1504 1.1 skrll DWC2_HC_XFER_NYET);
1505 1.1 skrll dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1506 1.1 skrll
1507 1.1 skrll /*
1508 1.1 skrll * Halt the channel and re-start the transfer so the PING protocol
1509 1.1 skrll * will start
1510 1.1 skrll */
1511 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1512 1.1 skrll
1513 1.1 skrll handle_nyet_done:
1514 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
1515 1.1 skrll }
1516 1.1 skrll
1517 1.1 skrll /*
1518 1.1 skrll * Handles a host channel babble interrupt. This handler may be called in
1519 1.1 skrll * either DMA mode or Slave mode.
1520 1.1 skrll */
1521 1.1 skrll static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1522 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1523 1.1 skrll struct dwc2_qtd *qtd)
1524 1.1 skrll {
1525 1.1 skrll dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1526 1.1 skrll chnum);
1527 1.1 skrll
1528 1.2 skrll // dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1529 1.1 skrll
1530 1.1 skrll if (hsotg->core_params->dma_desc_enable > 0) {
1531 1.1 skrll dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1532 1.1 skrll DWC2_HC_XFER_BABBLE_ERR);
1533 1.1 skrll goto disable_int;
1534 1.1 skrll }
1535 1.1 skrll
1536 1.1 skrll if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1537 1.1 skrll dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
1538 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1539 1.1 skrll } else {
1540 1.1 skrll enum dwc2_halt_status halt_status;
1541 1.1 skrll
1542 1.1 skrll halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1543 1.1 skrll qtd, DWC2_HC_XFER_BABBLE_ERR);
1544 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1545 1.1 skrll }
1546 1.1 skrll
1547 1.1 skrll disable_int:
1548 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1549 1.1 skrll }
1550 1.1 skrll
1551 1.1 skrll /*
1552 1.1 skrll * Handles a host channel AHB error interrupt. This handler is only called in
1553 1.1 skrll * DMA mode.
1554 1.1 skrll */
1555 1.1 skrll static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
1556 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1557 1.1 skrll struct dwc2_qtd *qtd)
1558 1.1 skrll {
1559 1.1 skrll struct dwc2_hcd_urb *urb = qtd->urb;
1560 1.1 skrll
1561 1.1 skrll dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
1562 1.1 skrll chnum);
1563 1.1 skrll
1564 1.1 skrll if (!urb)
1565 1.1 skrll goto handle_ahberr_halt;
1566 1.1 skrll
1567 1.2 skrll // dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1568 1.1 skrll
1569 1.7 skrll #ifdef DWC2_DEBUG
1570 1.7 skrll const char *pipetype, *speed;
1571 1.7 skrll
1572 1.7 skrll u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
1573 1.7 skrll u32 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum));
1574 1.7 skrll u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
1575 1.7 skrll u32 hc_dma = DWC2_READ_4(hsotg, HCDMA(chnum));
1576 1.1 skrll
1577 1.1 skrll dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
1578 1.1 skrll dev_err(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
1579 1.1 skrll dev_err(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
1580 1.1 skrll dev_err(hsotg->dev, " Device address: %d\n",
1581 1.1 skrll dwc2_hcd_get_dev_addr(&urb->pipe_info));
1582 1.1 skrll dev_err(hsotg->dev, " Endpoint: %d, %s\n",
1583 1.1 skrll dwc2_hcd_get_ep_num(&urb->pipe_info),
1584 1.1 skrll dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
1585 1.1 skrll
1586 1.1 skrll switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
1587 1.1 skrll case USB_ENDPOINT_XFER_CONTROL:
1588 1.1 skrll pipetype = "CONTROL";
1589 1.1 skrll break;
1590 1.1 skrll case USB_ENDPOINT_XFER_BULK:
1591 1.1 skrll pipetype = "BULK";
1592 1.1 skrll break;
1593 1.1 skrll case USB_ENDPOINT_XFER_INT:
1594 1.1 skrll pipetype = "INTERRUPT";
1595 1.1 skrll break;
1596 1.1 skrll case USB_ENDPOINT_XFER_ISOC:
1597 1.1 skrll pipetype = "ISOCHRONOUS";
1598 1.1 skrll break;
1599 1.1 skrll default:
1600 1.1 skrll pipetype = "UNKNOWN";
1601 1.1 skrll break;
1602 1.1 skrll }
1603 1.1 skrll
1604 1.1 skrll dev_err(hsotg->dev, " Endpoint type: %s\n", pipetype);
1605 1.1 skrll
1606 1.1 skrll switch (chan->speed) {
1607 1.1 skrll case USB_SPEED_HIGH:
1608 1.1 skrll speed = "HIGH";
1609 1.1 skrll break;
1610 1.1 skrll case USB_SPEED_FULL:
1611 1.1 skrll speed = "FULL";
1612 1.1 skrll break;
1613 1.1 skrll case USB_SPEED_LOW:
1614 1.1 skrll speed = "LOW";
1615 1.1 skrll break;
1616 1.1 skrll default:
1617 1.1 skrll speed = "UNKNOWN";
1618 1.1 skrll break;
1619 1.1 skrll }
1620 1.1 skrll
1621 1.1 skrll dev_err(hsotg->dev, " Speed: %s\n", speed);
1622 1.1 skrll
1623 1.1 skrll dev_err(hsotg->dev, " Max packet size: %d\n",
1624 1.1 skrll dwc2_hcd_get_mps(&urb->pipe_info));
1625 1.1 skrll dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length);
1626 1.1 skrll dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
1627 1.1 skrll urb->buf, (unsigned long)urb->dma);
1628 1.1 skrll dev_err(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n",
1629 1.1 skrll urb->setup_packet, (unsigned long)urb->setup_dma);
1630 1.1 skrll dev_err(hsotg->dev, " Interval: %d\n", urb->interval);
1631 1.7 skrll #endif
1632 1.1 skrll
1633 1.1 skrll /* Core halts the channel for Descriptor DMA mode */
1634 1.1 skrll if (hsotg->core_params->dma_desc_enable > 0) {
1635 1.1 skrll dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1636 1.1 skrll DWC2_HC_XFER_AHB_ERR);
1637 1.1 skrll goto handle_ahberr_done;
1638 1.1 skrll }
1639 1.1 skrll
1640 1.1 skrll dwc2_host_complete(hsotg, qtd, -EIO);
1641 1.1 skrll
1642 1.1 skrll handle_ahberr_halt:
1643 1.1 skrll /*
1644 1.1 skrll * Force a channel halt. Don't call dwc2_halt_channel because that won't
1645 1.1 skrll * write to the HCCHARn register in DMA mode to force the halt.
1646 1.1 skrll */
1647 1.1 skrll dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
1648 1.1 skrll
1649 1.1 skrll handle_ahberr_done:
1650 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
1651 1.1 skrll }
1652 1.1 skrll
1653 1.1 skrll /*
1654 1.1 skrll * Handles a host channel transaction error interrupt. This handler may be
1655 1.1 skrll * called in either DMA mode or Slave mode.
1656 1.1 skrll */
1657 1.1 skrll static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
1658 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1659 1.1 skrll struct dwc2_qtd *qtd)
1660 1.1 skrll {
1661 1.1 skrll dev_dbg(hsotg->dev,
1662 1.1 skrll "--Host Channel %d Interrupt: Transaction Error--\n", chnum);
1663 1.1 skrll
1664 1.2 skrll // dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1665 1.1 skrll
1666 1.1 skrll if (hsotg->core_params->dma_desc_enable > 0) {
1667 1.1 skrll dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1668 1.1 skrll DWC2_HC_XFER_XACT_ERR);
1669 1.1 skrll goto handle_xacterr_done;
1670 1.1 skrll }
1671 1.1 skrll
1672 1.1 skrll switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1673 1.1 skrll case USB_ENDPOINT_XFER_CONTROL:
1674 1.1 skrll case USB_ENDPOINT_XFER_BULK:
1675 1.1 skrll qtd->error_count++;
1676 1.1 skrll if (!chan->qh->ping_state) {
1677 1.1 skrll
1678 1.1 skrll dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1679 1.1 skrll qtd, DWC2_HC_XFER_XACT_ERR);
1680 1.1 skrll dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1681 1.1 skrll if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
1682 1.1 skrll chan->qh->ping_state = 1;
1683 1.1 skrll }
1684 1.1 skrll
1685 1.1 skrll /*
1686 1.1 skrll * Halt the channel so the transfer can be re-started from
1687 1.1 skrll * the appropriate point or the PING protocol will start
1688 1.1 skrll */
1689 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1690 1.1 skrll break;
1691 1.1 skrll case USB_ENDPOINT_XFER_INT:
1692 1.1 skrll qtd->error_count++;
1693 1.1 skrll if (chan->do_split && chan->complete_split)
1694 1.1 skrll qtd->complete_split = 0;
1695 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1696 1.1 skrll break;
1697 1.1 skrll case USB_ENDPOINT_XFER_ISOC:
1698 1.1 skrll {
1699 1.1 skrll enum dwc2_halt_status halt_status;
1700 1.1 skrll
1701 1.1 skrll halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1702 1.1 skrll chnum, qtd, DWC2_HC_XFER_XACT_ERR);
1703 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1704 1.1 skrll }
1705 1.1 skrll break;
1706 1.1 skrll }
1707 1.1 skrll
1708 1.1 skrll handle_xacterr_done:
1709 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
1710 1.1 skrll }
1711 1.1 skrll
1712 1.1 skrll /*
1713 1.1 skrll * Handles a host channel frame overrun interrupt. This handler may be called
1714 1.1 skrll * in either DMA mode or Slave mode.
1715 1.1 skrll */
1716 1.1 skrll static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
1717 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1718 1.1 skrll struct dwc2_qtd *qtd)
1719 1.1 skrll {
1720 1.1 skrll enum dwc2_halt_status halt_status;
1721 1.1 skrll
1722 1.1 skrll if (dbg_hc(chan))
1723 1.1 skrll dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
1724 1.1 skrll chnum);
1725 1.1 skrll
1726 1.1 skrll dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1727 1.1 skrll
1728 1.1 skrll switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1729 1.1 skrll case USB_ENDPOINT_XFER_CONTROL:
1730 1.1 skrll case USB_ENDPOINT_XFER_BULK:
1731 1.1 skrll break;
1732 1.1 skrll case USB_ENDPOINT_XFER_INT:
1733 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1734 1.1 skrll break;
1735 1.1 skrll case USB_ENDPOINT_XFER_ISOC:
1736 1.1 skrll halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1737 1.1 skrll qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1738 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1739 1.1 skrll break;
1740 1.1 skrll }
1741 1.1 skrll
1742 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
1743 1.1 skrll }
1744 1.1 skrll
1745 1.1 skrll /*
1746 1.1 skrll * Handles a host channel data toggle error interrupt. This handler may be
1747 1.1 skrll * called in either DMA mode or Slave mode.
1748 1.1 skrll */
1749 1.1 skrll static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
1750 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1751 1.1 skrll struct dwc2_qtd *qtd)
1752 1.1 skrll {
1753 1.1 skrll dev_dbg(hsotg->dev,
1754 1.1 skrll "--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
1755 1.1 skrll
1756 1.1 skrll if (chan->ep_is_in)
1757 1.1 skrll qtd->error_count = 0;
1758 1.1 skrll else
1759 1.1 skrll dev_err(hsotg->dev,
1760 1.1 skrll "Data Toggle Error on OUT transfer, channel %d\n",
1761 1.1 skrll chnum);
1762 1.1 skrll
1763 1.2 skrll // dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1764 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
1765 1.1 skrll }
1766 1.1 skrll
1767 1.1 skrll /*
1768 1.1 skrll * For debug only. It checks that a valid halt status is set and that
1769 1.1 skrll * HCCHARn.chdis is clear. If there's a problem, corrective action is
1770 1.1 skrll * taken and a warning is issued.
1771 1.1 skrll *
1772 1.1 skrll * Return: true if halt status is ok, false otherwise
1773 1.1 skrll */
1774 1.1 skrll static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
1775 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1776 1.1 skrll struct dwc2_qtd *qtd)
1777 1.1 skrll {
1778 1.7 skrll #ifdef DWC2_DEBUG
1779 1.1 skrll u32 hcchar;
1780 1.1 skrll u32 hctsiz;
1781 1.1 skrll u32 hcintmsk;
1782 1.1 skrll u32 hcsplt;
1783 1.1 skrll
1784 1.1 skrll if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
1785 1.1 skrll /*
1786 1.1 skrll * This code is here only as a check. This condition should
1787 1.1 skrll * never happen. Ignore the halt if it does occur.
1788 1.1 skrll */
1789 1.2 skrll hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
1790 1.2 skrll hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
1791 1.2 skrll hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
1792 1.2 skrll hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum));
1793 1.1 skrll dev_dbg(hsotg->dev,
1794 1.1 skrll "%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
1795 1.1 skrll __func__);
1796 1.1 skrll dev_dbg(hsotg->dev,
1797 1.1 skrll "channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
1798 1.1 skrll chnum, hcchar, hctsiz);
1799 1.1 skrll dev_dbg(hsotg->dev,
1800 1.1 skrll "hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
1801 1.1 skrll chan->hcint, hcintmsk, hcsplt);
1802 1.1 skrll if (qtd)
1803 1.1 skrll dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
1804 1.1 skrll qtd->complete_split);
1805 1.1 skrll dev_warn(hsotg->dev,
1806 1.1 skrll "%s: no halt status, channel %d, ignoring interrupt\n",
1807 1.1 skrll __func__, chnum);
1808 1.1 skrll return false;
1809 1.1 skrll }
1810 1.1 skrll
1811 1.1 skrll /*
1812 1.1 skrll * This code is here only as a check. hcchar.chdis should never be set
1813 1.1 skrll * when the halt interrupt occurs. Halt the channel again if it does
1814 1.1 skrll * occur.
1815 1.1 skrll */
1816 1.2 skrll hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
1817 1.1 skrll if (hcchar & HCCHAR_CHDIS) {
1818 1.1 skrll dev_warn(hsotg->dev,
1819 1.1 skrll "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
1820 1.1 skrll __func__, hcchar);
1821 1.1 skrll chan->halt_pending = 0;
1822 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
1823 1.1 skrll return false;
1824 1.1 skrll }
1825 1.1 skrll #endif
1826 1.1 skrll
1827 1.1 skrll return true;
1828 1.1 skrll }
1829 1.1 skrll
1830 1.1 skrll /*
1831 1.1 skrll * Handles a host Channel Halted interrupt in DMA mode. This handler
1832 1.1 skrll * determines the reason the channel halted and proceeds accordingly.
1833 1.1 skrll */
1834 1.1 skrll static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
1835 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1836 1.1 skrll struct dwc2_qtd *qtd)
1837 1.1 skrll {
1838 1.1 skrll u32 hcintmsk;
1839 1.1 skrll int out_nak_enh = 0;
1840 1.1 skrll
1841 1.1 skrll if (dbg_hc(chan))
1842 1.1 skrll dev_vdbg(hsotg->dev,
1843 1.1 skrll "--Host Channel %d Interrupt: DMA Channel Halted--\n",
1844 1.1 skrll chnum);
1845 1.1 skrll
1846 1.1 skrll /*
1847 1.1 skrll * For core with OUT NAK enhancement, the flow for high-speed
1848 1.1 skrll * CONTROL/BULK OUT is handled a little differently
1849 1.1 skrll */
1850 1.3 skrll if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
1851 1.1 skrll if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
1852 1.1 skrll (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1853 1.1 skrll chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
1854 1.1 skrll out_nak_enh = 1;
1855 1.1 skrll }
1856 1.1 skrll }
1857 1.1 skrll
1858 1.1 skrll if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1859 1.1 skrll (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
1860 1.1 skrll hsotg->core_params->dma_desc_enable <= 0)) {
1861 1.1 skrll if (hsotg->core_params->dma_desc_enable > 0)
1862 1.1 skrll dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1863 1.1 skrll chan->halt_status);
1864 1.1 skrll else
1865 1.1 skrll /*
1866 1.1 skrll * Just release the channel. A dequeue can happen on a
1867 1.1 skrll * transfer timeout. In the case of an AHB Error, the
1868 1.1 skrll * channel was forced to halt because there's no way to
1869 1.1 skrll * gracefully recover.
1870 1.1 skrll */
1871 1.1 skrll dwc2_release_channel(hsotg, chan, qtd,
1872 1.1 skrll chan->halt_status);
1873 1.1 skrll return;
1874 1.1 skrll }
1875 1.1 skrll
1876 1.2 skrll hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
1877 1.1 skrll
1878 1.1 skrll if (chan->hcint & HCINTMSK_XFERCOMPL) {
1879 1.1 skrll /*
1880 1.1 skrll * Todo: This is here because of a possible hardware bug. Spec
1881 1.1 skrll * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
1882 1.1 skrll * interrupt w/ACK bit set should occur, but I only see the
1883 1.1 skrll * XFERCOMP bit, even with it masked out. This is a workaround
1884 1.1 skrll * for that behavior. Should fix this when hardware is fixed.
1885 1.1 skrll */
1886 1.1 skrll if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
1887 1.1 skrll dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1888 1.1 skrll dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
1889 1.1 skrll } else if (chan->hcint & HCINTMSK_STALL) {
1890 1.1 skrll dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
1891 1.1 skrll } else if ((chan->hcint & HCINTMSK_XACTERR) &&
1892 1.1 skrll hsotg->core_params->dma_desc_enable <= 0) {
1893 1.1 skrll if (out_nak_enh) {
1894 1.1 skrll if (chan->hcint &
1895 1.1 skrll (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
1896 1.1 skrll dev_vdbg(hsotg->dev,
1897 1.1 skrll "XactErr with NYET/NAK/ACK\n");
1898 1.1 skrll qtd->error_count = 0;
1899 1.1 skrll } else {
1900 1.1 skrll dev_vdbg(hsotg->dev,
1901 1.1 skrll "XactErr without NYET/NAK/ACK\n");
1902 1.1 skrll }
1903 1.1 skrll }
1904 1.1 skrll
1905 1.1 skrll /*
1906 1.1 skrll * Must handle xacterr before nak or ack. Could get a xacterr
1907 1.1 skrll * at the same time as either of these on a BULK/CONTROL OUT
1908 1.1 skrll * that started with a PING. The xacterr takes precedence.
1909 1.1 skrll */
1910 1.1 skrll dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1911 1.1 skrll } else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
1912 1.1 skrll hsotg->core_params->dma_desc_enable > 0) {
1913 1.1 skrll dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1914 1.1 skrll } else if ((chan->hcint & HCINTMSK_AHBERR) &&
1915 1.1 skrll hsotg->core_params->dma_desc_enable > 0) {
1916 1.1 skrll dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
1917 1.1 skrll } else if (chan->hcint & HCINTMSK_BBLERR) {
1918 1.1 skrll dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
1919 1.1 skrll } else if (chan->hcint & HCINTMSK_FRMOVRUN) {
1920 1.1 skrll dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
1921 1.1 skrll } else if (!out_nak_enh) {
1922 1.1 skrll if (chan->hcint & HCINTMSK_NYET) {
1923 1.1 skrll /*
1924 1.1 skrll * Must handle nyet before nak or ack. Could get a nyet
1925 1.1 skrll * at the same time as either of those on a BULK/CONTROL
1926 1.1 skrll * OUT that started with a PING. The nyet takes
1927 1.1 skrll * precedence.
1928 1.1 skrll */
1929 1.1 skrll dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
1930 1.1 skrll } else if ((chan->hcint & HCINTMSK_NAK) &&
1931 1.1 skrll !(hcintmsk & HCINTMSK_NAK)) {
1932 1.1 skrll /*
1933 1.1 skrll * If nak is not masked, it's because a non-split IN
1934 1.1 skrll * transfer is in an error state. In that case, the nak
1935 1.1 skrll * is handled by the nak interrupt handler, not here.
1936 1.1 skrll * Handle nak here for BULK/CONTROL OUT transfers, which
1937 1.1 skrll * halt on a NAK to allow rewinding the buffer pointer.
1938 1.1 skrll */
1939 1.1 skrll dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
1940 1.1 skrll } else if ((chan->hcint & HCINTMSK_ACK) &&
1941 1.1 skrll !(hcintmsk & HCINTMSK_ACK)) {
1942 1.1 skrll /*
1943 1.1 skrll * If ack is not masked, it's because a non-split IN
1944 1.1 skrll * transfer is in an error state. In that case, the ack
1945 1.1 skrll * is handled by the ack interrupt handler, not here.
1946 1.1 skrll * Handle ack here for split transfers. Start splits
1947 1.1 skrll * halt on ACK.
1948 1.1 skrll */
1949 1.1 skrll dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1950 1.1 skrll } else {
1951 1.1 skrll if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1952 1.1 skrll chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1953 1.1 skrll /*
1954 1.1 skrll * A periodic transfer halted with no other
1955 1.1 skrll * channel interrupts set. Assume it was halted
1956 1.1 skrll * by the core because it could not be completed
1957 1.1 skrll * in its scheduled (micro)frame.
1958 1.1 skrll */
1959 1.1 skrll dev_dbg(hsotg->dev,
1960 1.1 skrll "%s: Halt channel %d (assume incomplete periodic transfer)\n",
1961 1.1 skrll __func__, chnum);
1962 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd,
1963 1.1 skrll DWC2_HC_XFER_PERIODIC_INCOMPLETE);
1964 1.1 skrll } else {
1965 1.1 skrll dev_err(hsotg->dev,
1966 1.1 skrll "%s: Channel %d - ChHltd set, but reason is unknown\n",
1967 1.1 skrll __func__, chnum);
1968 1.1 skrll dev_err(hsotg->dev,
1969 1.1 skrll "hcint 0x%08x, intsts 0x%08x\n",
1970 1.1 skrll chan->hcint,
1971 1.2 skrll DWC2_READ_4(hsotg, GINTSTS));
1972 1.9 skrll goto error;
1973 1.1 skrll }
1974 1.1 skrll }
1975 1.1 skrll } else {
1976 1.1 skrll dev_info(hsotg->dev,
1977 1.1 skrll "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
1978 1.1 skrll chan->hcint);
1979 1.9 skrll error:
1980 1.12 skrll 		/* Fallthrough: use 3-strikes rule */
1981 1.9 skrll qtd->error_count++;
1982 1.9 skrll dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1983 1.12 skrll qtd, DWC2_HC_XFER_XACT_ERR);
1984 1.9 skrll dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1985 1.9 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1986 1.1 skrll }
1987 1.1 skrll }
1988 1.1 skrll
1989 1.1 skrll /*
1990 1.1 skrll * Handles a host channel Channel Halted interrupt
1991 1.1 skrll *
1992 1.1 skrll * In slave mode, this handler is called only when the driver specifically
1993 1.1 skrll * requests a halt. This occurs during handling other host channel interrupts
1994 1.1 skrll * (e.g. nak, xacterr, stall, nyet, etc.).
1995 1.1 skrll *
1996 1.1 skrll * In DMA mode, this is the interrupt that occurs when the core has finished
1997 1.1 skrll * processing a transfer on a channel. Other host channel interrupts (except
1998 1.1 skrll * ahberr) are disabled in DMA mode.
1999 1.1 skrll */
2000 1.1 skrll static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
2001 1.1 skrll struct dwc2_host_chan *chan, int chnum,
2002 1.1 skrll struct dwc2_qtd *qtd)
2003 1.1 skrll {
2004 1.1 skrll if (dbg_hc(chan))
2005 1.1 skrll dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
2006 1.1 skrll chnum);
2007 1.1 skrll
2008 1.1 skrll if (hsotg->core_params->dma_enable > 0) {
2009 1.1 skrll dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
2010 1.1 skrll } else {
2011 1.1 skrll if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
2012 1.1 skrll return;
2013 1.1 skrll dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
2014 1.1 skrll }
2015 1.1 skrll }
2016 1.1 skrll
2017 1.13 skrll /*
2018 1.13 skrll * Check if the given qtd is still the top of the list (and thus valid).
2019 1.13 skrll *
2020 1.13 skrll * If dwc2_hcd_qtd_unlink_and_free() has been called since we grabbed
2021 1.13 skrll * the qtd from the top of the list, this will return false (otherwise true).
2022 1.13 skrll */
2023 1.13 skrll static bool dwc2_check_qtd_still_ok(struct dwc2_qtd *qtd, struct dwc2_qh *qh)
2024 1.13 skrll {
2025 1.13 skrll struct dwc2_qtd *cur_head;
2026 1.13 skrll
2027 1.13 skrll if (qh == NULL)
2028 1.13 skrll return false;
2029 1.13 skrll
2030 1.13 skrll cur_head = list_first_entry(&qh->qtd_list, struct dwc2_qtd,
2031 1.13 skrll qtd_list_entry);
2032 1.13 skrll return (cur_head == qtd);
2033 1.13 skrll }
2034 1.13 skrll
2035 1.1 skrll /* Handles interrupt for a specific Host Channel */
2036 1.1 skrll static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
2037 1.1 skrll {
2038 1.1 skrll struct dwc2_qtd *qtd;
2039 1.1 skrll struct dwc2_host_chan *chan;
2040 1.1 skrll u32 hcint, hcintmsk;
2041 1.1 skrll
2042 1.1 skrll chan = hsotg->hc_ptr_array[chnum];
2043 1.1 skrll
2044 1.2 skrll hcint = DWC2_READ_4(hsotg, HCINT(chnum));
2045 1.2 skrll hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
2046 1.1 skrll if (!chan) {
2047 1.1 skrll dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
2048 1.2 skrll DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
2049 1.1 skrll return;
2050 1.1 skrll }
2051 1.1 skrll
2052 1.6 skrll if (dbg_hc(chan)) {
2053 1.6 skrll dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
2054 1.6 skrll chnum);
2055 1.6 skrll dev_vdbg(hsotg->dev,
2056 1.6 skrll " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
2057 1.6 skrll hcint, hcintmsk, hcint & hcintmsk);
2058 1.6 skrll }
2059 1.6 skrll
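	/*
	 * Writing the status back acknowledges (clears) the latched channel
	 * interrupts; keep the raw value for the handlers and dispatch only
	 * on the unmasked bits.
	 */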
2060 1.2 skrll DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
2061 1.1 skrll chan->hcint = hcint;
2062 1.1 skrll hcint &= hcintmsk;
2063 1.1 skrll
2064 1.1 skrll /*
2065 1.1 skrll * If the channel was halted due to a dequeue, the qtd list might
2066 1.1 skrll * be empty or at least the first entry will not be the active qtd.
2067 1.1 skrll * In this case, take a shortcut and just release the channel.
2068 1.1 skrll */
2069 1.1 skrll if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
2070 1.1 skrll /*
2071 1.1 skrll * If the channel was halted, this should be the only
2072 1.1 skrll * interrupt unmasked
2073 1.1 skrll */
2074 1.1 skrll WARN_ON(hcint != HCINTMSK_CHHLTD);
2075 1.1 skrll if (hsotg->core_params->dma_desc_enable > 0)
2076 1.1 skrll dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
2077 1.1 skrll chan->halt_status);
2078 1.1 skrll else
2079 1.1 skrll dwc2_release_channel(hsotg, chan, NULL,
2080 1.1 skrll chan->halt_status);
2081 1.1 skrll return;
2082 1.1 skrll }
2083 1.1 skrll
2084 1.1 skrll if (list_empty(&chan->qh->qtd_list)) {
2085 1.1 skrll /*
2086 1.1 skrll * TODO: Will this ever happen with the
2087 1.1 skrll * DWC2_HC_XFER_URB_DEQUEUE handling above?
2088 1.1 skrll */
2089 1.1 skrll dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
2090 1.1 skrll chnum);
2091 1.1 skrll dev_dbg(hsotg->dev,
2092 1.1 skrll " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
2093 1.1 skrll chan->hcint, hcintmsk, hcint);
2094 1.1 skrll chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
2095 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
2096 1.1 skrll chan->hcint = 0;
2097 1.1 skrll return;
2098 1.1 skrll }
2099 1.1 skrll
2100 1.1 skrll qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
2101 1.1 skrll qtd_list_entry);
2102 1.1 skrll
2103 1.1 skrll if (hsotg->core_params->dma_enable <= 0) {
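		/*
		 * Slave mode: when the Channel Halted bit is set together
		 * with other channel interrupts, ignore it here; the
		 * handlers for those conditions take care of halting and
		 * releasing the channel themselves.
		 */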
2104 1.1 skrll if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
2105 1.1 skrll hcint &= ~HCINTMSK_CHHLTD;
2106 1.1 skrll }
2107 1.1 skrll
2108 1.1 skrll if (hcint & HCINTMSK_XFERCOMPL) {
2109 1.1 skrll dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
2110 1.1 skrll /*
2111 1.1 skrll * If NYET occurred at same time as Xfer Complete, the NYET is
2112 1.1 skrll * handled by the Xfer Complete interrupt handler. Don't want
2113 1.1 skrll * to call the NYET interrupt handler in this case.
2114 1.1 skrll */
2115 1.1 skrll hcint &= ~HCINTMSK_NYET;
2116 1.1 skrll }
2117 1.13 skrll
2118 1.13 skrll if (hcint & HCINTMSK_CHHLTD) {
2119 1.1 skrll dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
2120 1.13 skrll if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2121 1.13 skrll goto exit;
2122 1.13 skrll }
2123 1.13 skrll if (hcint & HCINTMSK_AHBERR) {
2124 1.1 skrll dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
2125 1.13 skrll if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2126 1.13 skrll goto exit;
2127 1.13 skrll }
2128 1.13 skrll if (hcint & HCINTMSK_STALL) {
2129 1.1 skrll dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
2130 1.13 skrll if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2131 1.13 skrll goto exit;
2132 1.13 skrll }
2133 1.13 skrll if (hcint & HCINTMSK_NAK) {
2134 1.1 skrll dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
2135 1.13 skrll if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2136 1.13 skrll goto exit;
2137 1.13 skrll }
2138 1.13 skrll if (hcint & HCINTMSK_ACK) {
2139 1.1 skrll dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
2140 1.13 skrll if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2141 1.13 skrll goto exit;
2142 1.13 skrll }
2143 1.13 skrll if (hcint & HCINTMSK_NYET) {
2144 1.1 skrll dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
2145 1.13 skrll if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2146 1.13 skrll goto exit;
2147 1.13 skrll }
2148 1.13 skrll if (hcint & HCINTMSK_XACTERR) {
2149 1.1 skrll dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
2150 1.13 skrll if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2151 1.13 skrll goto exit;
2152 1.13 skrll }
2153 1.13 skrll if (hcint & HCINTMSK_BBLERR) {
2154 1.1 skrll dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
2155 1.13 skrll if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2156 1.13 skrll goto exit;
2157 1.13 skrll }
2158 1.13 skrll if (hcint & HCINTMSK_FRMOVRUN) {
2159 1.1 skrll dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
2160 1.13 skrll if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2161 1.13 skrll goto exit;
2162 1.13 skrll }
2163 1.13 skrll if (hcint & HCINTMSK_DATATGLERR) {
2164 1.1 skrll dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
2165 1.13 skrll if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2166 1.13 skrll goto exit;
2167 1.13 skrll }
2168 1.1 skrll
2169 1.13 skrll exit:
2170 1.1 skrll chan->hcint = 0;
2171 1.1 skrll }
2172 1.1 skrll
2173 1.1 skrll /*
2174 1.1 skrll * This interrupt indicates that one or more host channels has a pending
2175 1.1 skrll * interrupt. There are multiple conditions that can cause each host channel
2176 1.1 skrll * interrupt. This function determines which conditions have occurred for each
2177 1.1 skrll * host channel interrupt and handles them appropriately.
2178 1.1 skrll */
2179 1.1 skrll static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2180 1.1 skrll {
2181 1.1 skrll u32 haint;
2182 1.1 skrll int i;
2183 1.1 skrll
2184 1.2 skrll haint = DWC2_READ_4(hsotg, HAINT);
2185 1.1 skrll if (dbg_perio()) {
2186 1.1 skrll dev_vdbg(hsotg->dev, "%s()\n", __func__);
2187 1.1 skrll
2188 1.1 skrll dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2189 1.1 skrll }
2190 1.1 skrll
2191 1.1 skrll for (i = 0; i < hsotg->core_params->host_channels; i++) {
2192 1.1 skrll if (haint & (1 << i))
2193 1.1 skrll dwc2_hc_n_intr(hsotg, i);
2194 1.1 skrll }
2195 1.1 skrll }
2196 1.1 skrll
2197 1.1 skrll /* This function handles interrupts for the HCD */
2198 1.1 skrll irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
2199 1.1 skrll {
2200 1.1 skrll u32 gintsts, dbg_gintsts;
2201 1.1 skrll irqreturn_t retval = IRQ_NONE;
2202 1.1 skrll
2203 1.8 skrll if (!dwc2_is_controller_alive(hsotg)) {
2204 1.8 skrll dev_warn(hsotg->dev, "Controller is dead\n");
2205 1.1 skrll return retval;
2206 1.1 skrll }
2207 1.1 skrll
2208 1.4 skrll KASSERT(mutex_owned(&hsotg->lock));
2209 1.1 skrll
2210 1.1 skrll /* Check if HOST Mode */
2211 1.1 skrll if (dwc2_is_host_mode(hsotg)) {
2212 1.1 skrll gintsts = dwc2_read_core_intr(hsotg);
2213 1.1 skrll if (!gintsts) {
2214 1.1 skrll return retval;
2215 1.1 skrll }
2216 1.1 skrll
2217 1.1 skrll retval = IRQ_HANDLED;
2218 1.1 skrll
2219 1.1 skrll dbg_gintsts = gintsts;
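		/*
		 * SOF fires every (micro)frame; suppress it from the debug
		 * output unless DEBUG_SOF is defined.
		 */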
2220 1.1 skrll #ifndef DEBUG_SOF
2221 1.1 skrll dbg_gintsts &= ~GINTSTS_SOF;
2222 1.1 skrll #endif
2223 1.1 skrll if (!dbg_perio())
2224 1.1 skrll dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
2225 1.1 skrll GINTSTS_PTXFEMP);
2226 1.1 skrll
2227 1.1 skrll /* Only print if there are any non-suppressed interrupts left */
2228 1.1 skrll if (dbg_gintsts)
2229 1.1 skrll dev_vdbg(hsotg->dev,
2230 1.1 skrll "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
2231 1.1 skrll gintsts);
2232 1.1 skrll
2233 1.1 skrll if (gintsts & GINTSTS_SOF)
2234 1.1 skrll dwc2_sof_intr(hsotg);
2235 1.1 skrll if (gintsts & GINTSTS_RXFLVL)
2236 1.1 skrll dwc2_rx_fifo_level_intr(hsotg);
2237 1.1 skrll if (gintsts & GINTSTS_NPTXFEMP)
2238 1.1 skrll dwc2_np_tx_fifo_empty_intr(hsotg);
2239 1.1 skrll if (gintsts & GINTSTS_PRTINT)
2240 1.1 skrll dwc2_port_intr(hsotg);
2241 1.1 skrll if (gintsts & GINTSTS_HCHINT)
2242 1.1 skrll dwc2_hc_intr(hsotg);
2243 1.1 skrll if (gintsts & GINTSTS_PTXFEMP)
2244 1.1 skrll dwc2_perio_tx_fifo_empty_intr(hsotg);
2245 1.1 skrll
2246 1.1 skrll if (dbg_gintsts) {
2247 1.1 skrll dev_vdbg(hsotg->dev,
2248 1.1 skrll "DWC OTG HCD Finished Servicing Interrupts\n");
2249 1.1 skrll dev_vdbg(hsotg->dev,
2250 1.1 skrll "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
2251 1.2 skrll DWC2_READ_4(hsotg, GINTSTS),
2252 1.2 skrll DWC2_READ_4(hsotg, GINTMSK));
2253 1.1 skrll }
2254 1.1 skrll }
2255 1.1 skrll
2256 1.1 skrll return retval;
2257 1.1 skrll }
2258