/* $NetBSD: dwc2_hcdintr.c,v 1.1 2013/09/05 07:53:12 skrll Exp $ */

/*
 * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the interrupt handlers for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

/* This function is for debug only */
static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
	u16 curr_frame_number = hsotg->frame_number;

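	/*
	 * Frame numbers wrap at HFNUM_MAX_FRNUM, so mask after incrementing
	 * when checking whether exactly one frame has elapsed since the last
	 * observed SOF; anything else is recorded as a missed SOF.
	 */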
	if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
		if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) !=
		    curr_frame_number) {
			hsotg->frame_num_array[hsotg->frame_num_idx] =
					curr_frame_number;
			hsotg->last_frame_num_array[hsotg->frame_num_idx] =
					hsotg->last_frame_num;
			hsotg->frame_num_idx++;
		}
	} else if (!hsotg->dumped_frame_num_array) {
		int i;

		dev_info(hsotg->dev, "Frame     Last Frame\n");
		dev_info(hsotg->dev, "-----     ----------\n");
		for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
			dev_info(hsotg->dev, "0x%04x    0x%04x\n",
				 hsotg->frame_num_array[i],
				 hsotg->last_frame_num_array[i]);
		}
		hsotg->dumped_frame_num_array = 1;
	}
	hsotg->last_frame_num = curr_frame_number;
#endif
}

static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd)
{
	struct urb *usb_urb;

	if (!chan->qh)
		return;

	if (chan->qh->dev_speed == USB_SPEED_HIGH)
		return;

	if (!qtd->urb)
		return;

	usb_urb = qtd->urb->priv;
	if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
		return;

	if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
		chan->qh->tt_buffer_dirty = 1;
		if (usb_hub_clear_tt_buffer(usb_urb))
			/* Clear failed; let's hope things work anyway */
			chan->qh->tt_buffer_dirty = 0;
	}
}

/*
 * Handles the start-of-frame interrupt in host mode. Non-periodic
 * transactions may be queued to the DWC_otg controller for the current
 * (micro)frame. Periodic transactions may be queued to the controller
 * for the next (micro)frame.
 */
static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
{
	enum dwc2_transaction_type tr_type;
	struct list_head *qh_entry;
	struct dwc2_qh *qh;
	int next_sched_frame = -1;

#ifdef DEBUG_SOF
	dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
#endif

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	dwc2_track_missed_sofs(hsotg);

	/* Determine whether any periodic QHs should be executed */
	qh_entry = hsotg->periodic_sched_inactive.next;
	while (qh_entry != &hsotg->periodic_sched_inactive) {
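		/*
		 * Save the next list pointer before examining this QH: if the
		 * QH turns out to be ready, list_move() below unlinks it from
		 * the inactive list while we are still iterating over it.
		 */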
		qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
		qh_entry = qh_entry->next;
		if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number)) {
			/*
			 * Move QH to the ready list to be executed next
			 * (micro)frame
			 */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_ready);
		} else {
			if (next_sched_frame < 0 ||
			    dwc2_frame_num_le(qh->sched_frame,
					      next_sched_frame))
				next_sched_frame = qh->sched_frame;
		}
	}

	hsotg->next_sched_frame = next_sched_frame;

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);

	/* Clear interrupt */
	writel(GINTSTS_SOF, hsotg->regs + GINTSTS);
}

/*
 * Handles the Rx FIFO Level Interrupt, which indicates that there is
 * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
 * memory if the DWC_otg controller is operating in Slave mode.
 */
static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
{
	u32 grxsts, chnum, bcnt, dpid, pktsts;
	struct dwc2_host_chan *chan;

	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");

	grxsts = readl(hsotg->regs + GRXSTSP);
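	/*
	 * The shift operator binds tighter than the bitwise AND, so each of
	 * the field extractions below parses as
	 * (grxsts >> SHIFT) & (MASK >> SHIFT): shift the field down first,
	 * then mask it to its width.
	 */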
	chnum = grxsts >> GRXSTS_HCHNUM_SHIFT &
		GRXSTS_HCHNUM_MASK >> GRXSTS_HCHNUM_SHIFT;
	chan = hsotg->hc_ptr_array[chnum];
	if (!chan) {
		dev_err(hsotg->dev, "Unable to get corresponding channel\n");
		return;
	}

	bcnt = grxsts >> GRXSTS_BYTECNT_SHIFT &
	       GRXSTS_BYTECNT_MASK >> GRXSTS_BYTECNT_SHIFT;
	dpid = grxsts >> GRXSTS_DPID_SHIFT &
	       GRXSTS_DPID_MASK >> GRXSTS_DPID_SHIFT;
	pktsts = grxsts & GRXSTS_PKTSTS_MASK;

	/* Packet Status */
	if (dbg_perio()) {
		dev_vdbg(hsotg->dev, "    Ch num = %d\n", chnum);
		dev_vdbg(hsotg->dev, "    Count = %d\n", bcnt);
		dev_vdbg(hsotg->dev, "    DPID = %d, chan.dpid = %d\n", dpid,
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, "    PStatus = %d\n",
			 pktsts >> GRXSTS_PKTSTS_SHIFT &
			 GRXSTS_PKTSTS_MASK >> GRXSTS_PKTSTS_SHIFT);
	}

	switch (pktsts) {
	case GRXSTS_PKTSTS_HCHIN:
		/* Read the data into the host buffer */
		if (bcnt > 0) {
			dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);

			/* Update the HC fields for the next packet received */
			chan->xfer_count += bcnt;
			chan->xfer_buf += bcnt;
		}
		break;
	case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
	case GRXSTS_PKTSTS_DATATOGGLEERR:
	case GRXSTS_PKTSTS_HCHHALTED:
		/* Handled in interrupt, just ignore data */
		break;
	default:
		dev_err(hsotg->dev,
			"RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
		break;
	}
}

/*
 * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
 * data packets may be written to the FIFO for OUT transfers. More requests
 * may be written to the non-periodic request queue for IN transfers. This
 * interrupt is enabled only in Slave mode.
 */
static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
}

/*
 * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
 * packets may be written to the FIFO for OUT transfers. More requests may be
 * written to the periodic request queue for IN transfers. This interrupt is
 * enabled only in Slave mode.
 */
static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
}

static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
			      u32 *hprt0_modify)
{
	struct dwc2_core_params *params = hsotg->core_params;
	int do_reset = 0;
	u32 usbcfg;
	u32 prtspd;
	u32 hcfg;
	u32 fslspclksel;
	u32 hfir;

	dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Recalculate HFIR.FrInterval every time the port is enabled */
	hfir = readl(hsotg->regs + HFIR);
	hfir &= ~HFIR_FRINT_MASK;
	hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
		HFIR_FRINT_MASK;
	writel(hfir, hsotg->regs + HFIR);

	/* Check if we need to adjust the PHY clock speed for low power */
	if (!params->host_support_fs_ls_low_power) {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;
		return;
	}

	usbcfg = readl(hsotg->regs + GUSBCFG);
	prtspd = hprt0 & HPRT0_SPD_MASK;

	if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
		/* Low power */
		if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
			/* Set PHY low power clock select for FS/LS devices */
			usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
			writel(usbcfg, hsotg->regs + GUSBCFG);
			do_reset = 1;
		}

		hcfg = readl(hsotg->regs + HCFG);
		fslspclksel = hcfg & HCFG_FSLSPCLKSEL_MASK;

		if (prtspd == HPRT0_SPD_LOW_SPEED &&
		    params->host_ls_low_power_phy_clk ==
		    DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
			/* 6 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 6 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= HCFG_FSLSPCLKSEL_6_MHZ;
				writel(hcfg, hsotg->regs + HCFG);
				do_reset = 1;
			}
		} else {
			/* 48 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 48 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= HCFG_FSLSPCLKSEL_48_MHZ;
				writel(hcfg, hsotg->regs + HCFG);
				do_reset = 1;
			}
		}
	} else {
		/* Not low power */
		if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
			usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
			writel(usbcfg, hsotg->regs + GUSBCFG);
			do_reset = 1;
		}
	}

	if (do_reset) {
		*hprt0_modify |= HPRT0_RST;
		queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
				   msecs_to_jiffies(60));
	} else {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;
	}
}

/*
 * There are multiple conditions that can cause a port interrupt. This function
 * determines which interrupt conditions have occurred and handles them
 * appropriately.
 */
static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
{
	u32 hprt0;
	u32 hprt0_modify;

	dev_vdbg(hsotg->dev, "--Port Interrupt--\n");

	hprt0 = readl(hsotg->regs + HPRT0);
	hprt0_modify = hprt0;

	/*
	 * Clear appropriate bits in HPRT0 to clear the interrupt bit in
	 * GINTSTS
	 */
	hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
			  HPRT0_OVRCURRCHG);
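	/*
	 * HPRT0_ENA is masked off here too: that bit is write-1-to-clear
	 * (writing it back would disable the port), so it must never be
	 * echoed when acknowledging the change bits below.
	 */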

	/*
	 * Port Connect Detected
	 * Set flag and clear if detected
	 */
	if (hprt0 & HPRT0_CONNDET) {
		dev_vdbg(hsotg->dev,
			 "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
			 hprt0);
		hsotg->flags.b.port_connect_status_change = 1;
		hsotg->flags.b.port_connect_status = 1;
		hprt0_modify |= HPRT0_CONNDET;

		/*
		 * The Hub driver asserts a reset when it sees port connect
		 * status change flag
		 */
	}

	/*
	 * Port Enable Changed
	 * Clear if detected - Set internal flag if disabled
	 */
	if (hprt0 & HPRT0_ENACHG) {
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
			 hprt0, !!(hprt0 & HPRT0_ENA));
		hprt0_modify |= HPRT0_ENACHG;
		if (hprt0 & HPRT0_ENA)
			dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
		else
			hsotg->flags.b.port_enable_change = 1;
	}

	/* Overcurrent Change Interrupt */
	if (hprt0 & HPRT0_OVRCURRCHG) {
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
			 hprt0);
		hsotg->flags.b.port_over_current_change = 1;
		hprt0_modify |= HPRT0_OVRCURRCHG;
	}

	/* Clear Port Interrupts */
	writel(hprt0_modify, hsotg->regs + HPRT0);
}

/*
 * Gets the actual length of a transfer after the transfer halts. halt_status
 * holds the reason for the halt.
 *
 * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
 * is set to 1 upon return if less than the requested number of bytes were
 * transferred. short_read may also be NULL on entry, in which case it remains
 * unchanged.
 */
static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd,
				       enum dwc2_halt_status halt_status,
				       int *short_read)
{
	u32 hctsiz, count, length;

	hctsiz = readl(hsotg->regs + HCTSIZ(chnum));

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->ep_is_in) {
			count = hctsiz >> TSIZ_XFERSIZE_SHIFT &
				TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT;
			length = chan->xfer_len - count;
			if (short_read != NULL)
				*short_read = (count != 0);
		} else if (chan->qh->do_split) {
			length = qtd->ssplit_out_xfer_count;
		} else {
			length = chan->xfer_len;
		}
	} else {
		/*
		 * Must use the hctsiz.pktcnt field to determine how much data
		 * has been transferred. This field reflects the number of
		 * packets that have been transferred via the USB. This is
		 * always an integral number of packets if the transfer was
		 * halted before its normal completion. (Can't use the
		 * hctsiz.xfersize field because that reflects the number of
		 * bytes transferred via the AHB, not the USB).
		 */
		count = hctsiz >> TSIZ_PKTCNT_SHIFT &
			TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT;
		length = (chan->start_pkt_count - count) * chan->max_packet;
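		/*
		 * Worked example: if the transfer was programmed with
		 * start_pkt_count = 4 and max_packet = 512, and pktcnt still
		 * reads 1 at the halt, then (4 - 1) * 512 = 1536 bytes made
		 * it onto the bus.
		 */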
	}

	return length;
}

/**
 * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
 * Complete interrupt on the host channel. Updates the actual_length field
 * of the URB based on the number of bytes transferred via the host channel.
 * Sets the URB status if the data transfer is finished.
 *
 * Return: 1 if the data transfer specified by the URB is completely finished,
 * 0 otherwise
 */
static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 struct dwc2_hcd_urb *urb,
				 struct dwc2_qtd *qtd)
{
	u32 hctsiz;
	int xfer_done = 0;
	int short_read = 0;
	int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
						      DWC2_HC_XFER_COMPLETE,
						      &short_read);

	if (urb->actual_length + xfer_length > urb->length) {
		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
		xfer_length = urb->length - urb->actual_length;
	}

	/* Non DWORD-aligned buffer case handling */
	if (chan->align_buf && xfer_length && chan->ep_is_in) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		dma_sync_single_for_cpu(hsotg->dev, urb->dma, urb->length,
					DMA_FROM_DEVICE);
		memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
		       xfer_length);
		dma_sync_single_for_device(hsotg->dev, urb->dma, urb->length,
					   DMA_FROM_DEVICE);
	}

	dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
		 urb->actual_length, xfer_length);
	urb->actual_length += xfer_length;

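	/*
	 * A bulk URB with URB_SEND_ZERO_PACKET that ends exactly on a
	 * max_packet boundary is not finished yet: xfer_done stays 0 so a
	 * terminating zero-length packet can still be queued.
	 */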
	if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
	    (urb->flags & URB_SEND_ZERO_PACKET) &&
	    urb->actual_length >= urb->length &&
	    !(urb->length % chan->max_packet)) {
		xfer_done = 0;
	} else if (short_read || urb->actual_length >= urb->length) {
		xfer_done = 1;
		urb->status = 0;
	}

	hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
	dev_vdbg(hsotg->dev, "  chan->xfer_len %d\n", chan->xfer_len);
	dev_vdbg(hsotg->dev, "  hctsiz.xfersize %d\n",
		 hctsiz >> TSIZ_XFERSIZE_SHIFT &
		 TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT);
	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n", urb->length);
	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n", urb->actual_length);
	dev_vdbg(hsotg->dev, "  short_read %d, xfer_done %d\n", short_read,
		 xfer_done);

	return xfer_done;
}

/*
 * Save the starting data toggle for the next transfer. The data toggle is
 * saved in the QH for non-control transfers and it's saved in the QTD for
 * control transfers.
 */
void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
			       struct dwc2_host_chan *chan, int chnum,
			       struct dwc2_qtd *qtd)
{
	u32 hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
	u32 pid = hctsiz & TSIZ_SC_MC_PID_MASK;

	if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		if (pid == TSIZ_SC_MC_PID_DATA0)
			chan->qh->data_toggle = DWC2_HC_PID_DATA0;
		else
			chan->qh->data_toggle = DWC2_HC_PID_DATA1;
	} else {
		if (pid == TSIZ_SC_MC_PID_DATA0)
			qtd->data_toggle = DWC2_HC_PID_DATA0;
		else
			qtd->data_toggle = DWC2_HC_PID_DATA1;
	}
}

/**
 * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
 * the transfer is stopped for any reason. The fields of the current entry in
 * the frame descriptor array are set based on the transfer state and the input
 * halt_status. Completes the Isochronous URB if all the URB frames have been
 * completed.
 *
 * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
 * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
 */
static enum dwc2_halt_status dwc2_update_isoc_urb_state(
		struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		int chnum, struct dwc2_qtd *qtd,
		enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_hcd_urb *urb = qtd->urb;

	if (!urb)
		return DWC2_HC_XFER_NO_HALT_STATUS;

	frame_desc = &urb->iso_descs[qtd->isoc_frame_index];

	switch (halt_status) {
	case DWC2_HC_XFER_COMPLETE:
		frame_desc->status = 0;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		if (chan->align_buf && frame_desc->actual_length &&
		    chan->ep_is_in) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			dma_sync_single_for_cpu(hsotg->dev, urb->dma,
						urb->length, DMA_FROM_DEVICE);
			memcpy(urb->buf + frame_desc->offset +
			       qtd->isoc_split_offset, chan->qh->dw_align_buf,
			       frame_desc->actual_length);
			dma_sync_single_for_device(hsotg->dev, urb->dma,
						   urb->length,
						   DMA_FROM_DEVICE);
		}
		break;
	case DWC2_HC_XFER_FRAME_OVERRUN:
		urb->error_count++;
		if (chan->ep_is_in)
			frame_desc->status = -ENOSR;
		else
			frame_desc->status = -ECOMM;
		frame_desc->actual_length = 0;
		break;
	case DWC2_HC_XFER_BABBLE_ERR:
		urb->error_count++;
		frame_desc->status = -EOVERFLOW;
		/* Don't need to update actual_length in this case */
		break;
	case DWC2_HC_XFER_XACT_ERR:
		urb->error_count++;
		frame_desc->status = -EPROTO;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		if (chan->align_buf && frame_desc->actual_length &&
		    chan->ep_is_in) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			dma_sync_single_for_cpu(hsotg->dev, urb->dma,
						urb->length, DMA_FROM_DEVICE);
			memcpy(urb->buf + frame_desc->offset +
			       qtd->isoc_split_offset, chan->qh->dw_align_buf,
			       frame_desc->actual_length);
			dma_sync_single_for_device(hsotg->dev, urb->dma,
						   urb->length,
						   DMA_FROM_DEVICE);
		}

		/* Skip whole frame */
		if (chan->qh->do_split &&
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
		}

		break;
	default:
		dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
			halt_status);
		break;
	}

	if (++qtd->isoc_frame_index == urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers. The individual
		 * frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		halt_status = DWC2_HC_XFER_URB_COMPLETE;
	} else {
		halt_status = DWC2_HC_XFER_COMPLETE;
	}

	return halt_status;
}

/*
 * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
 * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
 * still linked to the QH, the QH is added to the end of the inactive
 * non-periodic schedule. For periodic QHs, removes the QH from the periodic
 * schedule if no more QTDs are linked to the QH.
 */
static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			       int free_qtd)
{
	int continue_split = 0;
	struct dwc2_qtd *qtd;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "  %s(%p,%p,%d)\n", __func__,
			 hsotg, qh, free_qtd);

	if (list_empty(&qh->qtd_list)) {
		dev_dbg(hsotg->dev, "## QTD list empty ##\n");
		goto no_qtd;
	}

	qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);

	if (qtd->complete_split)
		continue_split = 1;
	else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
		 qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
		continue_split = 1;
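	/*
	 * continue_split indicates the QTD is mid-way through a split
	 * transaction sequence (a complete-split is pending, or an OUT split
	 * is positioned MID/END), so dwc2_hcd_qh_deactivate() is expected to
	 * keep the QH scheduled for the remaining split transactions.
	 */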

	if (free_qtd) {
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		continue_split = 0;
	}

no_qtd:
	if (qh->channel)
		qh->channel->align_buf = 0;
	qh->channel = NULL;
	dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
}

/**
 * dwc2_release_channel() - Releases a host channel for use by other transfers
 *
 * @hsotg:       The HCD state structure
 * @chan:        The host channel to release
 * @qtd:         The QTD associated with the host channel. This QTD may be
 *               freed if the transfer is complete or an error has occurred.
 * @halt_status: Reason the channel is being released. This status
 *               determines the actions taken by this function.
 *
 * Also attempts to select and queue more transactions since at least one host
 * channel is available.
 */
static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan,
				 struct dwc2_qtd *qtd,
				 enum dwc2_halt_status halt_status)
{
	enum dwc2_transaction_type tr_type;
	u32 haintmsk;
	int free_qtd = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "  %s: channel %d, halt_status %d\n",
			 __func__, chan->hc_num, halt_status);

	switch (halt_status) {
	case DWC2_HC_XFER_URB_COMPLETE:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_AHB_ERR:
	case DWC2_HC_XFER_STALL:
	case DWC2_HC_XFER_BABBLE_ERR:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_XACT_ERR:
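		/*
		 * Three transaction errors on the same transfer exhaust the
		 * retry budget, so give up and complete the URB with -EPROTO.
		 */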
		if (qtd && qtd->error_count >= 3) {
			dev_vdbg(hsotg->dev,
				 "  Complete URB with transaction error\n");
			free_qtd = 1;
			dwc2_host_complete(hsotg, qtd, -EPROTO);
		}
		break;
	case DWC2_HC_XFER_URB_DEQUEUE:
		/*
		 * The QTD has already been removed and the QH has been
		 * deactivated. Don't want to do anything except release the
		 * host channel and try to queue more transfers.
		 */
		goto cleanup;
	case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
		dev_vdbg(hsotg->dev, "  Complete URB with I/O error\n");
		free_qtd = 1;
		dwc2_host_complete(hsotg, qtd, -EIO);
		break;
	case DWC2_HC_XFER_NO_HALT_STATUS:
	default:
		break;
	}

	dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);

cleanup:
	/*
	 * Release the host channel for use by other transfers. The cleanup
	 * function clears the channel interrupt enables and conditions, so
	 * there's no need to clear the Channel Halted interrupt separately.
	 */
	if (!list_empty(&chan->hc_list_entry))
		list_del(&chan->hc_list_entry);
	dwc2_hc_cleanup(hsotg, chan);
	list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);

	if (hsotg->core_params->uframe_sched > 0) {
		hsotg->available_host_channels++;
	} else {
		switch (chan->ep_type) {
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			hsotg->non_periodic_channels--;
			break;
		default:
			/*
			 * Don't release reservations for periodic channels
			 * here. That's done when a periodic transfer is
			 * descheduled (i.e. when the QH is removed from the
			 * periodic schedule).
			 */
			break;
		}
	}

	haintmsk = readl(hsotg->regs + HAINTMSK);
	haintmsk &= ~(1 << chan->hc_num);
	writel(haintmsk, hsotg->regs + HAINTMSK);

	/* Try to queue more transfers now that there's a free channel */
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);
}

/*
 * Halts a host channel. If the channel cannot be halted immediately because
 * the request queue is full, this function ensures that the FIFO empty
 * interrupt for the appropriate queue is enabled so that the halt request can
 * be queued when there is space in the request queue.
 *
 * This function may also be called in DMA mode. In that case, the channel is
 * simply released since the core always halts the channel automatically in
 * DMA mode.
 */
static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
			      enum dwc2_halt_status halt_status)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (hsotg->core_params->dma_enable > 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
		return;
	}

	/* Slave mode processing */
	dwc2_hc_halt(hsotg, chan, halt_status);

	if (chan->halt_on_queue) {
		u32 gintmsk;

		dev_vdbg(hsotg->dev, "Halt on queue\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			/*
			 * Make sure the Non-periodic Tx FIFO empty interrupt
			 * is enabled so that the non-periodic schedule will
			 * be processed
			 */
			gintmsk = readl(hsotg->regs + GINTMSK);
			gintmsk |= GINTSTS_NPTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		} else {
			dev_vdbg(hsotg->dev, "isoc/intr\n");
			/*
			 * Move the QH from the periodic queued schedule to
			 * the periodic assigned schedule. This allows the
			 * halt to be queued when the periodic schedule is
			 * processed.
			 */
			list_move(&chan->qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);

			/*
			 * Make sure the Periodic Tx FIFO Empty interrupt is
			 * enabled so that the periodic schedule will be
			 * processed
			 */
			gintmsk = readl(hsotg->regs + GINTMSK);
			gintmsk |= GINTSTS_PTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		}
	}
}

/*
 * Performs common cleanup for non-periodic transfers after a Transfer
 * Complete interrupt. This function should be called after any endpoint type
 * specific handling is finished to release the host channel.
 */
static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
					    struct dwc2_host_chan *chan,
					    int chnum, struct dwc2_qtd *qtd,
					    enum dwc2_halt_status halt_status)
{
	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	qtd->error_count = 0;

	if (chan->hcint & HCINTMSK_NYET) {
		/*
		 * Got a NYET on the last transaction of the transfer. This
		 * means that the endpoint should be in the PING state at the
		 * beginning of the next transfer.
		 */
		dev_vdbg(hsotg->dev, "got NYET\n");
		chan->qh->ping_state = 1;
	}

	/*
	 * Always halt and release the host channel to make it available for
	 * more transfers. There may still be more phases for a control
	 * transfer or more data packets for a bulk transfer at this point,
	 * but the host channel is still halted. A channel will be reassigned
	 * to the transfer when the non-periodic schedule is processed after
	 * the channel is released. This allows transactions to be queued
	 * properly via dwc2_hcd_queue_transactions, which also enables the
	 * Tx FIFO Empty interrupt if necessary.
	 */
	if (chan->ep_is_in) {
		/*
		 * IN transfers in Slave mode require an explicit disable to
		 * halt the channel. (In DMA mode, this call simply releases
		 * the channel.)
		 */
		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
	} else {
		/*
		 * The channel is automatically disabled by the core for OUT
		 * transfers in Slave mode
		 */
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
	}
}

/*
 * Performs common cleanup for periodic transfers after a Transfer Complete
 * interrupt. This function should be called after any endpoint type specific
 * handling is finished to release the host channel.
 */
static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan, int chnum,
					struct dwc2_qtd *qtd,
					enum dwc2_halt_status halt_status)
{
	u32 hctsiz = readl(hsotg->regs + HCTSIZ(chnum));

	qtd->error_count = 0;

	if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
		/* Core halts channel in these cases */
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
	else
		/* Flush any outstanding requests from the Tx queue */
		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
}

static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u32 len;

	if (!qtd->urb)
		return 0;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
					  DWC2_HC_XFER_COMPLETE, NULL);
	if (!len) {
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
		return 0;
	}

	frame_desc->actual_length += len;

	if (chan->align_buf && len) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		dma_sync_single_for_cpu(hsotg->dev, qtd->urb->dma,
					qtd->urb->length, DMA_FROM_DEVICE);
		memcpy(qtd->urb->buf + frame_desc->offset +
		       qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
		dma_sync_single_for_device(hsotg->dev, qtd->urb->dma,
					   qtd->urb->length, DMA_FROM_DEVICE);
	}

	qtd->isoc_split_offset += len;

	if (frame_desc->actual_length >= frame_desc->length) {
		frame_desc->status = 0;
		qtd->isoc_frame_index++;
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
	}

	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_URB_COMPLETE);
	} else {
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_NO_HALT_STATUS);
	}

	return 1;	/* Indicates that channel released */
}

/*
 * Handles a host channel Transfer Complete interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 */
static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
				  struct dwc2_host_chan *chan, int chnum,
				  struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
	int urb_xfer_done;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev,
			 "--Host Channel %d Interrupt: Transfer Complete--\n",
			 chnum);

	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
		if (pipe_type == USB_ENDPOINT_XFER_ISOC)
			/* Do not disable the interrupt, just clear it */
			return;
		goto handle_xfercomp_done;
	}

	/* Handle xfer complete on CSPLIT */
	if (chan->qh->do_split) {
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			if (qtd->complete_split &&
			    dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
							qtd))
				goto handle_xfercomp_done;
		} else {
			qtd->complete_split = 0;
		}
	}

	if (!urb)
		goto handle_xfercomp_done;

	/* Update the QTD and URB states */
	switch (pipe_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		switch (qtd->control_phase) {
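		/*
		 * A control transfer advances through SETUP, then an optional
		 * DATA stage (only when urb->length > 0), then STATUS; the
		 * URB is completed only once the STATUS stage finishes.
		 */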
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 "  Control setup transaction done\n");
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_DATA:
			urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
							      chnum, urb, qtd);
			if (urb_xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 "  Control data transfer done\n");
			} else {
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_STATUS:
			dev_vdbg(hsotg->dev, "  Control transfer complete\n");
			if (urb->status == -EINPROGRESS)
				urb->status = 0;
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
			break;
		}

		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
						halt_status);
		break;
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, "  Bulk transfer complete\n");
		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
						      qtd);
		if (urb_xfer_done) {
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
		} else {
			halt_status = DWC2_HC_XFER_COMPLETE;
		}

		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
						halt_status);
		break;
	case USB_ENDPOINT_XFER_INT:
		dev_vdbg(hsotg->dev, "  Interrupt transfer complete\n");
		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
						      qtd);

		/*
		 * Interrupt URB is done on the first transfer complete
		 * interrupt
		 */
		if (urb_xfer_done) {
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
		} else {
			halt_status = DWC2_HC_XFER_COMPLETE;
		}

		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
					    halt_status);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "  Isochronous transfer complete\n");
		if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
					chnum, qtd, DWC2_HC_XFER_COMPLETE);
		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
					    halt_status);
		break;
	}

handle_xfercomp_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
}

/*
 * Handles a host channel STALL interrupt. This handler may be called in
 * either DMA mode or Slave mode.
 */
static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
			       struct dwc2_host_chan *chan, int chnum,
			       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);

	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
		chnum);

	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
					    DWC2_HC_XFER_STALL);
		goto handle_stall_done;
	}

	if (!urb)
		goto handle_stall_halt;

	if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
		dwc2_host_complete(hsotg, qtd, -EPIPE);

	if (pipe_type == USB_ENDPOINT_XFER_BULK ||
	    pipe_type == USB_ENDPOINT_XFER_INT) {
		dwc2_host_complete(hsotg, qtd, -EPIPE);
		/*
		 * USB protocol requires resetting the data toggle for bulk
		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
		 * setup command is issued to the endpoint. Anticipate the
		 * CLEAR_FEATURE command since a STALL has occurred and reset
		 * the data toggle now.
		 */
		chan->qh->data_toggle = 0;
	}

handle_stall_halt:
	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);

handle_stall_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
}

/*
 * Updates the state of the URB when a transfer has been stopped due to an
 * abnormal condition before the transfer completes. Modifies the
 * actual_length field of the URB to reflect the number of bytes that have
 * actually been transferred via the host channel.
 */
static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan, int chnum,
				      struct dwc2_hcd_urb *urb,
				      struct dwc2_qtd *qtd,
				      enum dwc2_halt_status halt_status)
{
	u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
						      qtd, halt_status, NULL);
	u32 hctsiz;

	if (urb->actual_length + xfer_length > urb->length) {
		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
		xfer_length = urb->length - urb->actual_length;
	}

	/* Non DWORD-aligned buffer case handling */
	if (chan->align_buf && xfer_length && chan->ep_is_in) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		dma_sync_single_for_cpu(hsotg->dev, urb->dma, urb->length,
					DMA_FROM_DEVICE);
		memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
		       xfer_length);
		dma_sync_single_for_device(hsotg->dev, urb->dma, urb->length,
					   DMA_FROM_DEVICE);
	}

	urb->actual_length += xfer_length;

	hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
	dev_vdbg(hsotg->dev, "  chan->start_pkt_count %d\n",
		 chan->start_pkt_count);
	dev_vdbg(hsotg->dev, "  hctsiz.pktcnt %d\n",
		 hctsiz >> TSIZ_PKTCNT_SHIFT &
		 TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT);
	dev_vdbg(hsotg->dev, "  chan->max_packet %d\n", chan->max_packet);
	dev_vdbg(hsotg->dev, "  bytes_transferred %d\n",
		 xfer_length);
	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n",
		 urb->actual_length);
	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n",
		 urb->length);
}

/*
 * Handles a host channel NAK interrupt. This handler may be called in either
 * DMA mode or Slave mode.
 */
static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
			     struct dwc2_host_chan *chan, int chnum,
			     struct dwc2_qtd *qtd)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
			 chnum);

	/*
	 * When we get bulk NAKs, remember this so we hold off on this qh
	 * until the beginning of the next frame
	 */
	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
	case USB_ENDPOINT_XFER_BULK:
		chan->qh->nak_frame = dwc2_hcd_get_frame_number(hsotg);
		break;
	}

	/*
	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
	 * interrupt. Re-start the SSPLIT transfer.
	 */
	if (chan->do_split) {
		if (chan->complete_split)
			qtd->error_count = 0;
		qtd->complete_split = 0;
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
		goto handle_nak_done;
	}

	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
			/*
			 * NAK interrupts are enabled on bulk/control IN
			 * transfers in DMA mode for the sole purpose of
			 * resetting the error count after a transaction error
			 * occurs. The core will continue transferring data.
			 */
			qtd->error_count = 0;
			break;
		}

		/*
		 * NAK interrupts normally occur during OUT transfers in DMA
		 * or Slave mode. For IN transfers, more requests will be
		 * queued as request queue space is available.
		 */
		qtd->error_count = 0;

		if (!chan->qh->ping_state) {
			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
						  qtd, DWC2_HC_XFER_NAK);
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);

			if (chan->speed == USB_SPEED_HIGH)
				chan->qh->ping_state = 1;
		}

		/*
		 * Halt the channel so the transfer can be re-started from
		 * the appropriate point or the PING protocol will
		 * start/continue
		 */
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
		break;
	case USB_ENDPOINT_XFER_INT:
		qtd->error_count = 0;
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* Should never get called for isochronous transfers */
		dev_err(hsotg->dev, "NAK interrupt for ISOC transfer\n");
1290 1.1 skrll break;
1291 1.1 skrll }
1292 1.1 skrll
1293 1.1 skrll handle_nak_done:
1294 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
1295 1.1 skrll }
1296 1.1 skrll
1297 1.1 skrll /*
1298 1.1 skrll * Handles a host channel ACK interrupt. This interrupt is enabled when
1299 1.1 skrll * performing the PING protocol in Slave mode, when errors occur during
1300 1.1 skrll * either Slave mode or DMA mode, and during Start Split transactions.
1301 1.1 skrll */
1302 1.1 skrll static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
1303 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1304 1.1 skrll struct dwc2_qtd *qtd)
1305 1.1 skrll {
1306 1.1 skrll struct dwc2_hcd_iso_packet_desc *frame_desc;
1307 1.1 skrll
1308 1.1 skrll if (dbg_hc(chan))
1309 1.1 skrll dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
1310 1.1 skrll chnum);
1311 1.1 skrll
1312 1.1 skrll if (chan->do_split) {
1313 1.1 skrll /* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
1314 1.1 skrll if (!chan->ep_is_in &&
1315 1.1 skrll chan->data_pid_start != DWC2_HC_PID_SETUP)
1316 1.1 skrll qtd->ssplit_out_xfer_count = chan->xfer_len;
1317 1.1 skrll
1318 1.1 skrll if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
1319 1.1 skrll qtd->complete_split = 1;
1320 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1321 1.1 skrll } else {
1322 1.1 skrll /* ISOC OUT */
1323 1.1 skrll switch (chan->xact_pos) {
1324 1.1 skrll case DWC2_HCSPLT_XACTPOS_ALL:
1325 1.1 skrll break;
1326 1.1 skrll case DWC2_HCSPLT_XACTPOS_END:
1327 1.1 skrll qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
1328 1.1 skrll qtd->isoc_split_offset = 0;
1329 1.1 skrll break;
1330 1.1 skrll case DWC2_HCSPLT_XACTPOS_BEGIN:
1331 1.1 skrll case DWC2_HCSPLT_XACTPOS_MID:
1332 1.1 skrll /*
1333 1.1 skrll * For BEGIN or MID, calculate the length for
1334 1.1 skrll * the next microframe to determine the correct
1335 1.1 skrll * SSPLIT token, either MID or END
1336 1.1 skrll */
1337 1.1 skrll frame_desc = &qtd->urb->iso_descs[
1338 1.1 skrll qtd->isoc_frame_index];
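/*
 * Each start split for a full-speed isochronous OUT carries at
 * most 188 data bytes per microframe, so advance the split
 * offset by that amount
 */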
1339 1.1 skrll qtd->isoc_split_offset += 188;
1340 1.1 skrll
1341 1.1 skrll if (frame_desc->length - qtd->isoc_split_offset
1342 1.1 skrll <= 188)
1343 1.1 skrll qtd->isoc_split_pos =
1344 1.1 skrll DWC2_HCSPLT_XACTPOS_END;
1345 1.1 skrll else
1346 1.1 skrll qtd->isoc_split_pos =
1347 1.1 skrll DWC2_HCSPLT_XACTPOS_MID;
1348 1.1 skrll break;
1349 1.1 skrll }
1350 1.1 skrll }
1351 1.1 skrll } else {
1352 1.1 skrll qtd->error_count = 0;
1353 1.1 skrll
1354 1.1 skrll if (chan->qh->ping_state) {
1355 1.1 skrll chan->qh->ping_state = 0;
1356 1.1 skrll /*
1357 1.1 skrll * Halt the channel so the transfer can be re-started
1358 1.1 skrll * from the appropriate point. This only happens in
1359 1.1 skrll * Slave mode. In DMA mode, the ping_state is cleared
1360 1.1 skrll * when the transfer is started because the core
1361 1.1 skrll * automatically executes the PING, then the transfer.
1362 1.1 skrll */
1363 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1364 1.1 skrll }
1365 1.1 skrll }
1366 1.1 skrll
1367 1.1 skrll /*
1368 1.1 skrll * If the ACK occurred when _not_ in the PING state, let the channel
1369 1.1 skrll * continue transferring data after clearing the error count
1370 1.1 skrll */
1371 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
1372 1.1 skrll }
1373 1.1 skrll
1374 1.1 skrll /*
1375 1.1 skrll * Handles a host channel NYET interrupt. This interrupt should only occur on
1376 1.1 skrll * Bulk and Control OUT endpoints and for complete split transactions. If a
1377 1.1 skrll * NYET occurs at the same time as a Transfer Complete interrupt, it is
1378 1.1 skrll * handled in the xfercomp interrupt handler, not here. This handler may be
1379 1.1 skrll * called in either DMA mode or Slave mode.
1380 1.1 skrll */
1381 1.1 skrll static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
1382 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1383 1.1 skrll struct dwc2_qtd *qtd)
1384 1.1 skrll {
1385 1.1 skrll if (dbg_hc(chan))
1386 1.1 skrll dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
1387 1.1 skrll chnum);
1388 1.1 skrll
1389 1.1 skrll /*
1390 1.1 skrll * NYET on CSPLIT: the transaction has not completed downstream yet.
1391 1.1 skrll * Re-issue the CSPLIT immediately on non-periodic transfers.
1392 1.1 skrll */
1393 1.1 skrll if (chan->do_split && chan->complete_split) {
1394 1.1 skrll if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
1395 1.1 skrll hsotg->core_params->dma_enable > 0) {
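/*
 * Isochronous transfers are never retried, so give up on this
 * frame and advance to the next frame descriptor
 */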
1396 1.1 skrll qtd->complete_split = 0;
1397 1.1 skrll qtd->isoc_split_offset = 0;
1398 1.1 skrll qtd->isoc_frame_index++;
1399 1.1 skrll if (qtd->urb &&
1400 1.1 skrll qtd->isoc_frame_index == qtd->urb->packet_count) {
1401 1.1 skrll dwc2_host_complete(hsotg, qtd, 0);
1402 1.1 skrll dwc2_release_channel(hsotg, chan, qtd,
1403 1.1 skrll DWC2_HC_XFER_URB_COMPLETE);
1404 1.1 skrll } else {
1405 1.1 skrll dwc2_release_channel(hsotg, chan, qtd,
1406 1.1 skrll DWC2_HC_XFER_NO_HALT_STATUS);
1407 1.1 skrll }
1408 1.1 skrll goto handle_nyet_done;
1409 1.1 skrll }
1410 1.1 skrll
1411 1.1 skrll if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1412 1.1 skrll chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1413 1.1 skrll int frnum = dwc2_hcd_get_frame_number(hsotg);
1414 1.1 skrll
1415 1.1 skrll if (dwc2_full_frame_num(frnum) !=
1416 1.1 skrll dwc2_full_frame_num(chan->qh->sched_frame)) {
1417 1.1 skrll /*
1418 1.1 skrll * No longer in the same full speed frame.
1419 1.1 skrll * Treat this as a transaction error.
1420 1.1 skrll */
1421 1.1 skrll #if 0
1422 1.1 skrll /*
1423 1.1 skrll * Todo: Fix system performance so this can
1424 1.1 skrll * be treated as an error. Right now complete
1425 1.1 skrll * splits cannot be scheduled precisely enough
1426 1.1 skrll * due to other system activity, so this error
1427 1.1 skrll * occurs regularly in Slave mode.
1428 1.1 skrll */
1429 1.1 skrll qtd->error_count++;
1430 1.1 skrll #endif
1431 1.1 skrll qtd->complete_split = 0;
1432 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd,
1433 1.1 skrll DWC2_HC_XFER_XACT_ERR);
1434 1.1 skrll /* Todo: add support for isoc release */
1435 1.1 skrll goto handle_nyet_done;
1436 1.1 skrll }
1437 1.1 skrll }
1438 1.1 skrll
1439 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1440 1.1 skrll goto handle_nyet_done;
1441 1.1 skrll }
1442 1.1 skrll
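/*
 * NYET on a non-split BULK/CONTROL OUT: the endpoint accepted the
 * data but has no room for the next packet, so switch to the PING
 * protocol before the transfer continues
 */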
1443 1.1 skrll chan->qh->ping_state = 1;
1444 1.1 skrll qtd->error_count = 0;
1445 1.1 skrll
1446 1.1 skrll dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
1447 1.1 skrll DWC2_HC_XFER_NYET);
1448 1.1 skrll dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1449 1.1 skrll
1450 1.1 skrll /*
1451 1.1 skrll * Halt the channel and re-start the transfer so the PING protocol
1452 1.1 skrll * will start
1453 1.1 skrll */
1454 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1455 1.1 skrll
1456 1.1 skrll handle_nyet_done:
1457 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
1458 1.1 skrll }
1459 1.1 skrll
1460 1.1 skrll /*
1461 1.1 skrll * Handles a host channel babble interrupt. This handler may be called in
1462 1.1 skrll * either DMA mode or Slave mode.
1463 1.1 skrll */
1464 1.1 skrll static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1465 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1466 1.1 skrll struct dwc2_qtd *qtd)
1467 1.1 skrll {
1468 1.1 skrll dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1469 1.1 skrll chnum);
1470 1.1 skrll
1471 1.1 skrll dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1472 1.1 skrll
1473 1.1 skrll if (hsotg->core_params->dma_desc_enable > 0) {
1474 1.1 skrll dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1475 1.1 skrll DWC2_HC_XFER_BABBLE_ERR);
1476 1.1 skrll goto disable_int;
1477 1.1 skrll }
1478 1.1 skrll
1479 1.1 skrll if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1480 1.1 skrll dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
1481 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1482 1.1 skrll } else {
1483 1.1 skrll enum dwc2_halt_status halt_status;
1484 1.1 skrll
1485 1.1 skrll halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1486 1.1 skrll qtd, DWC2_HC_XFER_BABBLE_ERR);
1487 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1488 1.1 skrll }
1489 1.1 skrll
1490 1.1 skrll disable_int:
1491 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1492 1.1 skrll }
1493 1.1 skrll
1494 1.1 skrll /*
1495 1.1 skrll * Handles a host channel AHB error interrupt. This handler is only called in
1496 1.1 skrll * DMA mode.
1497 1.1 skrll */
1498 1.1 skrll static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
1499 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1500 1.1 skrll struct dwc2_qtd *qtd)
1501 1.1 skrll {
1502 1.1 skrll struct dwc2_hcd_urb *urb = qtd->urb;
1503 1.1 skrll char *pipetype, *speed;
1504 1.1 skrll u32 hcchar;
1505 1.1 skrll u32 hcsplt;
1506 1.1 skrll u32 hctsiz;
1507 1.1 skrll u32 hc_dma;
1508 1.1 skrll
1509 1.1 skrll dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
1510 1.1 skrll chnum);
1511 1.1 skrll
1512 1.1 skrll if (!urb)
1513 1.1 skrll goto handle_ahberr_halt;
1514 1.1 skrll
1515 1.1 skrll dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1516 1.1 skrll
1517 1.1 skrll hcchar = readl(hsotg->regs + HCCHAR(chnum));
1518 1.1 skrll hcsplt = readl(hsotg->regs + HCSPLT(chnum));
1519 1.1 skrll hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
1520 1.1 skrll hc_dma = readl(hsotg->regs + HCDMA(chnum));
1521 1.1 skrll
1522 1.1 skrll dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
1523 1.1 skrll dev_err(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
1524 1.1 skrll dev_err(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
1525 1.1 skrll dev_err(hsotg->dev, " Device address: %d\n",
1526 1.1 skrll dwc2_hcd_get_dev_addr(&urb->pipe_info));
1527 1.1 skrll dev_err(hsotg->dev, " Endpoint: %d, %s\n",
1528 1.1 skrll dwc2_hcd_get_ep_num(&urb->pipe_info),
1529 1.1 skrll dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
1530 1.1 skrll
1531 1.1 skrll switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
1532 1.1 skrll case USB_ENDPOINT_XFER_CONTROL:
1533 1.1 skrll pipetype = "CONTROL";
1534 1.1 skrll break;
1535 1.1 skrll case USB_ENDPOINT_XFER_BULK:
1536 1.1 skrll pipetype = "BULK";
1537 1.1 skrll break;
1538 1.1 skrll case USB_ENDPOINT_XFER_INT:
1539 1.1 skrll pipetype = "INTERRUPT";
1540 1.1 skrll break;
1541 1.1 skrll case USB_ENDPOINT_XFER_ISOC:
1542 1.1 skrll pipetype = "ISOCHRONOUS";
1543 1.1 skrll break;
1544 1.1 skrll default:
1545 1.1 skrll pipetype = "UNKNOWN";
1546 1.1 skrll break;
1547 1.1 skrll }
1548 1.1 skrll
1549 1.1 skrll dev_err(hsotg->dev, " Endpoint type: %s\n", pipetype);
1550 1.1 skrll
1551 1.1 skrll switch (chan->speed) {
1552 1.1 skrll case USB_SPEED_HIGH:
1553 1.1 skrll speed = "HIGH";
1554 1.1 skrll break;
1555 1.1 skrll case USB_SPEED_FULL:
1556 1.1 skrll speed = "FULL";
1557 1.1 skrll break;
1558 1.1 skrll case USB_SPEED_LOW:
1559 1.1 skrll speed = "LOW";
1560 1.1 skrll break;
1561 1.1 skrll default:
1562 1.1 skrll speed = "UNKNOWN";
1563 1.1 skrll break;
1564 1.1 skrll }
1565 1.1 skrll
1566 1.1 skrll dev_err(hsotg->dev, " Speed: %s\n", speed);
1567 1.1 skrll
1568 1.1 skrll dev_err(hsotg->dev, " Max packet size: %d\n",
1569 1.1 skrll dwc2_hcd_get_mps(&urb->pipe_info));
1570 1.1 skrll dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length);
1571 1.1 skrll dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
1572 1.1 skrll urb->buf, (unsigned long)urb->dma);
1573 1.1 skrll dev_err(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n",
1574 1.1 skrll urb->setup_packet, (unsigned long)urb->setup_dma);
1575 1.1 skrll dev_err(hsotg->dev, " Interval: %d\n", urb->interval);
1576 1.1 skrll
1577 1.1 skrll /* Core halts the channel for Descriptor DMA mode */
1578 1.1 skrll if (hsotg->core_params->dma_desc_enable > 0) {
1579 1.1 skrll dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1580 1.1 skrll DWC2_HC_XFER_AHB_ERR);
1581 1.1 skrll goto handle_ahberr_done;
1582 1.1 skrll }
1583 1.1 skrll
1584 1.1 skrll dwc2_host_complete(hsotg, qtd, -EIO);
1585 1.1 skrll
1586 1.1 skrll handle_ahberr_halt:
1587 1.1 skrll /*
1588 1.1 skrll * Force a channel halt. Don't call dwc2_halt_channel because that won't
1589 1.1 skrll * write to the HCCHARn register in DMA mode to force the halt.
1590 1.1 skrll */
1591 1.1 skrll dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
1592 1.1 skrll
1593 1.1 skrll handle_ahberr_done:
1594 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
1595 1.1 skrll }
1596 1.1 skrll
1597 1.1 skrll /*
1598 1.1 skrll * Handles a host channel transaction error interrupt. This handler may be
1599 1.1 skrll * called in either DMA mode or Slave mode.
1600 1.1 skrll */
1601 1.1 skrll static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
1602 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1603 1.1 skrll struct dwc2_qtd *qtd)
1604 1.1 skrll {
1605 1.1 skrll dev_dbg(hsotg->dev,
1606 1.1 skrll "--Host Channel %d Interrupt: Transaction Error--\n", chnum);
1607 1.1 skrll
1608 1.1 skrll dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1609 1.1 skrll
1610 1.1 skrll if (hsotg->core_params->dma_desc_enable > 0) {
1611 1.1 skrll dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1612 1.1 skrll DWC2_HC_XFER_XACT_ERR);
1613 1.1 skrll goto handle_xacterr_done;
1614 1.1 skrll }
1615 1.1 skrll
1616 1.1 skrll switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1617 1.1 skrll case USB_ENDPOINT_XFER_CONTROL:
1618 1.1 skrll case USB_ENDPOINT_XFER_BULK:
1619 1.1 skrll qtd->error_count++;
1620 1.1 skrll if (!chan->qh->ping_state) {
1621 1.1 skrll
1622 1.1 skrll dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1623 1.1 skrll qtd, DWC2_HC_XFER_XACT_ERR);
1624 1.1 skrll dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1625 1.1 skrll if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
1626 1.1 skrll chan->qh->ping_state = 1;
1627 1.1 skrll }
1628 1.1 skrll
1629 1.1 skrll /*
1630 1.1 skrll * Halt the channel so the transfer can be re-started from
1631 1.1 skrll * the appropriate point or the PING protocol will start
1632 1.1 skrll */
1633 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1634 1.1 skrll break;
1635 1.1 skrll case USB_ENDPOINT_XFER_INT:
1636 1.1 skrll qtd->error_count++;
1637 1.1 skrll if (chan->do_split && chan->complete_split)
1638 1.1 skrll qtd->complete_split = 0;
1639 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1640 1.1 skrll break;
1641 1.1 skrll case USB_ENDPOINT_XFER_ISOC:
1642 1.1 skrll {
1643 1.1 skrll enum dwc2_halt_status halt_status;
1644 1.1 skrll
1645 1.1 skrll halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1646 1.1 skrll chnum, qtd, DWC2_HC_XFER_XACT_ERR);
1647 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1648 1.1 skrll }
1649 1.1 skrll break;
1650 1.1 skrll }
1651 1.1 skrll
1652 1.1 skrll handle_xacterr_done:
1653 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
1654 1.1 skrll }
1655 1.1 skrll
1656 1.1 skrll /*
1657 1.1 skrll * Handles a host channel frame overrun interrupt. This handler may be called
1658 1.1 skrll * in either DMA mode or Slave mode.
1659 1.1 skrll */
1660 1.1 skrll static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
1661 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1662 1.1 skrll struct dwc2_qtd *qtd)
1663 1.1 skrll {
1664 1.1 skrll enum dwc2_halt_status halt_status;
1665 1.1 skrll
1666 1.1 skrll if (dbg_hc(chan))
1667 1.1 skrll dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
1668 1.1 skrll chnum);
1669 1.1 skrll
1670 1.1 skrll dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1671 1.1 skrll
1672 1.1 skrll switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1673 1.1 skrll case USB_ENDPOINT_XFER_CONTROL:
1674 1.1 skrll case USB_ENDPOINT_XFER_BULK:
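/* Nothing to do for non-periodic transfers */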
1675 1.1 skrll break;
1676 1.1 skrll case USB_ENDPOINT_XFER_INT:
1677 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1678 1.1 skrll break;
1679 1.1 skrll case USB_ENDPOINT_XFER_ISOC:
1680 1.1 skrll halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1681 1.1 skrll qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1682 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1683 1.1 skrll break;
1684 1.1 skrll }
1685 1.1 skrll
1686 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
1687 1.1 skrll }
1688 1.1 skrll
1689 1.1 skrll /*
1690 1.1 skrll * Handles a host channel data toggle error interrupt. This handler may be
1691 1.1 skrll * called in either DMA mode or Slave mode.
1692 1.1 skrll */
1693 1.1 skrll static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
1694 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1695 1.1 skrll struct dwc2_qtd *qtd)
1696 1.1 skrll {
1697 1.1 skrll dev_dbg(hsotg->dev,
1698 1.1 skrll "--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
1699 1.1 skrll
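/*
 * A toggle mismatch on an IN transfer usually means the device
 * resent a packet whose ACK was lost; the duplicate data is simply
 * dropped and the transfer continues, so reset the error count.
 * A toggle error on an OUT transfer is not expected.
 */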
1700 1.1 skrll if (chan->ep_is_in)
1701 1.1 skrll qtd->error_count = 0;
1702 1.1 skrll else
1703 1.1 skrll dev_err(hsotg->dev,
1704 1.1 skrll "Data Toggle Error on OUT transfer, channel %d\n",
1705 1.1 skrll chnum);
1706 1.1 skrll
1707 1.1 skrll dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1708 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
1709 1.1 skrll }
1710 1.1 skrll
1711 1.1 skrll /*
1712 1.1 skrll * For debug only. It checks that a valid halt status is set and that
1713 1.1 skrll * HCCHARn.chdis is clear. If there's a problem, corrective action is
1714 1.1 skrll * taken and a warning is issued.
1715 1.1 skrll *
1716 1.1 skrll * Return: true if halt status is ok, false otherwise
1717 1.1 skrll */
1718 1.1 skrll static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
1719 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1720 1.1 skrll struct dwc2_qtd *qtd)
1721 1.1 skrll {
1722 1.1 skrll #ifdef DEBUG
1723 1.1 skrll u32 hcchar;
1724 1.1 skrll u32 hctsiz;
1725 1.1 skrll u32 hcintmsk;
1726 1.1 skrll u32 hcsplt;
1727 1.1 skrll
1728 1.1 skrll if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
1729 1.1 skrll /*
1730 1.1 skrll * This code is here only as a check. This condition should
1731 1.1 skrll * never happen. Ignore the halt if it does occur.
1732 1.1 skrll */
1733 1.1 skrll hcchar = readl(hsotg->regs + HCCHAR(chnum));
1734 1.1 skrll hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
1735 1.1 skrll hcintmsk = readl(hsotg->regs + HCINTMSK(chnum));
1736 1.1 skrll hcsplt = readl(hsotg->regs + HCSPLT(chnum));
1737 1.1 skrll dev_dbg(hsotg->dev,
1738 1.1 skrll "%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
1739 1.1 skrll __func__);
1740 1.1 skrll dev_dbg(hsotg->dev,
1741 1.1 skrll "channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
1742 1.1 skrll chnum, hcchar, hctsiz);
1743 1.1 skrll dev_dbg(hsotg->dev,
1744 1.1 skrll "hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
1745 1.1 skrll chan->hcint, hcintmsk, hcsplt);
1746 1.1 skrll if (qtd)
1747 1.1 skrll dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
1748 1.1 skrll qtd->complete_split);
1749 1.1 skrll dev_warn(hsotg->dev,
1750 1.1 skrll "%s: no halt status, channel %d, ignoring interrupt\n",
1751 1.1 skrll __func__, chnum);
1752 1.1 skrll return false;
1753 1.1 skrll }
1754 1.1 skrll
1755 1.1 skrll /*
1756 1.1 skrll * This code is here only as a check. hcchar.chdis should never be set
1757 1.1 skrll * when the halt interrupt occurs. Halt the channel again if it does
1758 1.1 skrll * occur.
1759 1.1 skrll */
1760 1.1 skrll hcchar = readl(hsotg->regs + HCCHAR(chnum));
1761 1.1 skrll if (hcchar & HCCHAR_CHDIS) {
1762 1.1 skrll dev_warn(hsotg->dev,
1763 1.1 skrll "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
1764 1.1 skrll __func__, hcchar);
1765 1.1 skrll chan->halt_pending = 0;
1766 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
1767 1.1 skrll return false;
1768 1.1 skrll }
1769 1.1 skrll #endif
1770 1.1 skrll
1771 1.1 skrll return true;
1772 1.1 skrll }
1773 1.1 skrll
1774 1.1 skrll /*
1775 1.1 skrll * Handles a host Channel Halted interrupt in DMA mode. This handler
1776 1.1 skrll * determines the reason the channel halted and proceeds accordingly.
1777 1.1 skrll */
1778 1.1 skrll static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
1779 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1780 1.1 skrll struct dwc2_qtd *qtd)
1781 1.1 skrll {
1782 1.1 skrll u32 hcintmsk;
1783 1.1 skrll int out_nak_enh = 0;
1784 1.1 skrll
1785 1.1 skrll if (dbg_hc(chan))
1786 1.1 skrll dev_vdbg(hsotg->dev,
1787 1.1 skrll "--Host Channel %d Interrupt: DMA Channel Halted--\n",
1788 1.1 skrll chnum);
1789 1.1 skrll
1790 1.1 skrll /*
1791 1.1 skrll * For core with OUT NAK enhancement, the flow for high-speed
1792 1.1 skrll * CONTROL/BULK OUT is handled a little differently
1793 1.1 skrll */
1794 1.1 skrll if (hsotg->snpsid >= DWC2_CORE_REV_2_71a) {
1795 1.1 skrll if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
1796 1.1 skrll (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1797 1.1 skrll chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
1798 1.1 skrll out_nak_enh = 1;
1799 1.1 skrll }
1800 1.1 skrll }
1801 1.1 skrll
1802 1.1 skrll if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1803 1.1 skrll (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
1804 1.1 skrll hsotg->core_params->dma_desc_enable <= 0)) {
1805 1.1 skrll if (hsotg->core_params->dma_desc_enable > 0)
1806 1.1 skrll dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1807 1.1 skrll chan->halt_status);
1808 1.1 skrll else
1809 1.1 skrll /*
1810 1.1 skrll * Just release the channel. A dequeue can happen on a
1811 1.1 skrll * transfer timeout. In the case of an AHB Error, the
1812 1.1 skrll * channel was forced to halt because there's no way to
1813 1.1 skrll * gracefully recover.
1814 1.1 skrll */
1815 1.1 skrll dwc2_release_channel(hsotg, chan, qtd,
1816 1.1 skrll chan->halt_status);
1817 1.1 skrll return;
1818 1.1 skrll }
1819 1.1 skrll
1820 1.1 skrll hcintmsk = readl(hsotg->regs + HCINTMSK(chnum));
1821 1.1 skrll
1822 1.1 skrll if (chan->hcint & HCINTMSK_XFERCOMPL) {
1823 1.1 skrll /*
1824 1.1 skrll * Todo: This is here because of a possible hardware bug. Spec
1825 1.1 skrll * says that on SPLIT-ISOC OUT transfers in DMA mode, a HALT
1826 1.1 skrll * interrupt w/ACK bit set should occur, but I only see the
1827 1.1 skrll * XFERCOMP bit, even with it masked out. This is a workaround
1828 1.1 skrll * for that behavior. Should fix this when hardware is fixed.
1829 1.1 skrll */
1830 1.1 skrll if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
1831 1.1 skrll dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1832 1.1 skrll dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
1833 1.1 skrll } else if (chan->hcint & HCINTMSK_STALL) {
1834 1.1 skrll dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
1835 1.1 skrll } else if ((chan->hcint & HCINTMSK_XACTERR) &&
1836 1.1 skrll hsotg->core_params->dma_desc_enable <= 0) {
1837 1.1 skrll if (out_nak_enh) {
1838 1.1 skrll if (chan->hcint &
1839 1.1 skrll (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
1840 1.1 skrll dev_vdbg(hsotg->dev,
1841 1.1 skrll "XactErr with NYET/NAK/ACK\n");
1842 1.1 skrll qtd->error_count = 0;
1843 1.1 skrll } else {
1844 1.1 skrll dev_vdbg(hsotg->dev,
1845 1.1 skrll "XactErr without NYET/NAK/ACK\n");
1846 1.1 skrll }
1847 1.1 skrll }
1848 1.1 skrll
1849 1.1 skrll /*
1850 1.1 skrll * Must handle xacterr before nak or ack. Could get a xacterr
1851 1.1 skrll * at the same time as either of these on a BULK/CONTROL OUT
1852 1.1 skrll * that started with a PING. The xacterr takes precedence.
1853 1.1 skrll */
1854 1.1 skrll dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1855 1.1 skrll } else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
1856 1.1 skrll hsotg->core_params->dma_desc_enable > 0) {
1857 1.1 skrll dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1858 1.1 skrll } else if ((chan->hcint & HCINTMSK_AHBERR) &&
1859 1.1 skrll hsotg->core_params->dma_desc_enable > 0) {
1860 1.1 skrll dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
1861 1.1 skrll } else if (chan->hcint & HCINTMSK_BBLERR) {
1862 1.1 skrll dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
1863 1.1 skrll } else if (chan->hcint & HCINTMSK_FRMOVRUN) {
1864 1.1 skrll dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
1865 1.1 skrll } else if (!out_nak_enh) {
1866 1.1 skrll if (chan->hcint & HCINTMSK_NYET) {
1867 1.1 skrll /*
1868 1.1 skrll * Must handle nyet before nak or ack. Could get a nyet
1869 1.1 skrll * at the same time as either of those on a BULK/CONTROL
1870 1.1 skrll * OUT that started with a PING. The nyet takes
1871 1.1 skrll * precedence.
1872 1.1 skrll */
1873 1.1 skrll dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
1874 1.1 skrll } else if ((chan->hcint & HCINTMSK_NAK) &&
1875 1.1 skrll !(hcintmsk & HCINTMSK_NAK)) {
1876 1.1 skrll /*
1877 1.1 skrll * If nak is not masked, it's because a non-split IN
1878 1.1 skrll * transfer is in an error state. In that case, the nak
1879 1.1 skrll * is handled by the nak interrupt handler, not here.
1880 1.1 skrll * Handle nak here for BULK/CONTROL OUT transfers, which
1881 1.1 skrll * halt on a NAK to allow rewinding the buffer pointer.
1882 1.1 skrll */
1883 1.1 skrll dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
1884 1.1 skrll } else if ((chan->hcint & HCINTMSK_ACK) &&
1885 1.1 skrll !(hcintmsk & HCINTMSK_ACK)) {
1886 1.1 skrll /*
1887 1.1 skrll * If ack is not masked, it's because a non-split IN
1888 1.1 skrll * transfer is in an error state. In that case, the ack
1889 1.1 skrll * is handled by the ack interrupt handler, not here.
1890 1.1 skrll * Handle ack here for split transfers. Start splits
1891 1.1 skrll * halt on ACK.
1892 1.1 skrll */
1893 1.1 skrll dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1894 1.1 skrll } else {
1895 1.1 skrll if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1896 1.1 skrll chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1897 1.1 skrll /*
1898 1.1 skrll * A periodic transfer halted with no other
1899 1.1 skrll * channel interrupts set. Assume it was halted
1900 1.1 skrll * by the core because it could not be completed
1901 1.1 skrll * in its scheduled (micro)frame.
1902 1.1 skrll */
1903 1.1 skrll dev_dbg(hsotg->dev,
1904 1.1 skrll "%s: Halt channel %d (assume incomplete periodic transfer)\n",
1905 1.1 skrll __func__, chnum);
1906 1.1 skrll dwc2_halt_channel(hsotg, chan, qtd,
1907 1.1 skrll DWC2_HC_XFER_PERIODIC_INCOMPLETE);
1908 1.1 skrll } else {
1909 1.1 skrll dev_err(hsotg->dev,
1910 1.1 skrll "%s: Channel %d - ChHltd set, but reason is unknown\n",
1911 1.1 skrll __func__, chnum);
1912 1.1 skrll dev_err(hsotg->dev,
1913 1.1 skrll "hcint 0x%08x, intsts 0x%08x\n",
1914 1.1 skrll chan->hcint,
1915 1.1 skrll readl(hsotg->regs + GINTSTS));
1916 1.1 skrll }
1917 1.1 skrll }
1918 1.1 skrll } else {
1919 1.1 skrll dev_info(hsotg->dev,
1920 1.1 skrll "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
1921 1.1 skrll chan->hcint);
1922 1.1 skrll }
1923 1.1 skrll }
1924 1.1 skrll
1925 1.1 skrll /*
1926 1.1 skrll * Handles a host channel Channel Halted interrupt
1927 1.1 skrll *
1928 1.1 skrll * In slave mode, this handler is called only when the driver specifically
1929 1.1 skrll * requests a halt. This occurs while handling other host channel interrupts
1930 1.1 skrll * (e.g. nak, xacterr, stall, nyet).
1931 1.1 skrll *
1932 1.1 skrll * In DMA mode, this is the interrupt that occurs when the core has finished
1933 1.1 skrll * processing a transfer on a channel. Other host channel interrupts (except
1934 1.1 skrll * ahberr) are disabled in DMA mode.
1935 1.1 skrll */
1936 1.1 skrll static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
1937 1.1 skrll struct dwc2_host_chan *chan, int chnum,
1938 1.1 skrll struct dwc2_qtd *qtd)
1939 1.1 skrll {
1940 1.1 skrll if (dbg_hc(chan))
1941 1.1 skrll dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
1942 1.1 skrll chnum);
1943 1.1 skrll
1944 1.1 skrll if (hsotg->core_params->dma_enable > 0) {
1945 1.1 skrll dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
1946 1.1 skrll } else {
1947 1.1 skrll if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
1948 1.1 skrll return;
1949 1.1 skrll dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
1950 1.1 skrll }
1951 1.1 skrll }
1952 1.1 skrll
1953 1.1 skrll /* Handles interrupt for a specific Host Channel */
1954 1.1 skrll static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
1955 1.1 skrll {
1956 1.1 skrll struct dwc2_qtd *qtd;
1957 1.1 skrll struct dwc2_host_chan *chan;
1958 1.1 skrll u32 hcint, hcintmsk;
1959 1.1 skrll
1960 1.1 skrll chan = hsotg->hc_ptr_array[chnum];
1961 1.1 skrll
1962 1.1 skrll hcint = readl(hsotg->regs + HCINT(chnum));
1963 1.1 skrll hcintmsk = readl(hsotg->regs + HCINTMSK(chnum));
1964 1.1 skrll
1965 1.1 skrll if (!chan) {
1966 1.1 skrll dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
1967 1.1 skrll writel(hcint, hsotg->regs + HCINT(chnum));
1968 1.1 skrll return;
1969 1.1 skrll }
1970 1.1 skrll
1971 1.1 skrll if (dbg_hc(chan)) {
1972 1.1 skrll dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
1973 1.1 skrll chnum);
1974 1.1 skrll dev_vdbg(hsotg->dev,
1975 1.1 skrll " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
1976 1.1 skrll hcint, hcintmsk, hcint & hcintmsk);
1977 1.1 skrll }
1978 1.1 skrll
1979 1.1 skrll writel(hcint, hsotg->regs + HCINT(chnum));
1980 1.1 skrll chan->hcint = hcint;
1981 1.1 skrll hcint &= hcintmsk;
1982 1.1 skrll
1983 1.1 skrll /*
1984 1.1 skrll * If the channel was halted due to a dequeue, the qtd list might
1985 1.1 skrll * be empty or at least the first entry will not be the active qtd.
1986 1.1 skrll * In this case, take a shortcut and just release the channel.
1987 1.1 skrll */
1988 1.1 skrll if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
1989 1.1 skrll /*
1990 1.1 skrll * If the channel was halted, this should be the only
1991 1.1 skrll * interrupt unmasked
1992 1.1 skrll */
1993 1.1 skrll WARN_ON(hcint != HCINTMSK_CHHLTD);
1994 1.1 skrll if (hsotg->core_params->dma_desc_enable > 0)
1995 1.1 skrll dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1996 1.1 skrll chan->halt_status);
1997 1.1 skrll else
1998 1.1 skrll dwc2_release_channel(hsotg, chan, NULL,
1999 1.1 skrll chan->halt_status);
2000 1.1 skrll return;
2001 1.1 skrll }
2002 1.1 skrll
2003 1.1 skrll if (list_empty(&chan->qh->qtd_list)) {
2004 1.1 skrll /*
2005 1.1 skrll * TODO: Will this ever happen with the
2006 1.1 skrll * DWC2_HC_XFER_URB_DEQUEUE handling above?
2007 1.1 skrll */
2008 1.1 skrll dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
2009 1.1 skrll chnum);
2010 1.1 skrll dev_dbg(hsotg->dev,
2011 1.1 skrll " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
2012 1.1 skrll chan->hcint, hcintmsk, hcint);
2013 1.1 skrll chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
2014 1.1 skrll disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
2015 1.1 skrll chan->hcint = 0;
2016 1.1 skrll return;
2017 1.1 skrll }
2018 1.1 skrll
2019 1.1 skrll qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
2020 1.1 skrll qtd_list_entry);
2021 1.1 skrll
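/*
 * In Slave mode, a Channel Halted interrupt that arrives by itself
 * is a driver-requested halt (see dwc2_hc_chhltd_intr). If other
 * status bits accompany it, drop CHHLTD here so the handlers for
 * those conditions run first.
 */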
2022 1.1 skrll if (hsotg->core_params->dma_enable <= 0) {
2023 1.1 skrll if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
2024 1.1 skrll hcint &= ~HCINTMSK_CHHLTD;
2025 1.1 skrll }
2026 1.1 skrll
2027 1.1 skrll if (hcint & HCINTMSK_XFERCOMPL) {
2028 1.1 skrll dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
2029 1.1 skrll /*
2030 1.1 skrll * If NYET occurred at the same time as Xfer Complete, the NYET is
2031 1.1 skrll * handled by the Xfer Complete interrupt handler. Don't want
2032 1.1 skrll * to call the NYET interrupt handler in this case.
2033 1.1 skrll */
2034 1.1 skrll hcint &= ~HCINTMSK_NYET;
2035 1.1 skrll }
2036 1.1 skrll if (hcint & HCINTMSK_CHHLTD)
2037 1.1 skrll dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
2038 1.1 skrll if (hcint & HCINTMSK_AHBERR)
2039 1.1 skrll dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
2040 1.1 skrll if (hcint & HCINTMSK_STALL)
2041 1.1 skrll dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
2042 1.1 skrll if (hcint & HCINTMSK_NAK)
2043 1.1 skrll dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
2044 1.1 skrll if (hcint & HCINTMSK_ACK)
2045 1.1 skrll dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
2046 1.1 skrll if (hcint & HCINTMSK_NYET)
2047 1.1 skrll dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
2048 1.1 skrll if (hcint & HCINTMSK_XACTERR)
2049 1.1 skrll dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
2050 1.1 skrll if (hcint & HCINTMSK_BBLERR)
2051 1.1 skrll dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
2052 1.1 skrll if (hcint & HCINTMSK_FRMOVRUN)
2053 1.1 skrll dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
2054 1.1 skrll if (hcint & HCINTMSK_DATATGLERR)
2055 1.1 skrll dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
2056 1.1 skrll
2057 1.1 skrll chan->hcint = 0;
2058 1.1 skrll }
2059 1.1 skrll
2060 1.1 skrll /*
2061 1.1 skrll * This interrupt indicates that one or more host channels have a pending
2062 1.1 skrll * interrupt. There are multiple conditions that can cause each host channel
2063 1.1 skrll * interrupt. This function determines which conditions have occurred for each
2064 1.1 skrll * host channel interrupt and handles them appropriately.
2065 1.1 skrll */
2066 1.1 skrll static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2067 1.1 skrll {
2068 1.1 skrll u32 haint;
2069 1.1 skrll int i;
2070 1.1 skrll
2071 1.1 skrll haint = readl(hsotg->regs + HAINT);
2072 1.1 skrll if (dbg_perio()) {
2073 1.1 skrll dev_vdbg(hsotg->dev, "%s()\n", __func__);
2074 1.1 skrll
2075 1.1 skrll dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2076 1.1 skrll }
2077 1.1 skrll
2078 1.1 skrll for (i = 0; i < hsotg->core_params->host_channels; i++) {
2079 1.1 skrll if (haint & (1 << i))
2080 1.1 skrll dwc2_hc_n_intr(hsotg, i);
2081 1.1 skrll }
2082 1.1 skrll }
2083 1.1 skrll
2084 1.1 skrll /* This function handles interrupts for the HCD */
2085 1.1 skrll irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
2086 1.1 skrll {
2087 1.1 skrll u32 gintsts, dbg_gintsts;
2088 1.1 skrll irqreturn_t retval = IRQ_NONE;
2089 1.1 skrll
2090 1.1 skrll if (dwc2_check_core_status(hsotg) < 0) {
2091 1.1 skrll dev_warn(hsotg->dev, "Controller is disconnected\n");
2092 1.1 skrll return retval;
2093 1.1 skrll }
2094 1.1 skrll
2095 1.1 skrll spin_lock(&hsotg->lock);
2096 1.1 skrll
2097 1.1 skrll /* Check if HOST Mode */
2098 1.1 skrll if (dwc2_is_host_mode(hsotg)) {
2099 1.1 skrll gintsts = dwc2_read_core_intr(hsotg);
2100 1.1 skrll if (!gintsts) {
2101 1.1 skrll spin_unlock(&hsotg->lock);
2102 1.1 skrll return retval;
2103 1.1 skrll }
2104 1.1 skrll
2105 1.1 skrll retval = IRQ_HANDLED;
2106 1.1 skrll
2107 1.1 skrll dbg_gintsts = gintsts;
2108 1.1 skrll #ifndef DEBUG_SOF
2109 1.1 skrll dbg_gintsts &= ~GINTSTS_SOF;
2110 1.1 skrll #endif
2111 1.1 skrll if (!dbg_perio())
2112 1.1 skrll dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
2113 1.1 skrll GINTSTS_PTXFEMP);
2114 1.1 skrll
2115 1.1 skrll /* Only print if there are any non-suppressed interrupts left */
2116 1.1 skrll if (dbg_gintsts)
2117 1.1 skrll dev_vdbg(hsotg->dev,
2118 1.1 skrll "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
2119 1.1 skrll gintsts);
2120 1.1 skrll
2121 1.1 skrll if (gintsts & GINTSTS_SOF)
2122 1.1 skrll dwc2_sof_intr(hsotg);
2123 1.1 skrll if (gintsts & GINTSTS_RXFLVL)
2124 1.1 skrll dwc2_rx_fifo_level_intr(hsotg);
2125 1.1 skrll if (gintsts & GINTSTS_NPTXFEMP)
2126 1.1 skrll dwc2_np_tx_fifo_empty_intr(hsotg);
2127 1.1 skrll if (gintsts & GINTSTS_PRTINT)
2128 1.1 skrll dwc2_port_intr(hsotg);
2129 1.1 skrll if (gintsts & GINTSTS_HCHINT)
2130 1.1 skrll dwc2_hc_intr(hsotg);
2131 1.1 skrll if (gintsts & GINTSTS_PTXFEMP)
2132 1.1 skrll dwc2_perio_tx_fifo_empty_intr(hsotg);
2133 1.1 skrll
2134 1.1 skrll if (dbg_gintsts) {
2135 1.1 skrll dev_vdbg(hsotg->dev,
2136 1.1 skrll "DWC OTG HCD Finished Servicing Interrupts\n");
2137 1.1 skrll dev_vdbg(hsotg->dev,
2138 1.1 skrll "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
2139 1.1 skrll readl(hsotg->regs + GINTSTS),
2140 1.1 skrll readl(hsotg->regs + GINTMSK));
2141 1.1 skrll }
2142 1.1 skrll }
2143 1.1 skrll
2144 1.1 skrll spin_unlock(&hsotg->lock);
2145 1.1 skrll
2146 1.1 skrll return retval;
2147 1.1 skrll }
2148