/*	$NetBSD: dwc2_hcdddma.c,v 1.12 2025/04/12 08:22:31 mlelstv Exp $	*/

/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2_hcdddma.c,v 1.12 2025/04/12 08:22:31 mlelstv Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <linux/kernel.h>
#include <linux/list.h>

#include <dwc2/dwc2.h>
#include <dwc2/dwc2var.h>

#include "dwc2_core.h"
#include "dwc2_hcd.h"

static u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}

static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
		(qh->interval + 8 - 1) / 8 : qh->interval;
}

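/*
 * Allocate DMA-coherent memory for the QH's descriptor list, plus a
 * shadow array (n_bytes) recording the transfer size programmed into
 * each descriptor. May sleep, hence the assertions that we are not in
 * (soft) interrupt context.
 */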
static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	int err;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	qh->desc_list = NULL;
	qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
	    dwc2_max_desc_num(qh);

	err = usb_allocmem(hsotg->hsotg_sc->sc_bus.ub_dmatag,
	    qh->desc_list_sz, 0, USBMALLOC_COHERENT, &qh->desc_list_usbdma);

	if (err)
		return -ENOMEM;

	qh->desc_list = KERNADDR(&qh->desc_list_usbdma, 0);
	qh->desc_list_dma = DMAADDR(&qh->desc_list_usbdma, 0);

	qh->n_bytes = kmem_zalloc(sizeof(u32) * dwc2_max_desc_num(qh), KM_SLEEP);
	if (!qh->n_bytes) {
		usb_freemem(&qh->desc_list_usbdma);
		qh->desc_list = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{

	if (qh->desc_list) {
		usb_freemem(&qh->desc_list_usbdma);
		qh->desc_list = NULL;
	}

	kmem_free(qh->n_bytes, sizeof(u32) * dwc2_max_desc_num(qh));
	qh->n_bytes = NULL;
}

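/*
 * Allocate the FrameList: FRLISTEN_64_SIZE entries of one 32-bit
 * channel bitmap each. A no-op if it has already been allocated.
 */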
static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	int err;

	if (hsotg->frame_list)
		return 0;

	/* XXXNH - pool_cache_t */
	hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
	hsotg->frame_list = NULL;
	err = usb_allocmem(hsotg->hsotg_sc->sc_bus.ub_dmatag,
	    hsotg->frame_list_sz, 0, USBMALLOC_COHERENT,
	    &hsotg->frame_list_usbdma);

	if (!err) {
		hsotg->frame_list = KERNADDR(&hsotg->frame_list_usbdma, 0);
		hsotg->frame_list_dma = DMAADDR(&hsotg->frame_list_usbdma, 0);
	}

	if (!hsotg->frame_list)
		return -ENOMEM;

	return 0;
}

static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	usb_dma_t frame_list_usbdma;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	frame_list_usbdma = hsotg->frame_list_usbdma;
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	usb_freemem(&frame_list_usbdma);
}

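/*
 * Point the controller at the FrameList (HFLBADDR) and turn on the
 * periodic schedule in HCFG with the given FrameList length encoding.
 */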
static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = DWC2_READ_4(hsotg, HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	DWC2_WRITE_4(hsotg, HFLBADDR, hsotg->frame_list_dma);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	DWC2_WRITE_4(hsotg, HCFG, hcfg);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = DWC2_READ_4(hsotg, HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	DWC2_WRITE_4(hsotg, HCFG, hcfg);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		printf("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->sched_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	/*
	 * Sync frame list since controller will access it if periodic
	 * channel is currently enabled.
	 */
	usb_syncmem(&hsotg->frame_list_usbdma, 0, hsotg->frame_list_sz,
	    BUS_DMASYNC_PREWRITE);

	if (!enable)
		return;

	chan->schinfo = 0;
	if (chan->speed == USB_SPEED_HIGH && qh->interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}

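/*
 * Return the host channel to the free list and give its slot back to
 * the scheduler; for periodic QHs the FrameList entries are
 * deactivated first. The descriptor list is cleared so that no stale
 * descriptors remain if the channel is reused.
 */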
static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->core_params->uframe_sched > 0)
			hsotg->available_host_channels++;
		else
			hsotg->non_periodic_channels--;
	} else {
		dwc2_update_frame_list(hsotg, qh, 0);
		hsotg->available_host_channels++;
	}

	/*
	 * The condition is added to prevent double cleanup try in case of
	 * device disconnect. See channel cleanup in dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (!list_empty(&chan->hc_list_entry))
			list_del(&chan->hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		chan->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
		       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to free
 *
 * Frees descriptor list memory associated with the QH. If QH is periodic and
 * the last, frees FrameList memory and disables periodic scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned long flags;

	dwc2_desc_list_free(hsotg, qh);

	/*
	 * The channel may still be assigned: seen on Isoc URB dequeue, where
	 * the channel is halted but no subsequent ChHalted interrupt arrives
	 * to release it. When we then get here from the endpoint disable
	 * routine, the channel is still assigned.
	 */
	spin_lock_irqsave(&hsotg->lock, flags);
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    (hsotg->core_params->uframe_sched > 0 ||
	     !hsotg->periodic_channels) && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}

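/*
 * Map a FrameList index to a descriptor list index. For HS
 * isochronous QHs each frame owns a set of 8 descriptors (one per
 * microframe), so the result is 8-aligned.
 */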
static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}

/*
 * Determine starting frame for Isochronous transfer.
 * Few frames skipped to prevent race condition with HC.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/* sched_frame is always frame number (not uFrame) both in FS and HS! */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since HC always fetches
	 * the descriptor corresponding to curr_frame+1, the descriptor
	 * corresponding to frame 2 will be fetched. If the number of
	 * descriptors is max=64 (or greater) the list will be fully programmed
	 * with Active descriptors and it is possible (rare) that the latest
	 * descriptor (considering rollback) corresponding to frame 2 will be
	 * serviced first. The HS case is more probable because, in fact, up to
	 * 11 uframes (16 in the code) may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider uframe counter also, to start xfer asap. If half of
		 * the frame elapsed skip 2 frames otherwise just 1 frame.
		 * Starting descriptor index must be 8-aligned, so if the
		 * current frame is near to complete the next one is skipped as
		 * well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}

/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With the current ISOC processing algorithm the channel is released
	 * when there are no more QTDs in the list (qh->ntd == 0). Thus this
	 * function is called only when qh->ntd == 0 and qh->channel == 0.
	 *
	 * So the qh->channel != NULL branch is not used and just not removed
	 * from the source file. It is required for another possible approach
	 * which is, do not disable and release the channel when an ISOC
	 * session completed, just move the QH to the inactive schedule until
	 * a new QTD arrives. On a new QTD, the QH is moved back to the
	 * 'ready' schedule, and the starting frame and therefore starting
	 * desc_index are recalculated. In this case the channel is released
	 * only on ep_disable.
	 */

	/*
	 * Calculate starting descriptor index. For INTERRUPT endpoint it is
	 * always 0.
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on FrameList current
		 * bitmap and servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
			 % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
	} else {
		qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
							   &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}

#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4

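/*
 * Program one isochronous descriptor: buffer address, transfer size
 * (clamped to max_xfer_size) and the Active bit, then sync it out to
 * the device.
 */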
static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

	/* Set active bit */
	dma_desc->status |= HOST_DMA_A;

	qh->ntd++;
	qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for each descriptor corresponding to last frame of URB */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif

	usb_syncmem(&qh->desc_list_usbdma,
	    (idx * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_PREWRITE);
}

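/*
 * Fill the descriptor list for an isochronous QH from its QTD list,
 * one descriptor per scheduled (micro)frame, and pick the descriptor
 * that should raise the IOC interrupt.
 */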
static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc = 0, ntd_max = 0;
	u16 cur_idx;
	u16 next_idx;

	idx = qh->td_last;
	inc = qh->interval;
	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
	cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
	next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);

	/*
	 * Ensure current frame number didn't overstep last scheduled
	 * descriptor. If it happens, the only way to recover is to move
	 * qh->td_last to current frame number + 1.
	 * So that next isoc descriptor will be scheduled on frame number + 1
	 * and not on a past frame.
	 */
	if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
		if (inc < 32) {
			dev_vdbg(hsotg->dev,
				 "current frame number overstep last descriptor\n");
			qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
							    qh->dev_speed);
			idx = qh->td_last;
		}
	}

	if (qh->interval) {
		ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
				qh->interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->interval;
	}

	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		if (qtd->in_process &&
		    qtd->isoc_frame_index_last ==
		    qtd->urb->packet_count)
			continue;

		qtd->isoc_td_first = idx;
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
						qtd->urb->packet_count) {
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->isoc_td_last = idx;
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for last descriptor if descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;

		usb_syncmem(&qh->desc_list_usbdma,
		    (idx * sizeof(struct dwc2_hcd_dma_desc)),
		    sizeof(struct dwc2_hcd_dma_desc),
		    BUS_DMASYNC_PREWRITE);
	}
#else
	/*
	 * Set IOC bit only for one descriptor. Always try to be ahead of HW
	 * processing, i.e. on IOC generation driver activates next descriptor
	 * but core continues to process descriptors following the one with IOC
	 * set.
	 */

	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * but during the activation it was only one queued.
		 * Actually more than one QTD might be in the list if this
		 * function is called from XferCompletion - QTDs were queued
		 * during HW processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either number of
		 * descriptors is not greater than threshold or no more new
		 * descriptors activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
	usb_syncmem(&qh->desc_list_usbdma,
	    (idx * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_PREWRITE);
#endif
}

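/*
 * Program one non-isochronous descriptor from the channel's current
 * transfer state and advance xfer_dma/xfer_len past the bytes just
 * described. IN transfers are always programmed as an integral number
 * of max-packet-size packets.
 */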
static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
	int len = chan->xfer_len;

	if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
		len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);

	if (chan->ep_is_in) {
		int num_packets;

		if (len > 0 && chan->max_packet)
			num_packets = (len + chan->max_packet - 1)
					/ chan->max_packet;
		else
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;

		/* Always program an integral # of packets for IN transfers */
		len = num_packets * chan->max_packet;
	}

	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = len;

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		dma_desc->status |= HOST_DMA_SUP;

	dma_desc->buf = (u32)chan->xfer_dma;

	usb_syncmem(&qh->desc_list_usbdma,
	    (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Last (or only) descriptor of IN transfer with actual size less
	 * than MaxPacket
	 */
	if (len > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += len;		/* XXXNH safe */
		chan->xfer_len -= len;
	}
}

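/*
 * Build the descriptor chain for a Control/Bulk/Interrupt QH. The last
 * descriptor gets IOC, EOL and Active bits; the first descriptor's
 * Active bit is set last so the core cannot start fetching the chain
 * before it is fully built.
 */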
static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan = qh->channel;
	int n_desc = 0;

	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
		 (unsigned long)chan->xfer_dma, chan->xfer_len);

	/*
	 * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
	 * if SG transfer consists of multiple URBs, this pointer is re-assigned
	 * to the buffer of the currently processed QTD. For non-SG request
	 * there is always one QTD active.
	 */

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

		if (n_desc) {
			/* SG request - more than 1 QTD */
			chan->xfer_dma = DMAADDR(qtd->urb->usbdma,
					qtd->urb->actual_length);
			chan->xfer_len = qtd->urb->length -
					qtd->urb->actual_length;
			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
				 (unsigned long)chan->xfer_dma, chan->xfer_len);
		}

		qtd->n_desc = 0;
		do {
			if (n_desc > 1) {
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
				dev_vdbg(hsotg->dev,
					 "set A bit in desc %d (%p)\n",
					 n_desc - 1,
					 &qh->desc_list[n_desc - 1]);
				usb_syncmem(&qh->desc_list_usbdma,
				    ((n_desc - 1) *
				    sizeof(struct dwc2_hcd_dma_desc)),
				    sizeof(struct dwc2_hcd_dma_desc),
				    BUS_DMASYNC_PREWRITE);
			}
			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
			dev_vdbg(hsotg->dev,
				 "desc %d (%p) buf=%08x status=%08x\n",
				 n_desc, &qh->desc_list[n_desc],
				 qh->desc_list[n_desc].buf,
				 qh->desc_list[n_desc].status);
			qtd->n_desc++;
			n_desc++;
		} while (chan->xfer_len > 0 &&
			 n_desc != MAX_DMA_DESC_NUM_GENERIC);

		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
		qtd->in_process = 1;
		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
			break;
		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |=
			HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
			 n_desc - 1, &qh->desc_list[n_desc - 1]);
		usb_syncmem(&qh->desc_list_usbdma,
		    ((n_desc - 1) * sizeof(struct dwc2_hcd_dma_desc)),
		    sizeof(struct dwc2_hcd_dma_desc),
		    BUS_DMASYNC_PREWRITE);
		if (n_desc > 1) {
			qh->desc_list[0].status |= HOST_DMA_A;
			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
				 &qh->desc_list[0]);
			usb_syncmem(&qh->desc_list_usbdma, 0,
			    sizeof(struct dwc2_hcd_dma_desc),
			    BUS_DMASYNC_PREWRITE);
		}
		chan->ntd = n_desc;
	}
}

/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * For Control and Bulk endpoints, initializes descriptor list and starts the
 * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
 * list then updates FrameList, marking appropriate entries as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on XferComplete
 * interrupt so remains assigned to the endpoint(QH) until session is done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Channel is already assigned */
	struct dwc2_host_chan *chan = qh->channel;
	u16 skip_frames = 0;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_update_frame_list(hsotg, qh, 1);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!qh->ntd)
			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

		if (!chan->xfer_started) {
			dwc2_update_frame_list(hsotg, qh, 1);

			/*
			 * Always set to max, instead of actual size. Otherwise
			 * ntd will be changed with channel being enabled. Not
			 * recommended.
			 */
			chan->ntd = dwc2_max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc2_hc_start_transfer_ddma(hsotg, chan);
		}

		break;
	default:
		break;
	}
}

#define DWC2_CMPL_DONE		1
#define DWC2_CMPL_STOP		2

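/*
 * Complete one isochronous descriptor: copy status and actual length
 * back into the URB's frame descriptor, and give the URB back once its
 * last frame is done. Returns DWC2_CMPL_DONE when the URB completed,
 * DWC2_CMPL_STOP when the IOC descriptor is reached, 0 to keep
 * scanning, or a negative value if the scan must be abandoned (no URB,
 * or the URB was dequeued from its completion callback).
 */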
static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_qh *qh, u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc;
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u16 remain = 0;
	int rc = 0;

	if (!qtd->urb)
		return -EINVAL;

	usb_syncmem(&qh->desc_list_usbdma,
	    (idx * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_POSTREAD);

	dma_desc = &qh->desc_list[idx];

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
			 HOST_DMA_ISOC_NBYTES_SHIFT;

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/*
		 * XactError, or unable to complete all the transactions
		 * in the scheduled micro-frame/frame, both indicated by
		 * HOST_DMA_STS_PKTERR
		 */
		qtd->urb->error_count++;
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = -EPROTO;
	} else {
		/* Success */
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = 0;
	}

	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers here. The
		 * individual frame_desc status are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/*
		 * This check is necessary because urb_dequeue can be called
		 * from urb complete callback (sound driver for example). All
		 * pending URBs are dequeued there, so no need for further
		 * processing.
		 */
		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
			return -1;
		rc = DWC2_CMPL_DONE;
	}

	qh->ntd--;

	/* Stop if IOC requested descriptor reached */
	if (dma_desc->status & HOST_DMA_IOC)
		rc = DWC2_CMPL_STOP;

	return rc;
}

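/*
 * Scan the isochronous descriptor list from qh->td_first, completing
 * descriptors until the IOC descriptor is reached. AHB and Babble
 * errors fail every frame of every queued URB, since the channel is
 * halted and the transfer cannot continue.
 */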
static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					 struct dwc2_host_chan *chan,
					 enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	u16 idx;
	int rc;

	qh = chan->qh;
	idx = qh->td_first;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
		/*
		 * Channel is halted in these error cases, considered as serious
		 * issues.
		 * Complete all URBs marking all frames as failed, irrespective
		 * whether some of the descriptors (frames) succeeded or not.
		 * Pass error code to completion routine as well, to update
		 * urb->status, some of class drivers might use it to stop
		 * queuing transfer requests.
		 */
		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
			  -EIO : -EOVERFLOW;

		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry) {
			if (qtd->urb) {
				for (idx = 0; idx < qtd->urb->packet_count;
				     idx++) {
					frame_desc = &qtd->urb->iso_descs[idx];
					frame_desc->status = err;
				}

				dwc2_host_complete(hsotg, qtd, err);
			}

			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}

		return;
	}

	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
		if (!qtd->in_process)
			break;

		/*
		 * Ensure idx corresponds to descriptor where first urb of this
		 * qtd was added. In fact, during isoc desc init, dwc2 may skip
		 * an index if current frame number is already over this index.
		 */
		if (idx != qtd->isoc_td_first) {
			dev_vdbg(hsotg->dev,
				 "try to complete %d instead of %d\n",
				 idx, qtd->isoc_td_first);
			idx = qtd->isoc_td_first;
		}

		do {
			struct dwc2_qtd *qtd_next;
			u16 cur_idx;

			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
							  idx);
			if (rc < 0)
				return;
			idx = dwc2_desclist_idx_inc(idx, qh->interval,
						    chan->speed);
			if (!rc)
				continue;

			if (rc == DWC2_CMPL_DONE || rc == DWC2_CMPL_STOP)
				goto stop_scan;

			if (qh->interval >= 32)
				goto stop_scan;

			qh->td_first = idx;
			cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
			qtd_next = list_first_entry(&qh->qtd_list,
						    struct dwc2_qtd,
						    qtd_list_entry);
			if (dwc2_frame_idx_num_gt(cur_idx,
						  qtd_next->isoc_td_last))
				break;

			goto stop_scan;

		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}

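/*
 * Fold one completed descriptor into the URB state: accumulate the
 * actual length, translate a descriptor error status into an errno in
 * urb->status, and flag *xfer_done when the whole URB has finished.
 * Returns 1 if the URB failed, otherwise 0.
 */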
static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
					       struct dwc2_host_chan *chan,
					       struct dwc2_qtd *qtd,
					       struct dwc2_hcd_dma_desc *dma_desc,
					       enum dwc2_halt_status halt_status,
					       u32 n_bytes, int *xfer_done)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	u16 remain = 0;

	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
			 HOST_DMA_NBYTES_SHIFT;

	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
		dev_err(hsotg->dev, "EIO\n");
		urb->status = -EIO;
		return 1;
	}

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		switch (halt_status) {
		case DWC2_HC_XFER_STALL:
			dev_vdbg(hsotg->dev, "Stall\n");
			urb->status = -EPIPE;
			break;
		case DWC2_HC_XFER_BABBLE_ERR:
			dev_err(hsotg->dev, "Babble\n");
			urb->status = -EOVERFLOW;
			break;
		case DWC2_HC_XFER_XACT_ERR:
			dev_err(hsotg->dev, "XactErr\n");
			urb->status = -EPROTO;
			break;
		default:
			dev_err(hsotg->dev,
				"%s: Unhandled descriptor error status (%d)\n",
				__func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status & HOST_DMA_A) {
		dev_vdbg(hsotg->dev,
			 "Active descriptor encountered on channel %d\n",
			 chan->hc_num);
		return 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		if (qtd->control_phase == DWC2_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length >= urb->length) {
				/*
				 * For Control Data stage do not set urb->status
				 * to 0, to prevent URB callback. Set it when
				 * Status phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
			 urb->actual_length);
		if (remain || urb->actual_length >= urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}

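/*
 * Post-process a single non-isochronous descriptor: sync it back from
 * the device, update the URB, give the URB back if it failed or
 * finished, and advance the Control transfer state machine.
 */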
static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan,
				      int chnum, struct dwc2_qtd *qtd,
				      int desc_num,
				      enum dwc2_halt_status halt_status,
				      int *xfer_done)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_hcd_dma_desc *dma_desc;
	u32 n_bytes;
	int failed;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!urb)
		return -EINVAL;

	usb_syncmem(&qh->desc_list_usbdma,
	    (desc_num * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_POSTREAD);

	dma_desc = &qh->desc_list[desc_num];
	n_bytes = qh->n_bytes[desc_num];
	dev_vdbg(hsotg->dev,
		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
		 qtd, urb, desc_num, dma_desc, n_bytes);
	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
						     halt_status, n_bytes,
						     xfer_done);
	if (*xfer_done && urb->status != -EINPROGRESS)
		failed = 1;

	if (failed) {
		dwc2_host_complete(hsotg, qtd, urb->status);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
			 failed, *xfer_done, urb->status);
		return failed;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 "  Control setup transaction done\n");
			break;
		case DWC2_CONTROL_DATA:
			if (*xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 "  Control data transfer done\n");
			} else if (desc_num + 1 == qtd->n_desc) {
				/*
				 * Last descriptor for Control data stage which
				 * is not completed yet
				 */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

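/*
 * Walk the QTD list and process every descriptor programmed for this
 * channel, then handle data toggle and PING state for the next
 * transfer.
 */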
static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					     struct dwc2_host_chan *chan,
					     int chnum,
					     enum dwc2_halt_status halt_status)
{
	struct list_head *qtd_item, *qtd_tmp;
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_qtd *qtd = NULL;
	int xfer_done;
	int desc_num = 0;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
		int i;

		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
		xfer_done = 0;

		for (i = 0; i < qtd->n_desc; i++) {
			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
						       desc_num, halt_status,
						       &xfer_done)) {
				qtd = NULL;
				break;
			}
			desc_num++;
		}
	}

	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC2_HC_XFER_STALL)
			qh->data_toggle = DWC2_HC_PID_DATA0;
		else if (qtd)
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
	}

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
		}
	}
}

/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
 * status and calls completion routine for the URB if it's done. Called from
 * interrupt handlers.
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In case of Isochronous endpoint the channel is not halted until the end of
 * the session, i.e. QTD list is empty.
 * If periodic channel released the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	int continue_isoc_xfer = 0;
	enum dwc2_transaction_type tr_type;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC2_HC_XFER_COMPLETE ||
		    list_empty(&qh->qtd_list)) {
			struct dwc2_qtd *qtd, *qtd_tmp;

			/*
			 * Kill all remaining QTDs since channel has been
			 * halted.
			 */
			list_for_each_entry_safe(qtd, qtd_tmp,
						 &qh->qtd_list,
						 qtd_list_entry) {
				dwc2_host_complete(hsotg, qtd,
						   -ECONNRESET);
				dwc2_hcd_qtd_unlink_and_free(hsotg,
							     qtd, qh);
			}

			/* Halt the channel if session completed */
			if (halt_status == DWC2_HC_XFER_COMPLETE)
				dwc2_hc_halt(hsotg, chan, halt_status);
			dwc2_release_channel_ddma(hsotg, qh);
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);
			/*
			 * If channel has been halted during giveback of urb
			 * then prevent any new scheduling.
			 */
			if (!chan->halt_status)
				continue_isoc_xfer = 1;
		}
		/*
		 * Todo: Consider the case when period exceeds FrameList size.
		 * Frame Rollover interrupt should be used.
		 */
	} else {
		/*
		 * Scan descriptor list to complete the URB(s), then release
		 * the channel
		 */
		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
						 halt_status);
		dwc2_release_channel_ddma(hsotg, qh);
		dwc2_hcd_qh_unlink(hsotg, qh);

		if (!list_empty(&qh->qtd_list)) {
			/*
			 * Add back to inactive non-periodic schedule on normal
			 * completion
			 */
			dwc2_hcd_qh_add(hsotg, qh);
		}
	}

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC2_TRANSACTION_NONE)
				tr_type = DWC2_TRANSACTION_PERIODIC;
			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
				tr_type = DWC2_TRANSACTION_ALL;
		}
		dwc2_hcd_queue_transactions(hsotg, tr_type);
	}
}