/*	$NetBSD: dwc2_hcdddma.c,v 1.12 2025/04/12 08:22:31 mlelstv Exp $	*/

/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2_hcdddma.c,v 1.12 2025/04/12 08:22:31 mlelstv Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <linux/kernel.h>
#include <linux/list.h>

#include <dwc2/dwc2.h>
#include <dwc2/dwc2var.h>

#include "dwc2_core.h"
#include "dwc2_hcd.h"

static u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}

static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
		(qh->interval + 8 - 1) / 8 : qh->interval;
}

static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	int err;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	qh->desc_list = NULL;
	qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
						dwc2_max_desc_num(qh);

	err = usb_allocmem(hsotg->hsotg_sc->sc_bus.ub_dmatag,
	    qh->desc_list_sz, 0, USBMALLOC_COHERENT, &qh->desc_list_usbdma);

	if (err)
		return -ENOMEM;

	qh->desc_list = KERNADDR(&qh->desc_list_usbdma, 0);
	qh->desc_list_dma = DMAADDR(&qh->desc_list_usbdma, 0);

	qh->n_bytes = kmem_zalloc(sizeof(u32) * dwc2_max_desc_num(qh), KM_SLEEP);
	if (!qh->n_bytes) {
		usb_freemem(&qh->desc_list_usbdma);
		qh->desc_list = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{

	if (qh->desc_list) {
		usb_freemem(&qh->desc_list_usbdma);
		qh->desc_list = NULL;
	}

	kmem_free(qh->n_bytes, sizeof(u32) * dwc2_max_desc_num(qh));
	qh->n_bytes = NULL;
}

static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	int err;

	if (hsotg->frame_list)
		return 0;

	/* XXXNH - pool_cache_t */
	hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
	hsotg->frame_list = NULL;
	err = usb_allocmem(hsotg->hsotg_sc->sc_bus.ub_dmatag,
	    hsotg->frame_list_sz, 0, USBMALLOC_COHERENT, &hsotg->frame_list_usbdma);

	if (!err) {
		hsotg->frame_list = KERNADDR(&hsotg->frame_list_usbdma, 0);
		hsotg->frame_list_dma = DMAADDR(&hsotg->frame_list_usbdma, 0);
	}

	if (!hsotg->frame_list)
		return -ENOMEM;

	return 0;
}

static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	usb_dma_t frame_list_usbdma;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	frame_list_usbdma = hsotg->frame_list_usbdma;
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	usb_freemem(&frame_list_usbdma);
}

static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = DWC2_READ_4(hsotg, HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	DWC2_WRITE_4(hsotg, HFLBADDR, hsotg->frame_list_dma);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	DWC2_WRITE_4(hsotg, HCFG, hcfg);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = DWC2_READ_4(hsotg, HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	DWC2_WRITE_4(hsotg, HCFG, hcfg);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
static void
dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
		       int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		printf("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->sched_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	/*
	 * Sync frame list since controller will access it if periodic
	 * channel is currently enabled.
	 */
	usb_syncmem(&hsotg->frame_list_usbdma, 0, hsotg->frame_list_sz,
	    BUS_DMASYNC_PREWRITE);

	if (!enable)
		return;

	chan->schinfo = 0;
	if (chan->speed == USB_SPEED_HIGH && qh->interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}

static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->core_params->uframe_sched > 0)
			hsotg->available_host_channels++;
		else
			hsotg->non_periodic_channels--;
	} else {
		dwc2_update_frame_list(hsotg, qh, 0);
		hsotg->available_host_channels++;
	}

	/*
	 * This condition is added to prevent a double cleanup attempt in case
	 * of device disconnect. See channel cleanup in dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (!list_empty(&chan->hc_list_entry))
			list_del(&chan->hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		chan->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
		       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to free
 *
 * Frees descriptor list memory associated with the QH. If the QH is periodic
 * and the last one, frees FrameList memory and disables periodic scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned long flags;

	dwc2_desc_list_free(hsotg, qh);

	/*
	 * The channel may still be assigned here. Seen on Isoc URB dequeue:
	 * the channel was halted but no subsequent ChHalted interrupt arrived
	 * to release it, so when this is reached later from the endpoint
	 * disable routine the channel remains assigned.
	 */
	spin_lock_irqsave(&hsotg->lock, flags);
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    (hsotg->core_params->uframe_sched > 0 ||
	     !hsotg->periodic_channels) && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}

static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}

/*
 * Determine the starting frame for an Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/* sched_frame is always frame number (not uFrame) both in FS and HS! */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since the HC always
	 * fetches the descriptor corresponding to curr_frame+1, the
	 * descriptor corresponding to frame 2 will be fetched. If the number
	 * of descriptors is max=64 (or greater) the list will be fully
	 * programmed with Active descriptors and it is possible (though rare)
	 * that the latest descriptor (considering rollback) corresponding to
	 * frame 2 will be serviced first. The HS case is more probable
	 * because, in fact, up to 11 uframes (16 in the code) may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider the uframe counter as well, to start the transfer
		 * asap. If half of the frame has elapsed, skip 2 frames,
		 * otherwise just 1. The starting descriptor index must be
		 * 8-aligned, so if the current frame is close to completion
		 * the next one is skipped as well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see the example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}

/*
 * Calculate the initial descriptor index for an isochronous transfer based
 * on the scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With the current ISOC processing algorithm the channel is released
	 * when there are no more QTDs in the list (qh->ntd == 0). Thus this
	 * function is called only when qh->ntd == 0 and qh->channel == 0.
	 *
	 * So the qh->channel != NULL branch is not used; it is just not
	 * removed from the source file. It is required for another possible
	 * approach: do not disable and release the channel when the ISOC
	 * session completes, just move the QH to the inactive schedule until
	 * a new QTD arrives. On a new QTD, the QH is moved back to the
	 * 'ready' schedule, and the starting frame and therefore the starting
	 * desc_index are recalculated. In that case the channel is released
	 * only on ep_disable.
	 */

	/*
	 * Calculate the starting descriptor index. For INTERRUPT endpoints
	 * it is always 0.
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on FrameList current
		 * bitmap and servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
			 % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
	} else {
		qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
							   &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}

#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4

static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

	/* Set active bit */
	dma_desc->status |= HOST_DMA_A;

	qh->ntd++;
	qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for each descriptor corresponding to last frame of URB */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif

	usb_syncmem(&qh->desc_list_usbdma,
	    (idx * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_PREWRITE);
}

static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc = 0, ntd_max = 0;
	u16 cur_idx;
	u16 next_idx;

	idx = qh->td_last;
	inc = qh->interval;
	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
	cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
	next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);

	/*
	 * Ensure current frame number didn't overstep last scheduled
	 * descriptor. If it happens, the only way to recover is to move
	 * qh->td_last to current frame number + 1.
	 * So that next isoc descriptor will be scheduled on frame number + 1
	 * and not on a past frame.
	 */
	if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
		if (inc < 32) {
			dev_vdbg(hsotg->dev,
				 "current frame number overstep last descriptor\n");
			qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
							    qh->dev_speed);
			idx = qh->td_last;
		}
	}

	if (qh->interval) {
		ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
				qh->interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->interval;
	}

	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		if (qtd->in_process &&
		    qtd->isoc_frame_index_last ==
		    qtd->urb->packet_count)
			continue;

		qtd->isoc_td_first = idx;
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
						qtd->urb->packet_count) {
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->isoc_td_last = idx;
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for last descriptor if descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;

		usb_syncmem(&qh->desc_list_usbdma,
		    (idx * sizeof(struct dwc2_hcd_dma_desc)),
		    sizeof(struct dwc2_hcd_dma_desc),
		    BUS_DMASYNC_PREWRITE);
	}
#else
	/*
	 * Set the IOC bit only for one descriptor. Always try to be ahead of
	 * HW processing, i.e. on IOC generation the driver activates the next
	 * descriptor but the core continues to process descriptors following
	 * the one with IOC set.
	 */

	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * but during the activation only one was queued.
		 * Actually more than one QTD might be in the list if this
		 * function is called from XferCompletion - QTDs were queued
		 * during HW processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either the number
		 * of descriptors is not greater than the threshold or no more
		 * new descriptors were activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
	usb_syncmem(&qh->desc_list_usbdma,
	    (idx * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_PREWRITE);
#endif
}

static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
	int len = chan->xfer_len;

	if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
		len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);

	if (chan->ep_is_in) {
		int num_packets;

		if (len > 0 && chan->max_packet)
			num_packets = (len + chan->max_packet - 1)
					/ chan->max_packet;
		else
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;

		/* Always program an integral # of packets for IN transfers */
		len = num_packets * chan->max_packet;
	}

	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = len;

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		dma_desc->status |= HOST_DMA_SUP;

	dma_desc->buf = (u32)chan->xfer_dma;

	usb_syncmem(&qh->desc_list_usbdma,
	    (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Last (or only) descriptor of IN transfer with actual size less
	 * than MaxPacket
	 */
	if (len > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += len;	/* XXXNH safe */
		chan->xfer_len -= len;
	}
}

static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan = qh->channel;
	int n_desc = 0;

	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
		 (unsigned long)chan->xfer_dma, chan->xfer_len);

	/*
	 * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
	 * if the SG transfer consists of multiple URBs, this pointer is
	 * re-assigned to the buffer of the currently processed QTD. For
	 * non-SG requests there is always one QTD active.
	 */

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

		if (n_desc) {
			/* SG request - more than 1 QTD */
			chan->xfer_dma = DMAADDR(qtd->urb->usbdma,
					qtd->urb->actual_length);
			chan->xfer_len = qtd->urb->length -
					qtd->urb->actual_length;
			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
				 (unsigned long)chan->xfer_dma, chan->xfer_len);
		}

		qtd->n_desc = 0;
		do {
			if (n_desc > 1) {
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
				dev_vdbg(hsotg->dev,
					 "set A bit in desc %d (%p)\n",
					 n_desc - 1,
					 &qh->desc_list[n_desc - 1]);
				usb_syncmem(&qh->desc_list_usbdma,
				    ((n_desc - 1) *
				    sizeof(struct dwc2_hcd_dma_desc)),
				    sizeof(struct dwc2_hcd_dma_desc),
				    BUS_DMASYNC_PREWRITE);
			}
			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
			dev_vdbg(hsotg->dev,
				 "desc %d (%p) buf=%08x status=%08x\n",
				 n_desc, &qh->desc_list[n_desc],
				 qh->desc_list[n_desc].buf,
				 qh->desc_list[n_desc].status);
			qtd->n_desc++;
			n_desc++;
		} while (chan->xfer_len > 0 &&
			 n_desc != MAX_DMA_DESC_NUM_GENERIC);

		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
		qtd->in_process = 1;
		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
			break;
		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |=
			HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
			 n_desc - 1, &qh->desc_list[n_desc - 1]);
		usb_syncmem(&qh->desc_list_usbdma,
		    ((n_desc - 1) * sizeof(struct dwc2_hcd_dma_desc)),
		    sizeof(struct dwc2_hcd_dma_desc),
		    BUS_DMASYNC_PREWRITE);
		if (n_desc > 1) {
			qh->desc_list[0].status |= HOST_DMA_A;
			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
				 &qh->desc_list[0]);
			usb_syncmem(&qh->desc_list_usbdma, 0,
			    sizeof(struct dwc2_hcd_dma_desc),
			    BUS_DMASYNC_PREWRITE);
		}
		chan->ntd = n_desc;
	}
}

/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * For Control and Bulk endpoints, initializes descriptor list and starts the
 * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
 * list then updates FrameList, marking appropriate entries as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on the XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session
 * is done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Channel is already assigned */
	struct dwc2_host_chan *chan = qh->channel;
	u16 skip_frames = 0;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_update_frame_list(hsotg, qh, 1);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!qh->ntd)
			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

		if (!chan->xfer_started) {
			dwc2_update_frame_list(hsotg, qh, 1);

			/*
			 * Always set to max, instead of actual size. Otherwise
			 * ntd will be changed with channel being enabled. Not
			 * recommended.
			 */
			chan->ntd = dwc2_max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc2_hc_start_transfer_ddma(hsotg, chan);
		}

		break;
	default:
		break;
	}
}

#define DWC2_CMPL_DONE		1
#define DWC2_CMPL_STOP		2

static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_qh *qh, u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc;
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u16 remain = 0;
	int rc = 0;

	if (!qtd->urb)
		return -EINVAL;

	usb_syncmem(&qh->desc_list_usbdma,
	    (idx * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_POSTREAD);

	dma_desc = &qh->desc_list[idx];

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
			 HOST_DMA_ISOC_NBYTES_SHIFT;

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/*
		 * XactError, or unable to complete all the transactions
		 * in the scheduled micro-frame/frame, both indicated by
		 * HOST_DMA_STS_PKTERR
		 */
		qtd->urb->error_count++;
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = -EPROTO;
	} else {
		/* Success */
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = 0;
	}

	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers here. The
		 * individual frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/*
		 * This check is necessary because urb_dequeue can be called
		 * from the urb complete callback (sound driver, for example).
		 * All pending URBs are dequeued there, so no need for further
		 * processing.
		 */
		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
			return -1;
		rc = DWC2_CMPL_DONE;
	}

	qh->ntd--;

	/* Stop if IOC requested descriptor reached */
	if (dma_desc->status & HOST_DMA_IOC)
		rc = DWC2_CMPL_STOP;

	return rc;
}

static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					 struct dwc2_host_chan *chan,
					 enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	u16 idx;
	int rc;

	qh = chan->qh;
	idx = qh->td_first;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
		/*
		 * The channel is halted in these error cases, which are
		 * considered serious issues.
		 * Complete all URBs marking all frames as failed, irrespective
		 * of whether some of the descriptors (frames) succeeded or
		 * not. Pass the error code to the completion routine as well,
		 * to update urb->status; some class drivers might use it to
		 * stop queuing transfer requests.
		 */
		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
			  -EIO : -EOVERFLOW;

		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry) {
			if (qtd->urb) {
				for (idx = 0; idx < qtd->urb->packet_count;
				     idx++) {
					frame_desc = &qtd->urb->iso_descs[idx];
					frame_desc->status = err;
				}

				dwc2_host_complete(hsotg, qtd, err);
			}

			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}

		return;
	}

	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
		if (!qtd->in_process)
			break;

		/*
		 * Ensure idx corresponds to the descriptor where the first
		 * urb of this qtd was added. In fact, during isoc desc init,
		 * dwc2 may skip an index if the current frame number is
		 * already past that index.
		 */
		if (idx != qtd->isoc_td_first) {
			dev_vdbg(hsotg->dev,
				 "try to complete %d instead of %d\n",
				 idx, qtd->isoc_td_first);
			idx = qtd->isoc_td_first;
		}

		do {
			struct dwc2_qtd *qtd_next;
			u16 cur_idx;

			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
							  idx);
			if (rc < 0)
				return;
			idx = dwc2_desclist_idx_inc(idx, qh->interval,
						    chan->speed);
			if (!rc)
				continue;

			if (rc == DWC2_CMPL_DONE || rc == DWC2_CMPL_STOP)
				goto stop_scan;

			if (qh->interval >= 32)
				goto stop_scan;

			qh->td_first = idx;
			cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
			qtd_next = list_first_entry(&qh->qtd_list,
						    struct dwc2_qtd,
						    qtd_list_entry);
			if (dwc2_frame_idx_num_gt(cur_idx,
						  qtd_next->isoc_td_last))
				break;

			goto stop_scan;

		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}

static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
					       struct dwc2_host_chan *chan,
					       struct dwc2_qtd *qtd,
					       struct dwc2_hcd_dma_desc *dma_desc,
					       enum dwc2_halt_status halt_status,
					       u32 n_bytes, int *xfer_done)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	u16 remain = 0;

	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
			 HOST_DMA_NBYTES_SHIFT;

	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
		dev_err(hsotg->dev, "EIO\n");
		urb->status = -EIO;
		return 1;
	}

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		switch (halt_status) {
		case DWC2_HC_XFER_STALL:
			dev_vdbg(hsotg->dev, "Stall\n");
			urb->status = -EPIPE;
			break;
		case DWC2_HC_XFER_BABBLE_ERR:
			dev_err(hsotg->dev, "Babble\n");
			urb->status = -EOVERFLOW;
			break;
		case DWC2_HC_XFER_XACT_ERR:
			dev_err(hsotg->dev, "XactErr\n");
			urb->status = -EPROTO;
			break;
		default:
			dev_err(hsotg->dev,
				"%s: Unhandled descriptor error status (%d)\n",
				__func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status & HOST_DMA_A) {
		dev_vdbg(hsotg->dev,
			 "Active descriptor encountered on channel %d\n",
			 chan->hc_num);
		return 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		if (qtd->control_phase == DWC2_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length >= urb->length) {
				/*
				 * For Control Data stage do not set urb->status
				 * to 0, to prevent URB callback. Set it when
				 * Status phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
			 urb->actual_length);
		if (remain || urb->actual_length >= urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}

static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan,
				      int chnum, struct dwc2_qtd *qtd,
				      int desc_num,
				      enum dwc2_halt_status halt_status,
				      int *xfer_done)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_hcd_dma_desc *dma_desc;
	u32 n_bytes;
	int failed;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!urb)
		return -EINVAL;

	usb_syncmem(&qh->desc_list_usbdma,
	    (desc_num * sizeof(struct dwc2_hcd_dma_desc)),
	    sizeof(struct dwc2_hcd_dma_desc),
	    BUS_DMASYNC_POSTREAD);

	dma_desc = &qh->desc_list[desc_num];
	n_bytes = qh->n_bytes[desc_num];
	dev_vdbg(hsotg->dev,
		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
		 qtd, urb, desc_num, dma_desc, n_bytes);
	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
						     halt_status, n_bytes,
						     xfer_done);
	if (*xfer_done && urb->status != -EINPROGRESS)
		failed = 1;

	if (failed) {
		dwc2_host_complete(hsotg, qtd, urb->status);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
			 failed, *xfer_done, urb->status);
		return failed;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 " Control setup transaction done\n");
			break;
		case DWC2_CONTROL_DATA:
			if (*xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 " Control data transfer done\n");
			} else if (desc_num + 1 == qtd->n_desc) {
				/*
				 * Last descriptor for Control data stage which
				 * is not completed yet
				 */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					     struct dwc2_host_chan *chan,
					     int chnum,
					     enum dwc2_halt_status halt_status)
{
	struct list_head *qtd_item, *qtd_tmp;
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_qtd *qtd = NULL;
	int xfer_done;
	int desc_num = 0;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
		int i;

		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
		xfer_done = 0;

		for (i = 0; i < qtd->n_desc; i++) {
			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
						       desc_num, halt_status,
						       &xfer_done)) {
				qtd = NULL;
				break;
			}
			desc_num++;
		}
	}

	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC2_HC_XFER_STALL)
			qh->data_toggle = DWC2_HC_PID_DATA0;
		else if (qtd)
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
	}

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
		}
	}
}

/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
 * status and calls completion routine for the URB if it's done. Called from
 * interrupt handlers.
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * For Isochronous endpoints the channel is not halted until the end of the
 * session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	int continue_isoc_xfer = 0;
	enum dwc2_transaction_type tr_type;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC2_HC_XFER_COMPLETE ||
		    list_empty(&qh->qtd_list)) {
			struct dwc2_qtd *qtd, *qtd_tmp;

			/*
			 * Kill all remaining QTDs since the channel has been
			 * halted.
			 */
			list_for_each_entry_safe(qtd, qtd_tmp,
						 &qh->qtd_list,
						 qtd_list_entry) {
				dwc2_host_complete(hsotg, qtd,
						   -ECONNRESET);
				dwc2_hcd_qtd_unlink_and_free(hsotg,
							     qtd, qh);
			}

			/* Halt the channel if session completed */
			if (halt_status == DWC2_HC_XFER_COMPLETE)
				dwc2_hc_halt(hsotg, chan, halt_status);
			dwc2_release_channel_ddma(hsotg, qh);
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);
			/*
			 * If the channel has been halted during giveback of an
			 * urb then prevent any new scheduling.
			 */
			if (!chan->halt_status)
				continue_isoc_xfer = 1;
		}
		/*
		 * Todo: Consider the case when period exceeds FrameList size.
		 * Frame Rollover interrupt should be used.
		 */
	} else {
		/*
		 * Scan descriptor list to complete the URB(s), then release
		 * the channel
		 */
		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
						 halt_status);
		dwc2_release_channel_ddma(hsotg, qh);
		dwc2_hcd_qh_unlink(hsotg, qh);

		if (!list_empty(&qh->qtd_list)) {
			/*
			 * Add back to inactive non-periodic schedule on normal
			 * completion
			 */
			dwc2_hcd_qh_add(hsotg, qh);
		}
	}

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC2_TRANSACTION_NONE)
				tr_type = DWC2_TRANSACTION_PERIODIC;
			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
				tr_type = DWC2_TRANSACTION_ALL;
		}
		dwc2_hcd_queue_transactions(hsotg, tr_type);
	}
}