/*	$OpenBSD: dwc2_hcdddma.c,v 1.11 2015/12/18 17:23:14 mmcc Exp $	*/
/*	$NetBSD: dwc2_hcdddma.c,v 1.6 2014/04/03 06:34:58 skrll Exp $	*/

/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#if 0
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2_hcdddma.c,v 1.6 2014/04/03 06:34:58 skrll Exp $");
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#if 0
#include <sys/cpu.h>
#endif

#include <machine/bus.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <dev/usb/dwc2/dwc2.h>
#include <dev/usb/dwc2/dwc2var.h>

#include <dev/usb/dwc2/dwc2_core.h>
#include <dev/usb/dwc2/dwc2_hcd.h>

STATIC u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}

STATIC u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

STATIC u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

STATIC u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
	       MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

STATIC u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
		(qh->interval + 8 - 1) / 8 : qh->interval;
}

STATIC int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	int err;

	//KASSERT(!cpu_intr_p() && !cpu_softintr_p());

	qh->desc_list = NULL;
	err = usb_allocmem(&hsotg->hsotg_sc->sc_bus,
	    sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh), 0,
	    &qh->desc_list_usbdma);

	if (!err) {
		qh->desc_list = KERNADDR(&qh->desc_list_usbdma, 0);
		qh->desc_list_dma = DMAADDR(&qh->desc_list_usbdma, 0);
	}

	if (!qh->desc_list)
		return -ENOMEM;

	memset(qh->desc_list, 0,
	       sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh));

	qh->n_bytes = malloc(sizeof(u32) * dwc2_max_desc_num(qh), M_DEVBUF,
	    M_ZERO | M_WAITOK);

	return 0;
}

STATIC void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	if (qh->desc_list) {
		usb_freemem(&hsotg->hsotg_sc->sc_bus, &qh->desc_list_usbdma);
		qh->desc_list = NULL;
	}

	free(qh->n_bytes, M_DEVBUF, sizeof(u32) * dwc2_max_desc_num(qh));
	qh->n_bytes = NULL;
}

STATIC int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	int err;

	if (hsotg->frame_list)
		return 0;

	/* XXXNH - struct pool */
	hsotg->frame_list = NULL;
	err = usb_allocmem(&hsotg->hsotg_sc->sc_bus, 4 * FRLISTEN_64_SIZE,
	    0, &hsotg->frame_list_usbdma);

	if (!err) {
		hsotg->frame_list = KERNADDR(&hsotg->frame_list_usbdma, 0);
		hsotg->frame_list_dma = DMAADDR(&hsotg->frame_list_usbdma, 0);
	}

	if (!hsotg->frame_list)
		return -ENOMEM;

	memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE);
	return 0;
}

STATIC void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	struct usb_dma frame_list_usbdma;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	frame_list_usbdma = hsotg->frame_list_usbdma;
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	usb_freemem(&hsotg->hsotg_sc->sc_bus, &frame_list_usbdma);
}

STATIC void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = DWC2_READ_4(hsotg, HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	DWC2_WRITE_4(hsotg, HFLBADDR, hsotg->frame_list_dma);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	DWC2_WRITE_4(hsotg, HCFG, hcfg);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

STATIC void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = DWC2_READ_4(hsotg, HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	DWC2_WRITE_4(hsotg, HCFG, hcfg);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
STATIC void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		printf("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->sched_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	if (!enable)
		return;

	chan->schinfo = 0;
	if (chan->speed == USB_SPEED_HIGH && qh->interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}

STATIC void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->core_params->uframe_sched > 0)
			hsotg->available_host_channels++;
		else
			hsotg->non_periodic_channels--;
	} else {
		dwc2_update_frame_list(hsotg, qh, 0);
	}

	/*
	 * The condition is added to prevent double cleanup try in case of
	 * device disconnect. See channel cleanup in dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (chan->in_freelist != 0)
			LIST_REMOVE(chan, hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		LIST_INSERT_HEAD(&hsotg->free_hc_list, chan, hc_list_entry);
		chan->qh = NULL;
		chan->in_freelist = 1;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
		       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to free
 *
 * Frees descriptor list memory associated with the QH. If QH is periodic and
 * the last, frees FrameList memory and disables periodic scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	dwc2_desc_list_free(hsotg, qh);

	/*
	 * The channel may still be assigned in some cases.
	 * Seen on Isoc URB dequeue: the channel is halted but there is no
	 * subsequent ChHalted interrupt to release the channel. Afterwards,
	 * when this is reached from the endpoint disable routine, the
	 * channel remains assigned.
	 */
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);

	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    (hsotg->core_params->uframe_sched > 0 ||
	     !hsotg->periodic_channels) && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}

STATIC u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}

/*
 * Determine the starting frame for an Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
STATIC u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/* sched_frame is always frame number (not uFrame) both in FS and HS! */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since HC always fetches
	 * the descriptor corresponding to curr_frame+1, the descriptor
	 * corresponding to frame 2 will be fetched. If the number of
	 * descriptors is max=64 (or greater) the list will be fully programmed
	 * with Active descriptors and it is a possible (though rare) case that
	 * the latest descriptor (considering rollback) corresponding to frame 2
	 * will be serviced first. The HS case is more probable because, in
	 * fact, up to 11 uframes (16 in the code) may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider uframe counter also, to start xfer asap. If half of
		 * the frame elapsed skip 2 frames otherwise just 1 frame.
		 * Starting descriptor index must be 8-aligned, so if the
		 * current frame is near to complete the next one is skipped as
		 * well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}

/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 */
STATIC u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With current ISOC processing algorithm the channel is being released
	 * when no more QTDs in the list (qh->ntd == 0). Thus this function is
	 * called only when qh->ntd == 0 and qh->channel == 0.
	 *
	 * So qh->channel != NULL branch is not used and just not removed from
	 * the source file. It is required for another possible approach which
	 * is, do not disable and release the channel when ISOC session
	 * completed, just move QH to inactive schedule until new QTD arrives.
	 * On new QTD, the QH moved back to 'ready' schedule, starting frame and
	 * therefore starting desc_index are recalculated. In this case channel
	 * is released only on ep_disable.
	 */

	/*
	 * Calculate starting descriptor index. For INTERRUPT endpoint it is
	 * always 0.
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on FrameList current
		 * bitmap and servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
			 % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
	} else {
		qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
							   &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}

#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4

STATIC void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for each descriptor corresponding to last frame of URB */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif

	qh->ntd++;
	qtd->isoc_frame_index_last++;
}

STATIC void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc, ntd_max = 0;

	idx = qh->td_last;
	inc = qh->interval;
	n_desc = 0;

	if (qh->interval) {
		ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
				qh->interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->interval;
	}

	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	TAILQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
						qtd->urb->packet_count) {
			if (n_desc > 1)
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for last descriptor if descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;
	}
#else
	/*
	 * Set IOC bit only for one descriptor. Always try to be ahead of HW
	 * processing, i.e. on IOC generation driver activates next descriptor
	 * but core continues to process descriptors following the one with IOC
	 * set.
	 */

	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * but during activation only one was queued.
		 * Actually, more than one QTD might be in the list if this
		 * function is called from XferCompletion - QTDs were queued
		 * during HW processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either the number
		 * of descriptors is not greater than the threshold or no more
		 * new descriptors were activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
#endif

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
		if (n_desc > 1)
			qh->desc_list[0].status |= HOST_DMA_A;
	}
}

STATIC void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
	int len = chan->xfer_len;

	if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
		len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);

	if (chan->ep_is_in) {
		int num_packets;

		if (len > 0 && chan->max_packet)
			num_packets = (len + chan->max_packet - 1)
					/ chan->max_packet;
		else
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;

		/* Always program an integral # of packets for IN transfers */
		len = num_packets * chan->max_packet;
	}

	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = len;

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		dma_desc->status |= HOST_DMA_SUP;

	dma_desc->buf = (u32)chan->xfer_dma;

	/*
	 * Last (or only) descriptor of IN transfer with actual size less
	 * than MaxPacket
	 */
	if (len > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += len;	/* XXXNH safe */
		chan->xfer_len -= len;
	}
}

STATIC void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan = qh->channel;
	int n_desc = 0;

	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
		 (unsigned long)chan->xfer_dma, chan->xfer_len);

	/*
	 * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
	 * if SG transfer consists of multiple URBs, this pointer is re-assigned
	 * to the buffer of the currently processed QTD. For non-SG request
	 * there is always one QTD active.
	 */

	TAILQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

		if (n_desc) {
			/* SG request - more than 1 QTD */
			chan->xfer_dma = DMAADDR(qtd->urb->usbdma,
					qtd->urb->actual_length);
			chan->xfer_len = qtd->urb->length -
					qtd->urb->actual_length;
			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
				 (unsigned long)chan->xfer_dma, chan->xfer_len);
		}

		qtd->n_desc = 0;
		do {
			if (n_desc > 1) {
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
				dev_vdbg(hsotg->dev,
					 "set A bit in desc %d (%p)\n",
					 n_desc - 1,
					 &qh->desc_list[n_desc - 1]);
			}
			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
			dev_vdbg(hsotg->dev,
				 "desc %d (%p) buf=%08x status=%08x\n",
				 n_desc, &qh->desc_list[n_desc],
				 qh->desc_list[n_desc].buf,
				 qh->desc_list[n_desc].status);
			qtd->n_desc++;
			n_desc++;
		} while (chan->xfer_len > 0 &&
			 n_desc != MAX_DMA_DESC_NUM_GENERIC);

		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
		qtd->in_process = 1;
		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
			break;
		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |=
			HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
			 n_desc - 1, &qh->desc_list[n_desc - 1]);
		if (n_desc > 1) {
			qh->desc_list[0].status |= HOST_DMA_A;
			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
				 &qh->desc_list[0]);
		}
		chan->ntd = n_desc;
	}
}

/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For Control and Bulk endpoints, initializes descriptor list and starts the
 * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
 * list then updates FrameList, marking appropriate entries as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on XferComplete
 * interrupt so remains assigned to the endpoint(QH) until session is done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Channel is already assigned */
	struct dwc2_host_chan *chan = qh->channel;
	u16 skip_frames = 0;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_update_frame_list(hsotg, qh, 1);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!qh->ntd)
			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

		if (!chan->xfer_started) {
			dwc2_update_frame_list(hsotg, qh, 1);

			/*
			 * Always set to max, instead of actual size. Otherwise
			 * ntd will be changed with channel being enabled. Not
			 * recommended.
			 */
			chan->ntd = dwc2_max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc2_hc_start_transfer_ddma(hsotg, chan);
		}

		break;
	default:
		break;
	}
}

#define DWC2_CMPL_DONE		1
#define DWC2_CMPL_STOP		2

STATIC int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_qh *qh, u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u16 remain = 0;
	int rc = 0;

	if (!qtd->urb)
		return -EINVAL;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
			 HOST_DMA_ISOC_NBYTES_SHIFT;

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/*
		 * XactError, or unable to complete all the transactions
		 * in the scheduled micro-frame/frame, both indicated by
		 * HOST_DMA_STS_PKTERR
		 */
		qtd->urb->error_count++;
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = -EPROTO;
	} else {
		/* Success */
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = 0;
	}

	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers here. The
		 * individual frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/*
		 * This check is necessary because urb_dequeue can be called
		 * from urb complete callback (sound driver for example). All
		 * pending URBs are dequeued there, so no need for further
		 * processing.
		 */
		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
			return -1;
		rc = DWC2_CMPL_DONE;
	}

	qh->ntd--;

	/* Stop if IOC requested descriptor reached */
	if (dma_desc->status & HOST_DMA_IOC)
		rc = DWC2_CMPL_STOP;

	return rc;
}

STATIC void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					 struct dwc2_host_chan *chan,
					 enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	u16 idx;
	int rc;

	qh = chan->qh;
	idx = qh->td_first;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		TAILQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
		/*
		 * The channel is halted in these error cases, which are
		 * considered serious issues.
		 * Complete all URBs, marking all frames as failed, irrespective
		 * of whether some of the descriptors (frames) succeeded or not.
		 * Pass the error code to the completion routine as well, to
		 * update urb->status; some class drivers might use it to stop
		 * queuing transfer requests.
		 */
		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
			  -EIO : -EOVERFLOW;

		TAILQ_FOREACH_SAFE(qtd, &qh->qtd_list, qtd_list_entry, qtd_tmp) {
			if (qtd->urb) {
				for (idx = 0; idx < qtd->urb->packet_count;
				     idx++) {
					frame_desc = &qtd->urb->iso_descs[idx];
					frame_desc->status = err;
				}

				dwc2_host_complete(hsotg, qtd, err);
			}

			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}

		return;
	}

	TAILQ_FOREACH_SAFE(qtd, &qh->qtd_list, qtd_list_entry, qtd_tmp) {
		if (!qtd->in_process)
			break;
		do {
			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
							  idx);
			if (rc < 0)
				return;
			idx = dwc2_desclist_idx_inc(idx, qh->interval,
						    chan->speed);
			if (rc == DWC2_CMPL_STOP)
				goto stop_scan;
			if (rc == DWC2_CMPL_DONE)
				break;
		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}

STATIC int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
					       struct dwc2_host_chan *chan,
					       struct dwc2_qtd *qtd,
					       struct dwc2_hcd_dma_desc *dma_desc,
					       enum dwc2_halt_status halt_status,
					       u32 n_bytes, int *xfer_done)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	u16 remain = 0;

	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
			 HOST_DMA_NBYTES_SHIFT;

	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
		dev_err(hsotg->dev, "EIO\n");
		urb->status = -EIO;
		return 1;
	}

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		switch (halt_status) {
		case DWC2_HC_XFER_STALL:
			dev_vdbg(hsotg->dev, "Stall\n");
			urb->status = -EPIPE;
			break;
		case DWC2_HC_XFER_BABBLE_ERR:
			dev_err(hsotg->dev, "Babble\n");
			urb->status = -EOVERFLOW;
			break;
		case DWC2_HC_XFER_XACT_ERR:
			dev_err(hsotg->dev, "XactErr\n");
			urb->status = -EPROTO;
			break;
		default:
			dev_err(hsotg->dev,
				"%s: Unhandled descriptor error status (%d)\n",
				__func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status & HOST_DMA_A) {
		dev_vdbg(hsotg->dev,
			 "Active descriptor encountered on channel %d\n",
			 chan->hc_num);
		return 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		if (qtd->control_phase == DWC2_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length >= urb->length) {
				/*
				 * For Control Data stage do not set urb->status
				 * to 0, to prevent URB callback. Set it when
				 * Status phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
			 urb->actual_length);
		if (remain || urb->actual_length >= urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}

STATIC int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan,
				      int chnum, struct dwc2_qtd *qtd,
				      int desc_num,
				      enum dwc2_halt_status halt_status,
				      int *xfer_done)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_hcd_dma_desc *dma_desc;
	u32 n_bytes;
	int failed;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!urb)
		return -EINVAL;

	dma_desc = &qh->desc_list[desc_num];
	n_bytes = qh->n_bytes[desc_num];
	dev_vdbg(hsotg->dev,
		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
		 qtd, urb, desc_num, dma_desc, n_bytes);
	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
						     halt_status, n_bytes,
						     xfer_done);
	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
		dwc2_host_complete(hsotg, qtd, urb->status);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
			 failed, *xfer_done, urb->status);
		return failed;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 " Control setup transaction done\n");
			break;
		case DWC2_CONTROL_DATA:
			if (*xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 " Control data transfer done\n");
			} else if (desc_num + 1 == qtd->n_desc) {
				/*
				 * Last descriptor for Control data stage which
				 * is not completed yet
				 */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

STATIC void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					     struct dwc2_host_chan *chan,
					     int chnum,
					     enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_qtd *qtd = NULL, *qtd_tmp;
	int xfer_done;
	int desc_num = 0;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		TAILQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	TAILQ_FOREACH_SAFE(qtd, &qh->qtd_list, qtd_list_entry, qtd_tmp) {
		int i;

		xfer_done = 0;

		for (i = 0; i < qtd->n_desc; i++) {
			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
						       desc_num, halt_status,
						       &xfer_done)) {
				qtd = NULL;
				break;
			}
			desc_num++;
		}
	}

	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC2_HC_XFER_STALL)
			qh->data_toggle = DWC2_HC_PID_DATA0;
		else if (qtd)
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
	}

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
		}
	}
}

/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
 * status and calls completion routine for the URB if it's done. Called from
 * interrupt handlers.
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In case of Isochronous endpoint the channel is not halted until the end of
 * the session, i.e. QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	int continue_isoc_xfer = 0;
	enum dwc2_transaction_type tr_type;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC2_HC_XFER_COMPLETE ||
		    TAILQ_EMPTY(&qh->qtd_list)) {
			/* Halt the channel if session completed */
			if (halt_status == DWC2_HC_XFER_COMPLETE)
				dwc2_hc_halt(hsotg, chan, halt_status);
			dwc2_release_channel_ddma(hsotg, qh);
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			TAILQ_REMOVE(&hsotg->periodic_sched_queued, qh, qh_list_entry);
			TAILQ_INSERT_TAIL(&hsotg->periodic_sched_assigned, qh, qh_list_entry);
			continue_isoc_xfer = 1;
		}
		/*
		 * Todo: Consider the case when period exceeds FrameList size.
		 * Frame Rollover interrupt should be used.
		 */
	} else {
		/*
		 * Scan descriptor list to complete the URB(s), then release
		 * the channel
		 */
		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
						 halt_status);
		dwc2_release_channel_ddma(hsotg, qh);
		dwc2_hcd_qh_unlink(hsotg, qh);

		if (!TAILQ_EMPTY(&qh->qtd_list)) {
			/*
			 * Add back to inactive non-periodic schedule on normal
			 * completion
			 */
			dwc2_hcd_qh_add(hsotg, qh);
		}
	}

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC2_TRANSACTION_NONE)
				tr_type = DWC2_TRANSACTION_PERIODIC;
			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
				tr_type = DWC2_TRANSACTION_ALL;
		}
		dwc2_hcd_queue_transactions(hsotg, tr_type);
	}
}