/* $NetBSD: dwc2_hcdddma.c,v 1.6 2014/04/03 06:34:58 skrll Exp $ */

/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2_hcdddma.c,v 1.6 2014/04/03 06:34:58 skrll Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <linux/kernel.h>
#include <linux/list.h>

#include <dwc2/dwc2.h>
#include <dwc2/dwc2var.h>

#include "dwc2_core.h"
#include "dwc2_hcd.h"

static u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}

static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
		(qh->interval + 8 - 1) / 8 : qh->interval;
}

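/*
 * Note on the helpers above: the frame list always has FRLISTEN_64_SIZE (64)
 * entries, while the descriptor list index wraps at MAX_DMA_DESC_NUM_HS_ISOC
 * for high-speed isochronous QHs and at MAX_DMA_DESC_NUM_GENERIC otherwise.
 * dwc2_frame_incr_val() converts qh->interval, which this file treats as
 * being in microframes for high-speed devices, into full frames rounded up;
 * e.g. an interval of 12 microframes yields (12 + 7) / 8 = 2 frames.
 */
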
static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	int err;

	KASSERT(!cpu_intr_p() && !cpu_softintr_p());

	qh->desc_list = NULL;
	err = usb_allocmem(&hsotg->hsotg_sc->sc_bus,
	    sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh), 0,
	    &qh->desc_list_usbdma);

	if (!err) {
		qh->desc_list = KERNADDR(&qh->desc_list_usbdma, 0);
		qh->desc_list_dma = DMAADDR(&qh->desc_list_usbdma, 0);
	}

	if (!qh->desc_list)
		return -ENOMEM;

	memset(qh->desc_list, 0,
	       sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh));

	qh->n_bytes = kmem_zalloc(sizeof(u32) * dwc2_max_desc_num(qh), KM_SLEEP);
	if (!qh->n_bytes) {
		usb_freemem(&hsotg->hsotg_sc->sc_bus, &qh->desc_list_usbdma);
		qh->desc_list = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	if (qh->desc_list) {
		usb_freemem(&hsotg->hsotg_sc->sc_bus, &qh->desc_list_usbdma);
		qh->desc_list = NULL;
	}

	kmem_free(qh->n_bytes, sizeof(u32) * dwc2_max_desc_num(qh));
	qh->n_bytes = NULL;
}

static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	int err;

	if (hsotg->frame_list)
		return 0;

	/* XXXNH - pool_cache_t */
	hsotg->frame_list = NULL;
	err = usb_allocmem(&hsotg->hsotg_sc->sc_bus, 4 * FRLISTEN_64_SIZE,
	    0, &hsotg->frame_list_usbdma);

	if (!err) {
		hsotg->frame_list = KERNADDR(&hsotg->frame_list_usbdma, 0);
		hsotg->frame_list_dma = DMAADDR(&hsotg->frame_list_usbdma, 0);
	}

	if (!hsotg->frame_list)
		return -ENOMEM;

	memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE);
	return 0;
}

static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	usb_dma_t frame_list_usbdma;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	frame_list_usbdma = hsotg->frame_list_usbdma;
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	usb_freemem(&hsotg->hsotg_sc->sc_bus, &frame_list_usbdma);
}

static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = DWC2_READ_4(hsotg, HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	DWC2_WRITE_4(hsotg, HFLBADDR, hsotg->frame_list_dma);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	DWC2_WRITE_4(hsotg, HCFG, hcfg);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = DWC2_READ_4(hsotg, HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	DWC2_WRITE_4(hsotg, HCFG, hcfg);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

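/*
 * Note: each of the FRLISTEN_64_SIZE (64) frame list entries is a 32-bit
 * bitmap of host channels to be serviced in that frame. HFLBADDR points the
 * controller at the list and HCFG_PERSCHEDENA (set above) turns the periodic
 * schedule on; the entries themselves are programmed by
 * dwc2_update_frame_list() below.
 */
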
/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		printf("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->sched_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	if (!enable)
		return;

	chan->schinfo = 0;
	if (chan->speed == USB_SPEED_HIGH && qh->interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}

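/*
 * Illustrative example of the high-speed schinfo computation above (still
 * marked "TODO - check this" by the original authors): with qh->interval == 2
 * microframes, inc = (8 + 2 - 1) / 2 = 4 and the loop sets bits 0, 2, 4 and 6,
 * i.e. schinfo = 0x55; with qh->interval == 8 only bit 0 is set.
 */
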
static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->core_params->uframe_sched > 0)
			hsotg->available_host_channels++;
		else
			hsotg->non_periodic_channels--;
	} else {
		dwc2_update_frame_list(hsotg, qh, 0);
	}

	/*
	 * The condition is added to prevent a double cleanup attempt in case
	 * of device disconnect. See channel cleanup in dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (!list_empty(&chan->hc_list_entry))
			list_del(&chan->hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		chan->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
		       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to free
 *
 * Frees descriptor list memory associated with the QH. If the QH is periodic
 * and the last one, frees FrameList memory and disables periodic scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	dwc2_desc_list_free(hsotg, qh);

	/*
	 * The channel may still be assigned in some cases. This is seen on an
	 * Isoc URB dequeue: the channel was halted but no subsequent ChHalted
	 * interrupt arrived to release it, so when this point is later
	 * reached from the endpoint disable routine the channel remains
	 * assigned.
	 */
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);

	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    (hsotg->core_params->uframe_sched > 0 ||
	     !hsotg->periodic_channels) && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}

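/*
 * dwc2_frame_to_desc_idx() below maps a frame list index to a descriptor
 * list index. High-speed isochronous QHs use a set of 8 descriptors (one per
 * microframe) per full frame, so the result is 8-aligned; e.g. assuming
 * MAX_DMA_DESC_NUM_HS_ISOC is 256, frame index 37 maps to (37 & 31) * 8 = 40.
 * All other QHs use one descriptor per frame list entry.
 */
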
static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}

/*
 * Determine the starting frame for an Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/* sched_frame is always frame number (not uFrame) both in FS and HS! */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since the HC always
	 * fetches the descriptor corresponding to curr_frame+1, the
	 * descriptor corresponding to frame 2 will be fetched. If the number
	 * of descriptors is max=64 (or greater) the list will be fully
	 * programmed with Active descriptors and there is a (rare) possible
	 * case that the latest descriptor (considering rollback)
	 * corresponding to frame 2 will be serviced first. The HS case is
	 * more probable because, in fact, up to 11 uframes (16 in the code)
	 * may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider the uframe counter also, to start the transfer
		 * asap. If half of the frame has elapsed skip 2 frames,
		 * otherwise just 1 frame. The starting descriptor index must
		 * be 8-aligned, so if the current frame is near completion
		 * the next one is skipped as well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}

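/*
 * Note on the index calculation done below when a channel is still assigned:
 * fr_idx is advanced from the newly calculated frame to the next frame list
 * entry that lies on the QH's servicing grid. Illustrative example: with a
 * servicing period of 8, a scheduled-frame index of 2 and a starting index of
 * 21, (64 + 2 - 21) % 8 = 5, so fr_idx = (21 + 5) % 64 = 26, the first entry
 * at or after 21 that is congruent to 2 modulo 8.
 */
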
/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With the current ISOC processing algorithm the channel is released
	 * when there are no more QTDs in the list (qh->ntd == 0). Thus this
	 * function is called only when qh->ntd == 0 and qh->channel == NULL.
	 *
	 * So the qh->channel != NULL branch is not used, and is simply not
	 * removed from the source file. It is required for another possible
	 * approach, which is: do not disable and release the channel when an
	 * ISOC session completes, just move the QH to the inactive schedule
	 * until a new QTD arrives. On the new QTD, the QH is moved back to
	 * the 'ready' schedule, and the starting frame and therefore the
	 * starting desc_index are recalculated. In this case the channel is
	 * released only on ep_disable.
	 */

	/*
	 * Calculate starting descriptor index. For INTERRUPT endpoint it is
	 * always 0.
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on FrameList current
		 * bitmap and servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
			 % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
	} else {
		qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
							   &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}

#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4

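/*
 * With ISOC_URB_GIVEBACK_ASAP defined, an IOC (interrupt on complete) is
 * requested on the descriptor for the last frame of each URB, so completed
 * URBs are given back as soon as possible; without it a single IOC per
 * activation batch is used (see dwc2_init_isoc_dma_desc()).
 * MAX_ISOC_XFER_SIZE_FS/_HS are the per-descriptor byte limits applied in
 * dwc2_fill_host_isoc_dma_desc(), and DESCNUM_THRESHOLD controls where the
 * IOC is placed in the non-ASAP case.
 */
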
static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for each descriptor corresponding to last frame of URB */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif

	qh->ntd++;
	qtd->isoc_frame_index_last++;
}

static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc, ntd_max = 0;

	idx = qh->td_last;
	inc = qh->interval;
	n_desc = 0;

	if (qh->interval) {
		ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
				qh->interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->interval;
	}

	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
						qtd->urb->packet_count) {
			if (n_desc > 1)
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for last descriptor if descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;
	}
#else
	/*
	 * Set the IOC bit only for one descriptor. Always try to be ahead of
	 * HW processing, i.e. on IOC generation the driver activates the next
	 * descriptor, but the core continues to process descriptors following
	 * the one with IOC set.
	 */

	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs may continue to be queued,
		 * while at activation time only one was queued. Actually
		 * more than one QTD might be in the list if this function
		 * is called from XferCompletion - QTDs were queued during
		 * HW processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either the number
		 * of descriptors is not greater than the threshold or no more
		 * new descriptors were activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
#endif

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
		if (n_desc > 1)
			qh->desc_list[0].status |= HOST_DMA_A;
	}
}

static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
	int len = chan->xfer_len;

	if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
		len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);

	if (chan->ep_is_in) {
		int num_packets;

		if (len > 0 && chan->max_packet)
			num_packets = (len + chan->max_packet - 1)
					/ chan->max_packet;
		else
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;

		/* Always program an integral # of packets for IN transfers */
		len = num_packets * chan->max_packet;
	}

	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = len;

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		dma_desc->status |= HOST_DMA_SUP;

	dma_desc->buf = (u32)chan->xfer_dma;

	/*
	 * Last (or only) descriptor of IN transfer with actual size less
	 * than MaxPacket
	 */
	if (len > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += len;		/* XXXNH safe */
		chan->xfer_len -= len;
	}
}

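/*
 * Example of the IN-transfer length rounding above: with max_packet == 512
 * and chan->xfer_len == 1000 (assuming the MAX_DMA_DESC_SIZE clamp does not
 * apply), num_packets = (1000 + 511) / 512 = 2, so 1024 bytes are programmed
 * into the descriptor; since len > xfer_len, the remaining transfer length is
 * simply set to 0 for this (last) descriptor.
 */
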
static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan = qh->channel;
	int n_desc = 0;

	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
		 (unsigned long)chan->xfer_dma, chan->xfer_len);

	/*
	 * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
	 * if SG transfer consists of multiple URBs, this pointer is re-assigned
	 * to the buffer of the currently processed QTD. For non-SG request
	 * there is always one QTD active.
	 */

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

		if (n_desc) {
			/* SG request - more than 1 QTD */
			chan->xfer_dma = DMAADDR(qtd->urb->usbdma,
					qtd->urb->actual_length);
			chan->xfer_len = qtd->urb->length -
					qtd->urb->actual_length;
			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
				 (unsigned long)chan->xfer_dma, chan->xfer_len);
		}

		qtd->n_desc = 0;
		do {
			if (n_desc > 1) {
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
				dev_vdbg(hsotg->dev,
					 "set A bit in desc %d (%p)\n",
					 n_desc - 1,
					 &qh->desc_list[n_desc - 1]);
			}
			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
			dev_vdbg(hsotg->dev,
				 "desc %d (%p) buf=%08x status=%08x\n",
				 n_desc, &qh->desc_list[n_desc],
				 qh->desc_list[n_desc].buf,
				 qh->desc_list[n_desc].status);
			qtd->n_desc++;
			n_desc++;
		} while (chan->xfer_len > 0 &&
			 n_desc != MAX_DMA_DESC_NUM_GENERIC);

		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
		qtd->in_process = 1;
		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
			break;
		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |=
				HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
			 n_desc - 1, &qh->desc_list[n_desc - 1]);
		if (n_desc > 1) {
			qh->desc_list[0].status |= HOST_DMA_A;
			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
				 &qh->desc_list[0]);
		}
		chan->ntd = n_desc;
	}
}

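/*
 * Note on the Active-bit handling above: descriptors 1..n-2 have their A bits
 * set while the chain is built, the last descriptor gets IOC/EOL/A, and
 * descriptor 0 is armed last. Presumably this ordering keeps the controller
 * from starting on a chain that is still being constructed.
 */
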
/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For Control and Bulk endpoints, initializes descriptor list and starts the
 * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
 * list then updates FrameList, marking appropriate entries as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session is
 * done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Channel is already assigned */
	struct dwc2_host_chan *chan = qh->channel;
	u16 skip_frames = 0;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_update_frame_list(hsotg, qh, 1);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!qh->ntd)
			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

		if (!chan->xfer_started) {
			dwc2_update_frame_list(hsotg, qh, 1);

			/*
			 * Always set to max, instead of actual size. Otherwise
			 * ntd would need to be changed while the channel is
			 * enabled, which is not recommended.
			 */
			chan->ntd = dwc2_max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc2_hc_start_transfer_ddma(hsotg, chan);
		}

		break;
	default:
		break;
	}
}

#define DWC2_CMPL_DONE		1
#define DWC2_CMPL_STOP		2

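/*
 * dwc2_cmpl_host_isoc_dma_desc() below completes one isochronous descriptor:
 * it fills in the corresponding iso frame descriptor's actual_length and
 * status, gives the URB back once all of its packets are done, and returns
 * DWC2_CMPL_DONE when an URB was completed, DWC2_CMPL_STOP when the
 * descriptor with IOC set has been reached, a negative value on errors or
 * URB dequeue, and 0 otherwise.
 */
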
static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_qh *qh, u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u16 remain = 0;
	int rc = 0;

	if (!qtd->urb)
		return -EINVAL;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
	dma_desc->buf = (u32)(DMAADDR(qtd->urb->usbdma, frame_desc->offset));
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
			 HOST_DMA_ISOC_NBYTES_SHIFT;

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/*
		 * XactError, or unable to complete all the transactions
		 * in the scheduled micro-frame/frame, both indicated by
		 * HOST_DMA_STS_PKTERR
		 */
		qtd->urb->error_count++;
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = -EPROTO;
	} else {
		/* Success */
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = 0;
	}

	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers here. The
		 * individual frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/*
		 * This check is necessary because urb_dequeue can be called
		 * from the urb complete callback (a sound driver for
		 * example). All pending URBs are dequeued there, so no need
		 * for further processing.
		 */
		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
			return -1;
		rc = DWC2_CMPL_DONE;
	}

	qh->ntd--;

	/* Stop if IOC requested descriptor reached */
	if (dma_desc->status & HOST_DMA_IOC)
		rc = DWC2_CMPL_STOP;

	return rc;
}

static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					 struct dwc2_host_chan *chan,
					 enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	u16 idx;
	int rc;

	qh = chan->qh;
	idx = qh->td_first;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
		/*
		 * The channel is halted in these error cases, which are
		 * considered serious issues.
		 * Complete all URBs marking all frames as failed, irrespective
		 * of whether some of the descriptors (frames) succeeded or
		 * not.
		 * Pass the error code to the completion routine as well, to
		 * update urb->status; some class drivers might use it to stop
		 * queuing transfer requests.
		 */
		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
			  -EIO : -EOVERFLOW;

		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry) {
			if (qtd->urb) {
				for (idx = 0; idx < qtd->urb->packet_count;
				     idx++) {
					frame_desc = &qtd->urb->iso_descs[idx];
					frame_desc->status = err;
				}

				dwc2_host_complete(hsotg, qtd, err);
			}

			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}

		return;
	}

	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
		if (!qtd->in_process)
			break;
		do {
			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
							  idx);
			if (rc < 0)
				return;
			idx = dwc2_desclist_idx_inc(idx, qh->interval,
						    chan->speed);
			if (rc == DWC2_CMPL_STOP)
				goto stop_scan;
			if (rc == DWC2_CMPL_DONE)
				break;
		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}

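/*
 * dwc2_update_non_isoc_urb_state_ddma() below examines one completed
 * descriptor of a Control/Bulk/Interrupt transfer. It returns 1 when the
 * descriptor indicates a failure (urb->status has then been set to the
 * matching error code), and 0 otherwise; *xfer_done is set once the whole
 * transfer has finished. For IN endpoints the residue is read from the
 * NBYTES field of the descriptor status word.
 */
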
static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
					       struct dwc2_host_chan *chan,
					       struct dwc2_qtd *qtd,
					       struct dwc2_hcd_dma_desc *dma_desc,
					       enum dwc2_halt_status halt_status,
					       u32 n_bytes, int *xfer_done)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	u16 remain = 0;

	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
			 HOST_DMA_NBYTES_SHIFT;

	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
		dev_err(hsotg->dev, "EIO\n");
		urb->status = -EIO;
		return 1;
	}

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		switch (halt_status) {
		case DWC2_HC_XFER_STALL:
			dev_vdbg(hsotg->dev, "Stall\n");
			urb->status = -EPIPE;
			break;
		case DWC2_HC_XFER_BABBLE_ERR:
			dev_err(hsotg->dev, "Babble\n");
			urb->status = -EOVERFLOW;
			break;
		case DWC2_HC_XFER_XACT_ERR:
			dev_err(hsotg->dev, "XactErr\n");
			urb->status = -EPROTO;
			break;
		default:
			dev_err(hsotg->dev,
				"%s: Unhandled descriptor error status (%d)\n",
				__func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status & HOST_DMA_A) {
		dev_vdbg(hsotg->dev,
			 "Active descriptor encountered on channel %d\n",
			 chan->hc_num);
		return 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		if (qtd->control_phase == DWC2_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length >= urb->length) {
				/*
				 * For Control Data stage do not set urb->status
				 * to 0, to prevent URB callback. Set it when
				 * Status phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
			 urb->actual_length);
		if (remain || urb->actual_length >= urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}

static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan,
				      int chnum, struct dwc2_qtd *qtd,
				      int desc_num,
				      enum dwc2_halt_status halt_status,
				      int *xfer_done)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_hcd_dma_desc *dma_desc;
	u32 n_bytes;
	int failed;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!urb)
		return -EINVAL;

	dma_desc = &qh->desc_list[desc_num];
	n_bytes = qh->n_bytes[desc_num];
	dev_vdbg(hsotg->dev,
		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
		 qtd, urb, desc_num, dma_desc, n_bytes);
	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
						     halt_status, n_bytes,
						     xfer_done);
	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
		dwc2_host_complete(hsotg, qtd, urb->status);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
			 failed, *xfer_done, urb->status);
		return failed;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 " Control setup transaction done\n");
			break;
		case DWC2_CONTROL_DATA:
			if (*xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 " Control data transfer done\n");
			} else if (desc_num + 1 == qtd->n_desc) {
				/*
				 * Last descriptor for Control data stage which
				 * is not completed yet
				 */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

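/*
 * In the completion scan below, desc_num keeps increasing across QTDs
 * because all QTDs queued on the QH share the single descriptor list built
 * by dwc2_init_non_isoc_dma_desc(); each QTD recorded how many descriptors
 * it occupies in qtd->n_desc.
 */
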
static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					     struct dwc2_host_chan *chan,
					     int chnum,
					     enum dwc2_halt_status halt_status)
{
	struct list_head *qtd_item, *qtd_tmp;
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_qtd *qtd = NULL;
	int xfer_done;
	int desc_num = 0;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
		int i;

		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
		xfer_done = 0;

		for (i = 0; i < qtd->n_desc; i++) {
			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
						       desc_num, halt_status,
						       &xfer_done)) {
				qtd = NULL;
				break;
			}
			desc_num++;
		}
	}

	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC2_HC_XFER_STALL)
			qh->data_toggle = DWC2_HC_PID_DATA0;
		else if (qtd)
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
	}

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
		}
	}
}

/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
 * status and calls completion routine for the URB if it's done. Called from
 * interrupt handlers.
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In case of an Isochronous endpoint the channel is not halted until the end
 * of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	int continue_isoc_xfer = 0;
	enum dwc2_transaction_type tr_type;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC2_HC_XFER_COMPLETE ||
		    list_empty(&qh->qtd_list)) {
			/* Halt the channel if session completed */
			if (halt_status == DWC2_HC_XFER_COMPLETE)
				dwc2_hc_halt(hsotg, chan, halt_status);
			dwc2_release_channel_ddma(hsotg, qh);
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);
			continue_isoc_xfer = 1;
		}
		/*
		 * Todo: Consider the case when period exceeds FrameList size.
		 * Frame Rollover interrupt should be used.
		 */
	} else {
		/*
		 * Scan descriptor list to complete the URB(s), then release
		 * the channel
		 */
		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
						 halt_status);
		dwc2_release_channel_ddma(hsotg, qh);
		dwc2_hcd_qh_unlink(hsotg, qh);

		if (!list_empty(&qh->qtd_list)) {
			/*
			 * Add back to inactive non-periodic schedule on normal
			 * completion
			 */
			dwc2_hcd_qh_add(hsotg, qh);
		}
	}

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC2_TRANSACTION_NONE)
				tr_type = DWC2_TRANSACTION_PERIODIC;
			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
				tr_type = DWC2_TRANSACTION_ALL;
		}
		dwc2_hcd_queue_transactions(hsotg, tr_type);
	}
}