/*	$NetBSD: dwc2_hcdqueue.c,v 1.5 2013/11/14 12:40:51 skrll Exp $	*/

/*
 * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the functions to manage Queue Heads and Queue
 * Transfer Descriptors for Host mode
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2_hcdqueue.c,v 1.5 2013/11/14 12:40:51 skrll Exp $");

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/pool.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <machine/param.h>

#include <linux/kernel.h>

#include <dwc2/dwc2.h>
#include <dwc2/dwc2var.h>

#include "dwc2_core.h"
#include "dwc2_hcd.h"

static u32 dwc2_calc_bus_time(struct dwc2_hsotg *, int, int, int, int);

/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 * @urb:   Holds the information about the device/endpoint needed to initialize
 *         the QH
 */
#define SCHEDULE_SLOP 10
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb)
{
	int dev_speed, hub_addr, hub_port;
	const char *speed, *type;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Initialize QH */
	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

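	/*
	 * Every new QH starts with DATA0 and with empty QTD and schedule
	 * list heads; QTDs are attached later by dwc2_hcd_qtd_add()
	 */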
	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);

	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
	    hub_addr != 0 && hub_addr != 1) {
		dev_vdbg(hsotg->dev,
			 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
			 hub_port);
		qh->do_split = 1;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* Compute scheduling parameters once and save them */
		u32 hprt, prtspd;

		/* Todo: Account for split transfers in the bus time */
		int bytecount =
			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);

		qh->usecs = dwc2_calc_bus_time(hsotg, qh->do_split ?
				USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
				qh->ep_type == USB_ENDPOINT_XFER_ISOC,
				bytecount);
		/* Start in a slightly future (micro)frame */
		qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
						     SCHEDULE_SLOP);
		qh->interval = urb->interval;
#if 0
		/* Increase interrupt polling rate for debugging */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
			qh->interval = 8;
#endif
		hprt = DWC2_READ_4(hsotg, HPRT0);
		prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
		    (dev_speed == USB_SPEED_LOW ||
		     dev_speed == USB_SPEED_FULL)) {
			qh->interval *= 8;
			qh->sched_frame |= 0x7;
			qh->start_split_frame = qh->sched_frame;
		}
		dev_dbg(hsotg->dev, "interval=%d\n", qh->interval);
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
		 dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
		 dwc2_hcd_get_ep_num(&urb->pipe_info),
		 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	qh->dev_speed = dev_speed;

	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);

	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
			 qh->usecs);
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
			 qh->interval);
	}
}

/**
 * dwc2_hcd_qh_create() - Allocates and initializes a QH
 *
 * @hsotg:     The HCD state structure for the DWC OTG controller
 * @urb:       Holds the information about the device/endpoint needed
 *             to initialize the QH
 * @mem_flags: Flag to do atomic allocation if needed
 *
 * Return: Pointer to the newly allocated QH, or NULL on error
 */
static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
					  struct dwc2_hcd_urb *urb,
					  gfp_t mem_flags)
{
	struct dwc2_softc *sc = hsotg->hsotg_sc;
	struct dwc2_qh *qh;

	if (!urb->priv)
		return NULL;

	/* Allocate memory */
	qh = pool_cache_get(sc->sc_qhpool, PR_NOWAIT);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	dwc2_qh_init(hsotg, qh, urb);

	if (hsotg->core_params->dma_desc_enable > 0 &&
	    dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
		dwc2_hcd_qh_free(hsotg, qh);
		return NULL;
	}

	return qh;
}

/**
 * dwc2_hcd_qh_free() - Frees the QH
 *
 * @hsotg: HCD instance
 * @qh:    The QH to free
 *
 * QH should already be removed from the list. QTD list should already be empty
 * if called from URB Dequeue.
 *
 * Must NOT be called with interrupts disabled or a spinlock held
 */
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	struct dwc2_softc *sc = hsotg->hsotg_sc;

	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_qh_free_ddma(hsotg, qh);
	} else if (qh->dw_align_buf) {
		/* XXXNH */
		usb_freemem(&hsotg->hsotg_sc->sc_bus, &qh->dw_align_buf_usbdma);
	}

	pool_cache_put(sc->sc_qhpool, qh);
}

/**
 * dwc2_periodic_channel_available() - Checks that a channel is available for a
 * periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
{
	/*
	 * Currently assuming that there is a dedicated host channel for
	 * each periodic transaction plus at least one host channel for
	 * non-periodic transactions
	 */
	int status;
	int num_channels;

	num_channels = hsotg->core_params->host_channels;
	if (hsotg->periodic_channels + hsotg->non_periodic_channels <
								num_channels
	    && hsotg->periodic_channels < num_channels - 1) {
		status = 0;
	} else {
		dev_dbg(hsotg->dev,
			"%s: Total channels: %d, Periodic: %d, "
			"Non-periodic: %d\n", __func__, num_channels,
			hsotg->periodic_channels, hsotg->non_periodic_channels);
		status = -ENOSPC;
	}

	return status;
}

/**
 * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
 * for the specified QH in the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH containing periodic bandwidth required
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For simplicity, this calculation assumes that all the transfers in the
 * periodic schedule may occur in the same (micro)frame
 */
static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
					 struct dwc2_qh *qh)
{
	int status;
	s16 max_claimed_usecs;

	status = 0;

	if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
		/*
		 * High speed mode
		 * Max periodic usecs is 80% x 125 usec = 100 usec
		 */
		max_claimed_usecs = 100 - qh->usecs;
	} else {
		/*
		 * Full speed mode
		 * Max periodic usecs is 90% x 1000 usec = 900 usec
		 */
		max_claimed_usecs = 900 - qh->usecs;
	}

	if (hsotg->periodic_usecs > max_claimed_usecs) {
		dev_err(hsotg->dev,
			"%s: already claimed usecs %d, required usecs %d\n",
			__func__, hsotg->periodic_usecs, qh->usecs);
		status = -ENOSPC;
	}

	return status;
}

/**
 * Microframe scheduler:
 * tracks the total use in hsotg->frame_usecs,
 * keeps each QH's use in qh->frame_usecs, and
 * donates the time back when the QH is surrendered
 */
static const unsigned short max_uframe_usecs[] = {
	100, 100, 100, 100, 100, 100, 30, 0
};

void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
{
	int i;

	for (i = 0; i < 8; i++)
		hsotg->frame_usecs[i] = max_uframe_usecs[i];
}

static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->usecs;
	int done = 0;
	int i = 0;
	int ret = -1;

	while (!done) {
		/* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
		if (utime <= hsotg->frame_usecs[i]) {
			hsotg->frame_usecs[i] -= utime;
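			/*
			 * Charge the claimed time to the QH as well so that
			 * dwc2_deschedule_periodic() can return it to
			 * hsotg->frame_usecs later
			 */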
			qh->frame_usecs[i] += utime;
			ret = i;
			done = 1;
		} else {
			i++;
			if (i == 8)
				done = 1;
		}
	}

	return ret;
}

/*
 * Use this for FS transfers that can span multiple uframes
 */
static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->usecs;
	unsigned short xtime;
	int t_left = utime;
	int done = 0;
	int i = 0;
	int j;
	int ret = -1;

	while (!done) {
		if (hsotg->frame_usecs[i] <= 0) {
			i++;
			if (i == 8) {
				ret = -1;
				done = 1;
			}
			continue;
		}

		/*
		 * We need n consecutive slots, so use j as a start slot.
		 * j plus j+1 must be enough time (for now)
		 */
		xtime = hsotg->frame_usecs[i];
		for (j = i + 1; j < 8; j++) {
			/*
			 * If we add this frame's remaining time to xtime we
			 * may be OK, if not we need to test j for a complete
			 * frame
			 */
			if (xtime + hsotg->frame_usecs[j] < utime) {
				if (hsotg->frame_usecs[j] <
							max_uframe_usecs[j]) {
					ret = -1;
					break;
				}
			}
			if (xtime >= utime) {
				ret = i;
				break;
			}
			/* Add the frame's remaining time to xtime */
			xtime += hsotg->frame_usecs[j];
			/* We must have a fully available next frame or break */
			if (xtime < utime &&
			    hsotg->frame_usecs[j] == max_uframe_usecs[j]) {
				ret = -1;
				break;
			}
		}
		if (ret >= 0) {
			t_left = utime;
			for (j = i; t_left > 0 && j < 8; j++) {
				t_left -= hsotg->frame_usecs[j];
				if (t_left <= 0) {
					qh->frame_usecs[j] +=
						hsotg->frame_usecs[j] + t_left;
					hsotg->frame_usecs[j] = -t_left;
					ret = i;
					done = 1;
				} else {
					qh->frame_usecs[j] +=
						hsotg->frame_usecs[j];
					hsotg->frame_usecs[j] = 0;
				}
			}
		} else {
			i++;
			if (i == 8) {
				ret = -1;
				done = 1;
			}
		}
	}

	return ret;
}

static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int ret;

	if (qh->dev_speed == USB_SPEED_HIGH) {
		/* If this is a HS transaction we need a full frame */
		ret = dwc2_find_single_uframe(hsotg, qh);
	} else {
		/*
		 * If this is a FS transaction we may need a sequence
		 * of frames
		 */
		ret = dwc2_find_multi_uframe(hsotg, qh);
	}
	return ret;
}

/**
 * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
 * host channel is large enough to handle the maximum data transfer in a single
 * (micro)frame for a periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for a periodic endpoint
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh)
{
	u32 max_xfer_size;
	u32 max_channel_xfer_size;
	int status = 0;

	max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
	max_channel_xfer_size = hsotg->core_params->max_transfer_size;

	if (max_xfer_size > max_channel_xfer_size) {
		dev_err(hsotg->dev,
			"%s: Periodic xfer length %d > max xfer length for channel %d\n",
			__func__, max_xfer_size, max_channel_xfer_size);
		status = -ENOSPC;
	}

	return status;
}

/**
 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
 * the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer. The QH should already contain the
 *         scheduling information.
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	if (hsotg->core_params->uframe_sched > 0) {
		int frame = -1;

		status = dwc2_find_uframe(hsotg, qh);
		if (status == 0)
			frame = 7;
		else if (status > 0)
			frame = status - 1;

		/* Set the new frame up */
		if (frame > -1) {
			qh->sched_frame &= ~0x7;
			qh->sched_frame |= (frame & 7);
		}

		if (status != -1)
			status = 0;
	} else {
		status = dwc2_periodic_channel_available(hsotg);
		if (status) {
			dev_info(hsotg->dev,
				 "%s: No host channel available for periodic transfer\n",
				 __func__);
			return status;
		}

		status = dwc2_check_periodic_bandwidth(hsotg, qh);
	}

	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Insufficient periodic bandwidth for periodic transfer\n",
			__func__);
		return status;
	}

	status = dwc2_check_max_xfer_size(hsotg, qh);
	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Channel max transfer size too small for periodic transfer\n",
			__func__);
		return status;
	}

	if (hsotg->core_params->dma_desc_enable > 0)
		/* Don't rely on SOF and start in ready schedule */
		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->periodic_sched_inactive);

	if (hsotg->core_params->uframe_sched <= 0)
		/* Reserve periodic channel */
		hsotg->periodic_channels++;

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs += qh->usecs;

	return status;
}

/**
 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
 * from the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer
 */
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
				     struct dwc2_qh *qh)
{
	int i;

	list_del_init(&qh->qh_list_entry);

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs -= qh->usecs;

	if (hsotg->core_params->uframe_sched > 0) {
		for (i = 0; i < 8; i++) {
			hsotg->frame_usecs[i] += qh->frame_usecs[i];
			qh->frame_usecs[i] = 0;
		}
	} else {
		/* Release periodic channel reservation */
		hsotg->periodic_channels--;
	}
}

/**
 * dwc2_hcd_qh_add() - Adds a QH to either the non-periodic or periodic
 * schedule if it is not already in the schedule. If the QH is already in
 * the schedule, no action is taken.
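 * Scheduling the first periodic QH also unmasks the SOF interrupt in GINTMSK.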
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to add
 *
 * Return: 0 if successful, negative error code otherwise
 */
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status = 0;
	u32 intr_mask;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!list_empty(&qh->qh_list_entry))
		/* QH already in a schedule */
		return status;

	/* Add the new QH to the appropriate schedule */
	if (dwc2_qh_is_non_per(qh)) {
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->non_periodic_sched_inactive);
	} else {
		status = dwc2_schedule_periodic(hsotg, qh);
		if (status == 0) {
			if (!hsotg->periodic_qh_count) {
				intr_mask = DWC2_READ_4(hsotg, GINTMSK);
				intr_mask |= GINTSTS_SOF;
				DWC2_WRITE_4(hsotg, GINTMSK, intr_mask);
			}
			hsotg->periodic_qh_count++;
		}
	}

	return status;
}

/**
 * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
 * schedule. Memory is not freed.
 *
 * @hsotg: The HCD state structure
 * @qh:    QH to remove from schedule
 */
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	u32 intr_mask;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (list_empty(&qh->qh_list_entry))
		/* QH is not in a schedule */
		return;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
			hsotg->non_periodic_qh_ptr =
					hsotg->non_periodic_qh_ptr->next;
		list_del_init(&qh->qh_list_entry);
	} else {
		dwc2_deschedule_periodic(hsotg, qh);
		hsotg->periodic_qh_count--;
		if (!hsotg->periodic_qh_count) {
			intr_mask = DWC2_READ_4(hsotg, GINTMSK);
			intr_mask &= ~GINTSTS_SOF;
			DWC2_WRITE_4(hsotg, GINTMSK, intr_mask);
		}
	}
}

/*
 * Schedule the next continuing periodic split transfer
 */
static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh, u16 frame_number,
				      int sched_next_periodic_split)
{
	u16 incr;

	if (sched_next_periodic_split) {
		qh->sched_frame = frame_number;
		incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
		if (dwc2_frame_num_le(frame_number, incr)) {
			/*
			 * Allow one frame to elapse after start split
			 * microframe before scheduling complete split, but
			 * DON'T if we are doing the next start split in the
			 * same frame for an ISOC out
			 */
			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
			    qh->ep_is_in != 0) {
				qh->sched_frame =
					dwc2_frame_num_inc(qh->sched_frame, 1);
			}
		}
	} else {
		qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
						     qh->interval);
		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
			qh->sched_frame = frame_number;
		qh->sched_frame |= 0x7;
		qh->start_split_frame = qh->sched_frame;
	}
}

/*
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			    int sched_next_periodic_split)
{
	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (dwc2_qh_is_non_per(qh)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		if (!list_empty(&qh->qtd_list))
			/* Add back to inactive non-periodic schedule */
			dwc2_hcd_qh_add(hsotg, qh);
	} else {
		u16 frame_number = dwc2_hcd_get_frame_number(hsotg);

		if (qh->do_split) {
			dwc2_sched_periodic_split(hsotg, qh, frame_number,
						  sched_next_periodic_split);
		} else {
			qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
							     qh->interval);
			if (dwc2_frame_num_le(qh->sched_frame, frame_number))
				qh->sched_frame = frame_number;
		}

		if (list_empty(&qh->qtd_list)) {
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/*
			 * Remove from periodic_sched_queued and move to
			 * appropriate queue
			 */
			if ((hsotg->core_params->uframe_sched > 0 &&
			     dwc2_frame_num_le(qh->sched_frame, frame_number))
			    || (hsotg->core_params->uframe_sched <= 0 &&
				qh->sched_frame == frame_number))
				list_move(&qh->qh_list_entry,
					  &hsotg->periodic_sched_ready);
			else
				list_move(&qh->qh_list_entry,
					  &hsotg->periodic_sched_inactive);
		}
	}
}

/**
 * dwc2_hcd_qtd_init() - Initializes a QTD structure
 *
 * @qtd: The QTD to initialize
 * @urb: The associated URB
 */
void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
	qtd->urb = urb;
	if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
			USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * The only time the QTD data toggle is used is on the data
		 * phase of control transfers. This phase always starts with
		 * DATA1.
		 */
		qtd->data_toggle = DWC2_HC_PID_DATA1;
		qtd->control_phase = DWC2_CONTROL_SETUP;
	}

	/* Start split */
	qtd->complete_split = 0;
	qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
	qtd->isoc_split_offset = 0;
	qtd->in_process = 0;

	/* Store the qtd ptr in the urb to reference the QTD */
	urb->qtd = qtd;
}

/**
 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
 *
 * @hsotg:     The DWC HCD structure
 * @qtd:       The QTD to add
 * @qh:        Out parameter to return queue head
 * @mem_flags: Flag to do atomic alloc if needed
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Finds the correct QH to place the QTD into. If it does not find a QH, it
 * will create a new QH. If the QH to which the QTD is added is not currently
 * scheduled, it is placed into the proper schedule based on its EP type.
 */
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
		     struct dwc2_qh **qh, gfp_t mem_flags)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	unsigned long flags;
	int allocated = 0;
	int retval;

	/*
	 * Get the QH which holds the QTD-list to insert to. Create QH if it
	 * doesn't exist.
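	 * The QH is passed back through *qh so the caller can queue further
	 * QTDs for the same endpoint without creating a new QH.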
	 */
	if (*qh == NULL) {
		*qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
		if (*qh == NULL)
			return -ENOMEM;
		allocated = 1;
	}

	spin_lock_irqsave(&hsotg->lock, flags);

	retval = dwc2_hcd_qh_add(hsotg, *qh);
	if (retval)
		goto fail;

	qtd->qh = *qh;
	list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return 0;

fail:
	if (allocated) {
		struct dwc2_qtd *qtd2, *qtd2_tmp;
		struct dwc2_qh *qh_tmp = *qh;

		*qh = NULL;
		dwc2_hcd_qh_unlink(hsotg, qh_tmp);

		/* Free each QTD in the QH's QTD list */
		list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list,
					 qtd_list_entry)
			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);

		spin_unlock_irqrestore(&hsotg->lock, flags);
		dwc2_hcd_qh_free(hsotg, qh_tmp);
	} else {
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	return retval;
}

void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
				  struct dwc2_qtd *qtd,
				  struct dwc2_qh *qh)
{
	struct dwc2_softc *sc = hsotg->hsotg_sc;

	list_del_init(&qtd->qtd_list_entry);
	pool_cache_put(sc->sc_qtdpool, qtd);
}

#define BITSTUFFTIME(bytecount)	((8 * 7 * (bytecount)) / 6)
#define HS_HOST_DELAY		5	/* nanoseconds */
#define FS_LS_HOST_DELAY	1000	/* nanoseconds */
#define HUB_LS_SETUP		333	/* nanoseconds */

static u32 dwc2_calc_bus_time(struct dwc2_hsotg *hsotg, int speed, int is_in,
			      int is_isoc, int bytecount)
{
	unsigned long retval;

	switch (speed) {
	case USB_SPEED_HIGH:
		if (is_isoc)
			retval =
			    ((38 * 8 * 2083) +
			     (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
			    HS_HOST_DELAY;
		else
			retval =
			    ((55 * 8 * 2083) +
			     (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
			    HS_HOST_DELAY;
		break;
	case USB_SPEED_FULL:
		if (is_isoc) {
			retval =
			    (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
			if (is_in)
				retval = 7268 + FS_LS_HOST_DELAY + retval;
			else
				retval = 6265 + FS_LS_HOST_DELAY + retval;
		} else {
			retval =
			    (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
			retval = 9107 + FS_LS_HOST_DELAY + retval;
		}
		break;
	case USB_SPEED_LOW:
		if (is_in) {
			retval =
			    (67667 * (31 + 10 * BITSTUFFTIME(bytecount))) /
			    1000;
			retval =
			    64060 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
			    retval;
		} else {
			retval =
			    (66700 * (31 + 10 * BITSTUFFTIME(bytecount))) /
			    1000;
			retval =
			    64107 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
			    retval;
		}
		break;
	default:
		dev_warn(hsotg->dev, "Unknown device speed\n");
		retval = -1;
	}

	return NS_TO_US(retval);
}