/* $NetBSD: dwc2_hcdqueue.c,v 1.14 2016/02/14 10:53:30 skrll Exp $ */

/*
 * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the functions to manage Queue Heads and Queue
 * Transfer Descriptors for Host mode
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2_hcdqueue.c,v 1.14 2016/02/14 10:53:30 skrll Exp $");

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/pool.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <machine/param.h>

#include <linux/kernel.h>

#include <dwc2/dwc2.h>
#include <dwc2/dwc2var.h>

#include "dwc2_core.h"
#include "dwc2_hcd.h"

static u32 dwc2_calc_bus_time(struct dwc2_hsotg *, int, int, int, int);

/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 * @urb:   Holds the information about the device/endpoint needed to
 *         initialize the QH
 */
#define SCHEDULE_SLOP 10
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb)
{
	int dev_speed, hub_addr, hub_port;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Initialize QH */
	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
	    1 : 0;

	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
	qh->nak_frame = 0xffff;

	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
	    hub_addr != 0 && hub_addr != 1) {
		dev_vdbg(hsotg->dev,
			 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
			 hub_port);
		qh->do_split = 1;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* Compute scheduling parameters once and save them */
		u32 hprt, prtspd;

		/* Todo: Account for split transfers in the bus time */
		int bytecount =
			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);

		qh->usecs = dwc2_calc_bus_time(hsotg, qh->do_split ?
				USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
				qh->ep_type == USB_ENDPOINT_XFER_ISOC,
				bytecount);

		/* Ensure frame_number corresponds to reality */
		hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
		/* Start in a slightly future (micro)frame */
		qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
						     SCHEDULE_SLOP);
		qh->interval = urb->interval;
#if 0
		/* Increase interrupt polling rate for debugging */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
			qh->interval = 8;
#endif
		hprt = DWC2_READ_4(hsotg, HPRT0);
		prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
		    (dev_speed == USB_SPEED_LOW ||
		     dev_speed == USB_SPEED_FULL)) {
			qh->interval *= 8;
			qh->sched_frame |= 0x7;
			qh->start_split_frame = qh->sched_frame;
		}
		dev_dbg(hsotg->dev, "interval=%d\n", qh->interval);
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
		 dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
		 dwc2_hcd_get_ep_num(&urb->pipe_info),
		 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	qh->dev_speed = dev_speed;

#ifdef DWC2_DEBUG
	const char *speed, *type;

	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);
#endif

	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
			 qh->usecs);
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
			 qh->interval);
	}
}

/**
 * dwc2_hcd_qh_create() - Allocates and initializes a QH
 *
 * @hsotg:     The HCD state structure for the DWC OTG controller
 * @urb:       Holds the information about the device/endpoint needed
 *             to initialize the QH
 * @mem_flags: Flag to do atomic allocation if needed
 *
 * Return: Pointer to the newly allocated QH, or NULL on error
 */
struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
				   struct dwc2_hcd_urb *urb,
				   gfp_t mem_flags)
{
	struct dwc2_softc *sc = hsotg->hsotg_sc;
	struct dwc2_qh *qh;

	if (!urb->priv)
		return NULL;

	/* Allocate memory */
	qh = pool_cache_get(sc->sc_qhpool, PR_NOWAIT);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	dwc2_qh_init(hsotg, qh, urb);

	if (hsotg->core_params->dma_desc_enable > 0 &&
	    dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
		dwc2_hcd_qh_free(hsotg, qh);
		return NULL;
	}

	return qh;
}

/**
 * dwc2_hcd_qh_free() - Frees the QH
 *
 * @hsotg: HCD instance
 * @qh:    The QH to free
 *
 * QH should already be removed from the list. QTD list should already be empty
 * if called from URB Dequeue.
 *
 * Must NOT be called with interrupts disabled or a spinlock held
 */
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	struct dwc2_softc *sc = hsotg->hsotg_sc;

	if (qh->desc_list) {
		dwc2_hcd_qh_free_ddma(hsotg, qh);
	} else if (qh->dw_align_buf) {
		usb_freemem(&sc->sc_bus, &qh->dw_align_buf_usbdma);
		qh->dw_align_buf_dma = (dma_addr_t)0;
	}

	pool_cache_put(sc->sc_qhpool, qh);
}

/**
 * dwc2_periodic_channel_available() - Checks that a channel is available for a
 * periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
{
	/*
	 * Currently assuming that there is a dedicated host channel for
	 * each periodic transaction plus at least one host channel for
	 * non-periodic transactions
	 */
	int status;
	int num_channels;

	num_channels = hsotg->core_params->host_channels;
	if (hsotg->periodic_channels + hsotg->non_periodic_channels <
	    num_channels
	    && hsotg->periodic_channels < num_channels - 1) {
		status = 0;
	} else {
		dev_dbg(hsotg->dev,
			"%s: Total channels: %d, Periodic: %d, "
			"Non-periodic: %d\n", __func__, num_channels,
			hsotg->periodic_channels, hsotg->non_periodic_channels);
		status = -ENOSPC;
	}

	return status;
}

/**
 * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
 * for the specified QH in the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH containing periodic bandwidth required
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For simplicity, this calculation assumes that all the transfers in the
 * periodic schedule may occur in the same (micro)frame
 */
static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
					 struct dwc2_qh *qh)
{
	int status;
	s16 max_claimed_usecs;

	status = 0;

	if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
		/*
		 * High speed mode
		 * Max periodic usecs is 80% x 125 usec = 100 usec
		 */
		max_claimed_usecs = 100 - qh->usecs;
	} else {
		/*
		 * Full speed mode
		 * Max periodic usecs is 90% x 1000 usec = 900 usec
		 */
		max_claimed_usecs = 900 - qh->usecs;
	}

	if (hsotg->periodic_usecs > max_claimed_usecs) {
		dev_err(hsotg->dev,
			"%s: already claimed usecs %d, required usecs %d\n",
			__func__, hsotg->periodic_usecs, qh->usecs);
		status = -ENOSPC;
	}

	return status;
}

/**
 * Microframe scheduler
 * track the total use in hsotg->frame_usecs
 * keep each qh use in qh->frame_usecs
 * when surrendering the qh then donate the time back
 */
static const unsigned short max_uframe_usecs[] = {
	100, 100, 100, 100, 100, 100, 30, 0
};

void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
{
	int i;

	for (i = 0; i < 8; i++)
		hsotg->frame_usecs[i] = max_uframe_usecs[i];
}

static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->usecs;
	int i;

	for (i = 0; i < 8; i++) {
		/* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
		if (utime <= hsotg->frame_usecs[i]) {
			hsotg->frame_usecs[i] -= utime;
			qh->frame_usecs[i] += utime;
			return i;
		}
	}
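	/* No single microframe has enough free time left for this QH */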
	return -ENOSPC;
}

/*
 * use this for FS apps that can span multiple uframes
 */
static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->usecs;
	unsigned short xtime;
	int t_left;
	int i;
	int j;
	int k;

	for (i = 0; i < 8; i++) {
		if (hsotg->frame_usecs[i] <= 0)
			continue;

		/*
		 * we need n consecutive slots so use j as a start slot
		 * j plus j+1 must be enough time (for now)
		 */
		xtime = hsotg->frame_usecs[i];
		for (j = i + 1; j < 8; j++) {
			/*
			 * if we add this frame remaining time to xtime we may
			 * be OK, if not we need to test j for a complete frame
			 */
			if (xtime + hsotg->frame_usecs[j] < utime) {
				if (hsotg->frame_usecs[j] <
						max_uframe_usecs[j])
					continue;
			}
			if (xtime >= utime) {
				t_left = utime;
				for (k = i; k < 8; k++) {
					t_left -= hsotg->frame_usecs[k];
					if (t_left <= 0) {
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k]
								+ t_left;
						hsotg->frame_usecs[k] = -t_left;
						return i;
					} else {
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k];
						hsotg->frame_usecs[k] = 0;
					}
				}
			}
			/* add the frame time to x time */
			xtime += hsotg->frame_usecs[j];
			/* we must have a fully available next frame or break */
			if (xtime < utime &&
			    hsotg->frame_usecs[j] == max_uframe_usecs[j])
				continue;
		}
	}
	return -ENOSPC;
}

static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int ret;

	if (qh->dev_speed == USB_SPEED_HIGH) {
		/* if this is a hs transaction we need a full frame */
		ret = dwc2_find_single_uframe(hsotg, qh);
	} else {
		/*
		 * if this is a fs transaction we may need a sequence
		 * of frames
		 */
		ret = dwc2_find_multi_uframe(hsotg, qh);
	}
	return ret;
}

/**
 * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
 * host channel is large enough to handle the maximum data transfer in a single
 * (micro)frame for a periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for a periodic endpoint
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh)
{
	u32 max_xfer_size;
	u32 max_channel_xfer_size;
	int status = 0;

	max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
	max_channel_xfer_size = hsotg->core_params->max_transfer_size;

	if (max_xfer_size > max_channel_xfer_size) {
		dev_err(hsotg->dev,
			"%s: Periodic xfer length %d > max xfer length for channel %d\n",
			__func__, max_xfer_size, max_channel_xfer_size);
		status = -ENOSPC;
	}

	return status;
}

/**
 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
 * the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer. The QH should already contain the
 *         scheduling information.
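 *
 * With the microframe scheduler enabled (uframe_sched > 0), a free
 * (micro)frame is chosen via dwc2_find_uframe() and encoded into the low
 * three bits of qh->sched_frame. Otherwise a dedicated periodic host channel
 * and the aggregate periodic bandwidth are checked instead.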
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	if (hsotg->core_params->uframe_sched > 0) {
		int frame = -1;

		status = dwc2_find_uframe(hsotg, qh);
		if (status == 0)
			frame = 7;
		else if (status > 0)
			frame = status - 1;

		/* Set the new frame up */
		if (frame >= 0) {
			qh->sched_frame &= ~0x7;
			qh->sched_frame |= (frame & 7);
		}

		if (status > 0)
			status = 0;
	} else {
		status = dwc2_periodic_channel_available(hsotg);
		if (status) {
			dev_info(hsotg->dev,
				 "%s: No host channel available for periodic transfer\n",
				 __func__);
			return status;
		}

		status = dwc2_check_periodic_bandwidth(hsotg, qh);
	}

	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Insufficient periodic bandwidth for periodic transfer\n",
			__func__);
		return status;
	}

	status = dwc2_check_max_xfer_size(hsotg, qh);
	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Channel max transfer size too small for periodic transfer\n",
			__func__);
		return status;
	}

	if (hsotg->core_params->dma_desc_enable > 0)
		/* Don't rely on SOF and start in ready schedule */
		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->periodic_sched_inactive);

	if (hsotg->core_params->uframe_sched <= 0)
		/* Reserve periodic channel */
		hsotg->periodic_channels++;

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs += qh->usecs;

	return status;
}

/**
 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
 * from the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer
 */
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
				     struct dwc2_qh *qh)
{
	int i;

	list_del_init(&qh->qh_list_entry);

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs -= qh->usecs;

	if (hsotg->core_params->uframe_sched > 0) {
		for (i = 0; i < 8; i++) {
			hsotg->frame_usecs[i] += qh->frame_usecs[i];
			qh->frame_usecs[i] = 0;
		}
	} else {
		/* Release periodic channel reservation */
		hsotg->periodic_channels--;
	}
}

/**
 * dwc2_hcd_qh_add() - Adds a QH to either the non-periodic or periodic
 * schedule if it is not already in the schedule. If the QH is already in
 * the schedule, no action is taken.
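 *
 * When the first periodic QH is added, the SOF interrupt is unmasked in
 * GINTMSK; dwc2_hcd_qh_unlink() masks it again once the last periodic QH has
 * been removed.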
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to add
 *
 * Return: 0 if successful, negative error code otherwise
 */
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;
	u32 intr_mask;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!list_empty(&qh->qh_list_entry))
		/* QH already in a schedule */
		return 0;

	if (!dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number) &&
	    !hsotg->frame_number) {
		dev_dbg(hsotg->dev,
			"reset frame number counter\n");
		qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
						     SCHEDULE_SLOP);
	}

	/* Add the new QH to the appropriate schedule */
	if (dwc2_qh_is_non_per(qh)) {
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->non_periodic_sched_inactive);
		return 0;
	}

	status = dwc2_schedule_periodic(hsotg, qh);
	if (status)
		return status;
	if (!hsotg->periodic_qh_count) {
		intr_mask = DWC2_READ_4(hsotg, GINTMSK);
		intr_mask |= GINTSTS_SOF;
		DWC2_WRITE_4(hsotg, GINTMSK, intr_mask);
	}
	hsotg->periodic_qh_count++;

	return 0;
}

/**
 * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
 * schedule. Memory is not freed.
 *
 * @hsotg: The HCD state structure
 * @qh:    QH to remove from schedule
 */
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	u32 intr_mask;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (list_empty(&qh->qh_list_entry))
		/* QH is not in a schedule */
		return;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
			hsotg->non_periodic_qh_ptr =
					hsotg->non_periodic_qh_ptr->next;
		list_del_init(&qh->qh_list_entry);
		return;
	}

	dwc2_deschedule_periodic(hsotg, qh);
	hsotg->periodic_qh_count--;
	if (!hsotg->periodic_qh_count) {
		intr_mask = DWC2_READ_4(hsotg, GINTMSK);
		intr_mask &= ~GINTSTS_SOF;
		DWC2_WRITE_4(hsotg, GINTMSK, intr_mask);
	}
}

/*
 * Schedule the next continuing periodic split transfer
 */
static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh, u16 frame_number,
				      int sched_next_periodic_split)
{
	u16 incr;

	if (sched_next_periodic_split) {
		qh->sched_frame = frame_number;
		incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
		if (dwc2_frame_num_le(frame_number, incr)) {
			/*
			 * Allow one frame to elapse after start split
			 * microframe before scheduling complete split, but
			 * DON'T if we are doing the next start split in the
			 * same frame for an ISOC out
			 */
			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
			    qh->ep_is_in != 0) {
				qh->sched_frame =
					dwc2_frame_num_inc(qh->sched_frame, 1);
			}
		}
	} else {
		qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
						     qh->interval);
		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
			qh->sched_frame = frame_number;
		qh->sched_frame |= 0x7;
		qh->start_split_frame = qh->sched_frame;
	}
}

/*
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule.
 * If there are any QTDs still attached to the QH, the QH is added to either
 * the periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			    int sched_next_periodic_split)
{
	u16 frame_number;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (dwc2_qh_is_non_per(qh)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		if (!list_empty(&qh->qtd_list))
			/* Add back to inactive non-periodic schedule */
			dwc2_hcd_qh_add(hsotg, qh);
		return;
	}

	frame_number = dwc2_hcd_get_frame_number(hsotg);

	if (qh->do_split) {
		dwc2_sched_periodic_split(hsotg, qh, frame_number,
					  sched_next_periodic_split);
	} else {
		qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
						     qh->interval);
		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
			qh->sched_frame = frame_number;
	}

	if (list_empty(&qh->qtd_list)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		return;
	}
	/*
	 * Remove from periodic_sched_queued and move to
	 * appropriate queue
	 */
	if ((hsotg->core_params->uframe_sched > 0 &&
	     dwc2_frame_num_le(qh->sched_frame, frame_number)) ||
	    (hsotg->core_params->uframe_sched <= 0 &&
	     qh->sched_frame == frame_number))
		list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive);
}

/**
 * dwc2_hcd_qtd_init() - Initializes a QTD structure
 *
 * @qtd: The QTD to initialize
 * @urb: The associated URB
 */
void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
	qtd->urb = urb;
	if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
			USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * The only time the QTD data toggle is used is on the data
		 * phase of control transfers. This phase always starts with
		 * DATA1.
		 */
		qtd->data_toggle = DWC2_HC_PID_DATA1;
		qtd->control_phase = DWC2_CONTROL_SETUP;
	}

	/* Start split */
	qtd->complete_split = 0;
	qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
	qtd->isoc_split_offset = 0;
	qtd->in_process = 0;

	/* Store the qtd ptr in the urb to reference the QTD */
	urb->qtd = qtd;
}

/**
 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
 * Caller must hold driver lock.
 *
 * @hsotg: The DWC HCD structure
 * @qtd:   The QTD to add
 * @qh:    Queue head to add qtd to
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * If the QH to which the QTD is added is not currently scheduled, it is placed
 * into the proper schedule based on its EP type.
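 *
 * The QTD is linked onto qh->qtd_list only after the QH has been successfully
 * added to a schedule by dwc2_hcd_qh_add().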
 */
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
		     struct dwc2_qh *qh)
{
	int retval;

	KASSERT(mutex_owned(&hsotg->lock));

	if (unlikely(!qh)) {
		dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
		retval = -EINVAL;
		goto fail;
	}

	retval = dwc2_hcd_qh_add(hsotg, qh);
	if (retval)
		goto fail;

	qtd->qh = qh;
	list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);

	return 0;
fail:
	return retval;
}

void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
				  struct dwc2_qtd *qtd,
				  struct dwc2_qh *qh)
{
	struct dwc2_softc *sc = hsotg->hsotg_sc;

	list_del_init(&qtd->qtd_list_entry);
	pool_cache_put(sc->sc_qtdpool, qtd);
}

#define BITSTUFFTIME(bytecount)	((8 * 7 * (bytecount)) / 6)
#define HS_HOST_DELAY		5	/* nanoseconds */
#define FS_LS_HOST_DELAY	1000	/* nanoseconds */
#define HUB_LS_SETUP		333	/* nanoseconds */

static u32 dwc2_calc_bus_time(struct dwc2_hsotg *hsotg, int speed, int is_in,
			      int is_isoc, int bytecount)
{
	unsigned long retval;

	switch (speed) {
	case USB_SPEED_HIGH:
		if (is_isoc)
			retval =
			    ((38 * 8 * 2083) +
			     (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
			    HS_HOST_DELAY;
		else
			retval =
			    ((55 * 8 * 2083) +
			     (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
			    HS_HOST_DELAY;
		break;
	case USB_SPEED_FULL:
		if (is_isoc) {
			retval =
			    (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
			if (is_in)
				retval = 7268 + FS_LS_HOST_DELAY + retval;
			else
				retval = 6265 + FS_LS_HOST_DELAY + retval;
		} else {
			retval =
			    (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
			retval = 9107 + FS_LS_HOST_DELAY + retval;
		}
		break;
	case USB_SPEED_LOW:
		if (is_in) {
			retval =
			    (67667 * (31 + 10 * BITSTUFFTIME(bytecount))) /
			    1000;
			retval =
			    64060 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
			    retval;
		} else {
			retval =
			    (66700 * (31 + 10 * BITSTUFFTIME(bytecount))) /
			    1000;
			retval =
			    64107 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
			    retval;
		}
		break;
	default:
		dev_warn(hsotg->dev, "Unknown device speed\n");
		retval = -1;
	}

	return NS_TO_US(retval);
}