/*	$OpenBSD: dwc2_core.c,v 1.11 2021/07/27 13:36:59 mglocker Exp $	*/
/*	$NetBSD: dwc2_core.c,v 1.6 2014/04/03 06:34:58 skrll Exp $	*/

/*
 * core.c - DesignWare HS OTG Controller common routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The Core code provides basic services for accessing and managing the
 * DWC_otg hardware. These services are used by both the Host Controller
 * Driver and the Peripheral Controller Driver.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/timeout.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/task.h>

#include <machine/bus.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <dev/usb/dwc2/dwc2.h>
#include <dev/usb/dwc2/dwc2var.h>

#include <dev/usb/dwc2/dwc2_core.h>
#include <dev/usb/dwc2/dwc2_hcd.h>

#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_host_registers() - Backup controller host registers.
 * When suspending usb bus, registers need to be backed up
 * if controller power is disabled once suspended.
73 * 74 * @hsotg: Programming view of the DWC_otg controller 75 */ 76 STATIC int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) 77 { 78 struct dwc2_hregs_backup *hr; 79 int i; 80 81 dev_dbg(hsotg->dev, "%s\n", __func__); 82 83 /* Backup Host regs */ 84 hr = &hsotg->hr_backup; 85 hr->hcfg = DWC2_READ_4(hsotg, HCFG); 86 hr->haintmsk = DWC2_READ_4(hsotg, HAINTMSK); 87 for (i = 0; i < hsotg->core_params->host_channels; ++i) 88 hr->hcintmsk[i] = DWC2_READ_4(hsotg, HCINTMSK(i)); 89 90 hr->hprt0 = DWC2_READ_4(hsotg, HPRT0); 91 hr->hfir = DWC2_READ_4(hsotg, HFIR); 92 hr->valid = true; 93 94 return 0; 95 } 96 97 /** 98 * dwc2_restore_host_registers() - Restore controller host registers. 99 * When resuming usb bus, device registers needs to be restored 100 * if controller power were disabled. 101 * 102 * @hsotg: Programming view of the DWC_otg controller 103 */ 104 STATIC int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) 105 { 106 struct dwc2_hregs_backup *hr; 107 int i; 108 109 dev_dbg(hsotg->dev, "%s\n", __func__); 110 111 /* Restore host regs */ 112 hr = &hsotg->hr_backup; 113 if (!hr->valid) { 114 dev_err(hsotg->dev, "%s: no host registers to restore\n", 115 __func__); 116 return -EINVAL; 117 } 118 hr->valid = false; 119 120 DWC2_WRITE_4(hsotg, HCFG, hr->hcfg); 121 DWC2_WRITE_4(hsotg, HAINTMSK, hr->haintmsk); 122 123 for (i = 0; i < hsotg->core_params->host_channels; ++i) 124 DWC2_WRITE_4(hsotg, HCINTMSK(i), hr->hcintmsk[i]); 125 126 DWC2_WRITE_4(hsotg, HPRT0, hr->hprt0); 127 DWC2_WRITE_4(hsotg, HFIR, hr->hfir); 128 hsotg->frame_number = 0; 129 130 return 0; 131 } 132 #else 133 static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) 134 { return 0; } 135 136 static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) 137 { return 0; } 138 #endif 139 140 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ 141 IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 142 /** 143 * dwc2_backup_device_registers() - Backup controller device registers. 
144 * When suspending usb bus, registers needs to be backuped 145 * if controller power is disabled once suspended. 146 * 147 * @hsotg: Programming view of the DWC_otg controller 148 */ 149 STATIC int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) 150 { 151 struct dwc2_dregs_backup *dr; 152 int i; 153 154 dev_dbg(hsotg->dev, "%s\n", __func__); 155 156 /* Backup dev regs */ 157 dr = &hsotg->dr_backup; 158 159 dr->dcfg = DWC2_READ_4(hsotg, DCFG); 160 dr->dctl = DWC2_READ_4(hsotg, DCTL); 161 dr->daintmsk = DWC2_READ_4(hsotg, DAINTMSK); 162 dr->diepmsk = DWC2_READ_4(hsotg, DIEPMSK); 163 dr->doepmsk = DWC2_READ_4(hsotg, DOEPMSK); 164 165 for (i = 0; i < hsotg->num_of_eps; i++) { 166 /* Backup IN EPs */ 167 dr->diepctl[i] = DWC2_READ_4(hsotg, DIEPCTL(i)); 168 169 /* Ensure DATA PID is correctly configured */ 170 if (dr->diepctl[i] & DXEPCTL_DPID) 171 dr->diepctl[i] |= DXEPCTL_SETD1PID; 172 else 173 dr->diepctl[i] |= DXEPCTL_SETD0PID; 174 175 dr->dieptsiz[i] = DWC2_READ_4(hsotg, DIEPTSIZ(i)); 176 dr->diepdma[i] = DWC2_READ_4(hsotg, DIEPDMA(i)); 177 178 /* Backup OUT EPs */ 179 dr->doepctl[i] = DWC2_READ_4(hsotg, DOEPCTL(i)); 180 181 /* Ensure DATA PID is correctly configured */ 182 if (dr->doepctl[i] & DXEPCTL_DPID) 183 dr->doepctl[i] |= DXEPCTL_SETD1PID; 184 else 185 dr->doepctl[i] |= DXEPCTL_SETD0PID; 186 187 dr->doeptsiz[i] = DWC2_READ_4(hsotg, DOEPTSIZ(i)); 188 dr->doepdma[i] = DWC2_READ_4(hsotg, DOEPDMA(i)); 189 } 190 dr->valid = true; 191 return 0; 192 } 193 194 /** 195 * dwc2_restore_device_registers() - Restore controller device registers. 196 * When resuming usb bus, device registers needs to be restored 197 * if controller power were disabled. 
198 * 199 * @hsotg: Programming view of the DWC_otg controller 200 */ 201 STATIC int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) 202 { 203 struct dwc2_dregs_backup *dr; 204 u32 dctl; 205 int i; 206 207 dev_dbg(hsotg->dev, "%s\n", __func__); 208 209 /* Restore dev regs */ 210 dr = &hsotg->dr_backup; 211 if (!dr->valid) { 212 dev_err(hsotg->dev, "%s: no device registers to restore\n", 213 __func__); 214 return -EINVAL; 215 } 216 dr->valid = false; 217 218 DWC2_WRITE_4(hsotg, DCFG, dr->dcfg); 219 DWC2_WRITE_4(hsotg, DCTL, dr->dctl); 220 DWC2_WRITE_4(hsotg, DAINTMSK, dr->daintmsk); 221 DWC2_WRITE_4(hsotg, DIEPMSK, dr->diepmsk); 222 DWC2_WRITE_4(hsotg, DOEPMSK, dr->doepmsk); 223 224 for (i = 0; i < hsotg->num_of_eps; i++) { 225 /* Restore IN EPs */ 226 DWC2_WRITE_4(hsotg, DIEPCTL(i), dr->diepctl[i]); 227 DWC2_WRITE_4(hsotg, DIEPTSIZ(i), dr->dieptsiz[i]); 228 DWC2_WRITE_4(hsotg, DIEPDMA(i), dr->diepdma[i]); 229 230 /* Restore OUT EPs */ 231 DWC2_WRITE_4(hsotg, DOEPCTL(i), dr->doepctl[i]); 232 DWC2_WRITE_4(hsotg, DOEPTSIZ(i), dr->doeptsiz[i]); 233 DWC2_WRITE_4(hsotg, DOEPDMA(i), dr->doepdma[i]); 234 } 235 236 /* Set the Power-On Programming done bit */ 237 dctl = DWC2_READ_4(hsotg, DCTL); 238 dctl |= DCTL_PWRONPRGDONE; 239 DWC2_WRITE_4(hsotg, DCTL, dctl); 240 241 return 0; 242 } 243 #else 244 static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) 245 { return 0; } 246 247 static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) 248 { return 0; } 249 #endif 250 251 /** 252 * dwc2_backup_global_registers() - Backup global controller registers. 253 * When suspending usb bus, registers needs to be backuped 254 * if controller power is disabled once suspended. 
255 * 256 * @hsotg: Programming view of the DWC_otg controller 257 */ 258 STATIC int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg) 259 { 260 struct dwc2_gregs_backup *gr; 261 int i; 262 263 /* Backup global regs */ 264 gr = &hsotg->gr_backup; 265 266 gr->gotgctl = DWC2_READ_4(hsotg, GOTGCTL); 267 gr->gintmsk = DWC2_READ_4(hsotg, GINTMSK); 268 gr->gahbcfg = DWC2_READ_4(hsotg, GAHBCFG); 269 gr->gusbcfg = DWC2_READ_4(hsotg, GUSBCFG); 270 gr->grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ); 271 gr->gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ); 272 gr->hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ); 273 gr->gdfifocfg = DWC2_READ_4(hsotg, GDFIFOCFG); 274 for (i = 0; i < MAX_EPS_CHANNELS; i++) 275 gr->dtxfsiz[i] = DWC2_READ_4(hsotg, DPTXFSIZN(i)); 276 277 gr->valid = true; 278 return 0; 279 } 280 281 /** 282 * dwc2_restore_global_registers() - Restore controller global registers. 283 * When resuming usb bus, device registers needs to be restored 284 * if controller power were disabled. 285 * 286 * @hsotg: Programming view of the DWC_otg controller 287 */ 288 STATIC int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg) 289 { 290 struct dwc2_gregs_backup *gr; 291 int i; 292 293 dev_dbg(hsotg->dev, "%s\n", __func__); 294 295 /* Restore global regs */ 296 gr = &hsotg->gr_backup; 297 if (!gr->valid) { 298 dev_err(hsotg->dev, "%s: no global registers to restore\n", 299 __func__); 300 return -EINVAL; 301 } 302 gr->valid = false; 303 304 DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff); 305 DWC2_WRITE_4(hsotg, GOTGCTL, gr->gotgctl); 306 DWC2_WRITE_4(hsotg, GINTMSK, gr->gintmsk); 307 DWC2_WRITE_4(hsotg, GUSBCFG, gr->gusbcfg); 308 DWC2_WRITE_4(hsotg, GAHBCFG, gr->gahbcfg); 309 DWC2_WRITE_4(hsotg, GRXFSIZ, gr->grxfsiz); 310 DWC2_WRITE_4(hsotg, GNPTXFSIZ, gr->gnptxfsiz); 311 DWC2_WRITE_4(hsotg, HPTXFSIZ, gr->hptxfsiz); 312 DWC2_WRITE_4(hsotg, GDFIFOCFG, gr->gdfifocfg); 313 for (i = 0; i < MAX_EPS_CHANNELS; i++) 314 DWC2_WRITE_4(hsotg, DPTXFSIZN(i), gr->dtxfsiz[i]); 315 316 return 0; 317 } 318 319 
/** 320 * dwc2_exit_hibernation() - Exit controller from Partial Power Down. 321 * 322 * @hsotg: Programming view of the DWC_otg controller 323 * @restore: Controller registers need to be restored 324 */ 325 int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore) 326 { 327 u32 pcgcctl; 328 int ret = 0; 329 330 if (!hsotg->core_params->hibernation) 331 return -ENOTSUP; 332 333 pcgcctl = DWC2_READ_4(hsotg, PCGCTL); 334 pcgcctl &= ~PCGCTL_STOPPCLK; 335 DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl); 336 337 pcgcctl = DWC2_READ_4(hsotg, PCGCTL); 338 pcgcctl &= ~PCGCTL_PWRCLMP; 339 DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl); 340 341 pcgcctl = DWC2_READ_4(hsotg, PCGCTL); 342 pcgcctl &= ~PCGCTL_RSTPDWNMODULE; 343 DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl); 344 345 udelay(100); 346 if (restore) { 347 ret = dwc2_restore_global_registers(hsotg); 348 if (ret) { 349 dev_err(hsotg->dev, "%s: failed to restore registers\n", 350 __func__); 351 return ret; 352 } 353 if (dwc2_is_host_mode(hsotg)) { 354 ret = dwc2_restore_host_registers(hsotg); 355 if (ret) { 356 dev_err(hsotg->dev, "%s: failed to restore host registers\n", 357 __func__); 358 return ret; 359 } 360 } else { 361 ret = dwc2_restore_device_registers(hsotg); 362 if (ret) { 363 dev_err(hsotg->dev, "%s: failed to restore device registers\n", 364 __func__); 365 return ret; 366 } 367 } 368 } 369 370 return ret; 371 } 372 373 /** 374 * dwc2_enter_hibernation() - Put controller in Partial Power Down. 
375 * 376 * @hsotg: Programming view of the DWC_otg controller 377 */ 378 int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg) 379 { 380 u32 pcgcctl; 381 int ret = 0; 382 383 if (!hsotg->core_params->hibernation) 384 return -ENOTSUP; 385 386 /* Backup all registers */ 387 ret = dwc2_backup_global_registers(hsotg); 388 if (ret) { 389 dev_err(hsotg->dev, "%s: failed to backup global registers\n", 390 __func__); 391 return ret; 392 } 393 394 if (dwc2_is_host_mode(hsotg)) { 395 ret = dwc2_backup_host_registers(hsotg); 396 if (ret) { 397 dev_err(hsotg->dev, "%s: failed to backup host registers\n", 398 __func__); 399 return ret; 400 } 401 } else { 402 ret = dwc2_backup_device_registers(hsotg); 403 if (ret) { 404 dev_err(hsotg->dev, "%s: failed to backup device registers\n", 405 __func__); 406 return ret; 407 } 408 } 409 410 /* 411 * Clear any pending interrupts since dwc2 will not be able to 412 * clear them after entering hibernation. 413 */ 414 DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff); 415 416 /* Put the controller in low power state */ 417 pcgcctl = DWC2_READ_4(hsotg, PCGCTL); 418 419 pcgcctl |= PCGCTL_PWRCLMP; 420 DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl); 421 ndelay(20); 422 423 pcgcctl |= PCGCTL_RSTPDWNMODULE; 424 DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl); 425 ndelay(20); 426 427 pcgcctl |= PCGCTL_STOPPCLK; 428 DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl); 429 430 return ret; 431 } 432 433 /** 434 * dwc2_enable_common_interrupts() - Initializes the commmon interrupts, 435 * used in both device and host modes 436 * 437 * @hsotg: Programming view of the DWC_otg controller 438 */ 439 STATIC void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg) 440 { 441 u32 intmsk; 442 443 /* Clear any pending OTG Interrupts */ 444 DWC2_WRITE_4(hsotg, GOTGINT, 0xffffffff); 445 446 /* Clear any pending interrupts */ 447 DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff); 448 449 /* Enable the interrupts in the GINTMSK */ 450 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT; 451 452 if 
(hsotg->core_params->dma_enable <= 0) 453 intmsk |= GINTSTS_RXFLVL; 454 if (hsotg->core_params->external_id_pin_ctl <= 0) 455 intmsk |= GINTSTS_CONIDSTSCHNG; 456 457 intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP | 458 GINTSTS_SESSREQINT; 459 460 DWC2_WRITE_4(hsotg, GINTMSK, intmsk); 461 } 462 463 /* 464 * Initializes the FSLSPClkSel field of the HCFG register depending on the 465 * PHY type 466 */ 467 STATIC void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg) 468 { 469 u32 hcfg, val; 470 471 if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && 472 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && 473 hsotg->core_params->ulpi_fs_ls > 0) || 474 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { 475 /* Full speed PHY */ 476 val = HCFG_FSLSPCLKSEL_48_MHZ; 477 } else { 478 /* High speed PHY running at full speed or high speed */ 479 val = HCFG_FSLSPCLKSEL_30_60_MHZ; 480 } 481 482 dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val); 483 hcfg = DWC2_READ_4(hsotg, HCFG); 484 hcfg &= ~HCFG_FSLSPCLKSEL_MASK; 485 hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT; 486 DWC2_WRITE_4(hsotg, HCFG, hcfg); 487 } 488 489 /* 490 * Do core a soft reset of the core. Be careful with this because it 491 * resets all the internal state machines of the core. 492 */ 493 int dwc2_core_reset(struct dwc2_hsotg *hsotg) 494 { 495 u32 greset; 496 int count = 0; 497 498 dev_vdbg(hsotg->dev, "%s()\n", __func__); 499 500 /* Core Soft Reset */ 501 greset = DWC2_READ_4(hsotg, GRSTCTL); 502 greset |= GRSTCTL_CSFTRST; 503 DWC2_WRITE_4(hsotg, GRSTCTL, greset); 504 do { 505 udelay(1); 506 greset = DWC2_READ_4(hsotg, GRSTCTL); 507 if (++count > 50) { 508 dev_warn(hsotg->dev, 509 "%s() HANG! 
Soft Reset GRSTCTL=%0x\n", 510 __func__, greset); 511 return -EBUSY; 512 } 513 } while (greset & GRSTCTL_CSFTRST); 514 515 /* Wait for AHB master IDLE state */ 516 count = 0; 517 do { 518 udelay(1); 519 greset = DWC2_READ_4(hsotg, GRSTCTL); 520 if (++count > 50) { 521 dev_warn(hsotg->dev, 522 "%s() HANG! AHB Idle GRSTCTL=%0x\n", 523 __func__, greset); 524 return -EBUSY; 525 } 526 } while (!(greset & GRSTCTL_AHBIDLE)); 527 528 return 0; 529 } 530 531 /* 532 * Force the mode of the controller. 533 * 534 * Forcing the mode is needed for two cases: 535 * 536 * 1) If the dr_mode is set to either HOST or PERIPHERAL we force the 537 * controller to stay in a particular mode regardless of ID pin 538 * changes. We do this usually after a core reset. 539 * 540 * 2) During probe we want to read reset values of the hw 541 * configuration registers that are only available in either host or 542 * device mode. We may need to force the mode if the current mode does 543 * not allow us to access the register in the mode that we want. 544 * 545 * In either case it only makes sense to force the mode if the 546 * controller hardware is OTG capable. 547 * 548 * Checks are done in this function to determine whether doing a force 549 * would be valid or not. 550 * 551 * If a force is done, it requires a 25ms delay to take effect. 552 * 553 * Returns true if the mode was forced. 554 */ 555 STATIC bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host) 556 { 557 struct dwc2_softc *sc = hsotg->hsotg_sc; 558 u32 gusbcfg; 559 u32 set; 560 u32 clear; 561 562 dev_dbg(hsotg->dev, "Forcing mode to %s\n", host ? "host" : "device"); 563 564 /* 565 * Force mode has no effect if the hardware is not OTG. 566 */ 567 if (!dwc2_hw_is_otg(hsotg)) 568 return false; 569 570 /* 571 * If dr_mode is either peripheral or host only, there is no 572 * need to ever force the mode to the opposite mode. 
573 */ 574 if (host && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) { 575 WARN_ON(1); 576 return false; 577 } 578 579 if (!host && hsotg->dr_mode == USB_DR_MODE_HOST) { 580 WARN_ON(1); 581 return false; 582 } 583 584 gusbcfg = DWC2_READ_4(hsotg, GUSBCFG); 585 586 set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE; 587 clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE; 588 589 gusbcfg &= ~clear; 590 gusbcfg |= set; 591 DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg); 592 593 usb_delay_ms(&sc->sc_bus, 25); 594 return true; 595 } 596 597 /* 598 * Clears the force mode bits. 599 */ 600 STATIC void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg) 601 { 602 struct dwc2_softc *sc = hsotg->hsotg_sc; 603 u32 gusbcfg; 604 605 gusbcfg = DWC2_READ_4(hsotg, GUSBCFG); 606 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; 607 gusbcfg &= ~GUSBCFG_FORCEDEVMODE; 608 DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg); 609 610 /* 611 * NOTE: This long sleep is _very_ important, otherwise the core will 612 * not stay in host mode after a connector ID change! 613 */ 614 usb_delay_ms(&sc->sc_bus, 25); 615 } 616 617 /* 618 * Sets or clears force mode based on the dr_mode parameter. 619 */ 620 void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg) 621 { 622 switch (hsotg->dr_mode) { 623 case USB_DR_MODE_HOST: 624 dwc2_force_mode(hsotg, true); 625 break; 626 case USB_DR_MODE_PERIPHERAL: 627 dwc2_force_mode(hsotg, false); 628 break; 629 case USB_DR_MODE_OTG: 630 dwc2_clear_force_mode(hsotg); 631 break; 632 default: 633 dev_warn(hsotg->dev, "%s() Invalid dr_mode=%d\n", 634 __func__, hsotg->dr_mode); 635 break; 636 } 637 } 638 639 /* 640 * Do core a soft reset of the core. Be careful with this because it 641 * resets all the internal state machines of the core. 642 * 643 * Additionally this will apply force mode as per the hsotg->dr_mode 644 * parameter. 
645 */ 646 int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg) 647 { 648 int retval; 649 650 retval = dwc2_core_reset(hsotg); 651 if (retval) 652 return retval; 653 654 dwc2_force_dr_mode(hsotg); 655 return 0; 656 } 657 658 STATIC int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) 659 { 660 u32 usbcfg, i2cctl; 661 int retval = 0; 662 663 /* 664 * core_init() is now called on every switch so only call the 665 * following for the first time through 666 */ 667 if (select_phy) { 668 dev_dbg(hsotg->dev, "FS PHY selected\n"); 669 670 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 671 if (!(usbcfg & GUSBCFG_PHYSEL)) { 672 usbcfg |= GUSBCFG_PHYSEL; 673 DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg); 674 675 /* Reset after a PHY select */ 676 retval = dwc2_core_reset_and_force_dr_mode(hsotg); 677 678 if (retval) { 679 dev_err(hsotg->dev, 680 "%s: Reset failed, aborting", __func__); 681 return retval; 682 } 683 } 684 } 685 686 /* 687 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also 688 * do this on HNP Dev/Host mode switches (done in dev_init and 689 * host_init). 
690 */ 691 if (dwc2_is_host_mode(hsotg)) 692 dwc2_init_fs_ls_pclk_sel(hsotg); 693 694 if (hsotg->core_params->i2c_enable > 0) { 695 dev_dbg(hsotg->dev, "FS PHY enabling I2C\n"); 696 697 /* Program GUSBCFG.OtgUtmiFsSel to I2C */ 698 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 699 usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL; 700 DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg); 701 702 /* Program GI2CCTL.I2CEn */ 703 i2cctl = DWC2_READ_4(hsotg, GI2CCTL); 704 i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK; 705 i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT; 706 i2cctl &= ~GI2CCTL_I2CEN; 707 DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl); 708 i2cctl |= GI2CCTL_I2CEN; 709 DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl); 710 } 711 712 return retval; 713 } 714 715 STATIC int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) 716 { 717 u32 usbcfg, usbcfg_old; 718 int retval = 0; 719 720 if (!select_phy) 721 return 0; 722 723 usbcfg = usbcfg_old = DWC2_READ_4(hsotg, GUSBCFG); 724 725 /* 726 * HS PHY parameters. These parameters are preserved during soft reset 727 * so only program the first time. Do a soft reset immediately after 728 * setting phyif. 
729 */ 730 switch (hsotg->core_params->phy_type) { 731 case DWC2_PHY_TYPE_PARAM_ULPI: 732 /* ULPI interface */ 733 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n"); 734 usbcfg |= GUSBCFG_ULPI_UTMI_SEL; 735 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL); 736 if (hsotg->core_params->phy_ulpi_ddr > 0) 737 usbcfg |= GUSBCFG_DDRSEL; 738 break; 739 case DWC2_PHY_TYPE_PARAM_UTMI: 740 /* UTMI+ interface */ 741 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n"); 742 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16); 743 if (hsotg->core_params->phy_utmi_width == 16) 744 usbcfg |= GUSBCFG_PHYIF16; 745 break; 746 default: 747 dev_err(hsotg->dev, "FS PHY selected at HS!\n"); 748 break; 749 } 750 751 if (usbcfg != usbcfg_old) { 752 DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg); 753 754 /* Reset after setting the PHY parameters */ 755 retval = dwc2_core_reset_and_force_dr_mode(hsotg); 756 if (retval) { 757 dev_err(hsotg->dev, 758 "%s: Reset failed, aborting", __func__); 759 return retval; 760 } 761 } 762 763 return retval; 764 } 765 766 STATIC int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) 767 { 768 u32 usbcfg; 769 int retval = 0; 770 771 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL && 772 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { 773 /* If FS mode with FS PHY */ 774 retval = dwc2_fs_phy_init(hsotg, select_phy); 775 if (retval) 776 return retval; 777 } else { 778 /* High speed PHY */ 779 retval = dwc2_hs_phy_init(hsotg, select_phy); 780 if (retval) 781 return retval; 782 } 783 784 if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && 785 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && 786 hsotg->core_params->ulpi_fs_ls > 0) { 787 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n"); 788 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 789 usbcfg |= GUSBCFG_ULPI_FS_LS; 790 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M; 791 DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg); 792 } else { 793 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 794 usbcfg &= ~GUSBCFG_ULPI_FS_LS; 795 usbcfg 
&= ~GUSBCFG_ULPI_CLK_SUSP_M; 796 DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg); 797 } 798 799 return retval; 800 } 801 802 STATIC int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg) 803 { 804 struct dwc2_softc *sc = hsotg->hsotg_sc; 805 u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG); 806 807 switch (hsotg->hw_params.arch) { 808 case GHWCFG2_EXT_DMA_ARCH: 809 dev_dbg(hsotg->dev, "External DMA Mode\n"); 810 if (!sc->sc_set_dma_addr) { 811 dev_err(hsotg->dev, "External DMA Mode not supported\n"); 812 return -EINVAL; 813 } 814 if (hsotg->core_params->ahbcfg != -1) { 815 ahbcfg &= GAHBCFG_CTRL_MASK; 816 ahbcfg |= hsotg->core_params->ahbcfg & 817 ~GAHBCFG_CTRL_MASK; 818 } 819 break; 820 821 case GHWCFG2_INT_DMA_ARCH: 822 dev_dbg(hsotg->dev, "Internal DMA Mode\n"); 823 if (hsotg->core_params->ahbcfg != -1) { 824 ahbcfg &= GAHBCFG_CTRL_MASK; 825 ahbcfg |= hsotg->core_params->ahbcfg & 826 ~GAHBCFG_CTRL_MASK; 827 } 828 break; 829 830 case GHWCFG2_SLAVE_ONLY_ARCH: 831 default: 832 dev_dbg(hsotg->dev, "Slave Only Mode\n"); 833 break; 834 } 835 836 dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n", 837 hsotg->core_params->dma_enable, 838 hsotg->core_params->dma_desc_enable); 839 840 if (hsotg->core_params->dma_enable > 0) { 841 if (hsotg->core_params->dma_desc_enable > 0) 842 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n"); 843 else 844 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n"); 845 } else { 846 dev_dbg(hsotg->dev, "Using Slave mode\n"); 847 hsotg->core_params->dma_desc_enable = 0; 848 } 849 850 if (hsotg->core_params->dma_enable > 0) 851 ahbcfg |= GAHBCFG_DMA_EN; 852 853 DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg); 854 855 return 0; 856 } 857 858 STATIC void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) 859 { 860 u32 usbcfg; 861 862 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 863 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP); 864 865 switch (hsotg->hw_params.op_mode) { 866 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 867 if (hsotg->core_params->otg_cap == 868 DWC2_CAP_PARAM_HNP_SRP_CAPABLE) 869 usbcfg 
|= GUSBCFG_HNPCAP; 870 if (hsotg->core_params->otg_cap != 871 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) 872 usbcfg |= GUSBCFG_SRPCAP; 873 break; 874 875 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 876 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 877 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 878 if (hsotg->core_params->otg_cap != 879 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) 880 usbcfg |= GUSBCFG_SRPCAP; 881 break; 882 883 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE: 884 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE: 885 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST: 886 default: 887 break; 888 } 889 890 DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg); 891 } 892 893 /** 894 * dwc2_core_init() - Initializes the DWC_otg controller registers and 895 * prepares the core for device mode or host mode operation 896 * 897 * @hsotg: Programming view of the DWC_otg controller 898 * @initial_setup: If true then this is the first init for this instance. 899 */ 900 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup) 901 { 902 u32 usbcfg, otgctl; 903 int retval; 904 905 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); 906 907 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 908 909 /* Set ULPI External VBUS bit if needed */ 910 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV; 911 if (hsotg->core_params->phy_ulpi_ext_vbus == 912 DWC2_PHY_ULPI_EXTERNAL_VBUS) 913 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV; 914 915 /* Set external TS Dline pulsing bit if needed */ 916 usbcfg &= ~GUSBCFG_TERMSELDLPULSE; 917 if (hsotg->core_params->ts_dline > 0) 918 usbcfg |= GUSBCFG_TERMSELDLPULSE; 919 920 DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg); 921 922 /* 923 * Reset the Controller 924 * 925 * We only need to reset the controller if this is a re-init. 926 * For the first init we know for sure that earlier code reset us (it 927 * needed to in order to properly detect various parameters). 
928 */ 929 if (!initial_setup) { 930 retval = dwc2_core_reset_and_force_dr_mode(hsotg); 931 if (retval) { 932 dev_err(hsotg->dev, "%s(): Reset failed, aborting\n", 933 __func__); 934 return retval; 935 } 936 } 937 938 /* 939 * This needs to happen in FS mode before any other programming occurs 940 */ 941 retval = dwc2_phy_init(hsotg, initial_setup); 942 if (retval) 943 return retval; 944 945 /* Program the GAHBCFG Register */ 946 retval = dwc2_gahbcfg_init(hsotg); 947 if (retval) 948 return retval; 949 950 /* Program the GUSBCFG register */ 951 dwc2_gusbcfg_init(hsotg); 952 953 /* Program the GOTGCTL register */ 954 otgctl = DWC2_READ_4(hsotg, GOTGCTL); 955 otgctl &= ~GOTGCTL_OTGVER; 956 if (hsotg->core_params->otg_ver > 0) 957 otgctl |= GOTGCTL_OTGVER; 958 DWC2_WRITE_4(hsotg, GOTGCTL, otgctl); 959 dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver); 960 961 /* Clear the SRP success bit for FS-I2c */ 962 hsotg->srp_success = 0; 963 964 /* Enable common interrupts */ 965 dwc2_enable_common_interrupts(hsotg); 966 967 /* 968 * Do device or host initialization based on mode during PCD and 969 * HCD initialization 970 */ 971 if (dwc2_is_host_mode(hsotg)) { 972 dev_dbg(hsotg->dev, "Host Mode\n"); 973 hsotg->op_state = OTG_STATE_A_HOST; 974 } else { 975 dev_dbg(hsotg->dev, "Device Mode\n"); 976 hsotg->op_state = OTG_STATE_B_PERIPHERAL; 977 } 978 979 return 0; 980 } 981 982 /** 983 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts 984 * 985 * @hsotg: Programming view of DWC_otg controller 986 */ 987 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg) 988 { 989 u32 intmsk; 990 991 dev_dbg(hsotg->dev, "%s()\n", __func__); 992 993 /* Disable all interrupts */ 994 DWC2_WRITE_4(hsotg, GINTMSK, 0); 995 DWC2_WRITE_4(hsotg, HAINTMSK, 0); 996 997 /* Enable the common interrupts */ 998 dwc2_enable_common_interrupts(hsotg); 999 1000 /* Enable host mode interrupts without disturbing common interrupts */ 1001 intmsk = DWC2_READ_4(hsotg, 
GINTMSK); 1002 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT; 1003 DWC2_WRITE_4(hsotg, GINTMSK, intmsk); 1004 } 1005 1006 /** 1007 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts 1008 * 1009 * @hsotg: Programming view of DWC_otg controller 1010 */ 1011 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg) 1012 { 1013 u32 intmsk = DWC2_READ_4(hsotg, GINTMSK); 1014 1015 /* Disable host mode interrupts without disturbing common interrupts */ 1016 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT | 1017 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT); 1018 DWC2_WRITE_4(hsotg, GINTMSK, intmsk); 1019 } 1020 1021 /* 1022 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size 1023 * For system that have a total fifo depth that is smaller than the default 1024 * RX + TX fifo size. 1025 * 1026 * @hsotg: Programming view of DWC_otg controller 1027 */ 1028 STATIC void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg) 1029 { 1030 struct dwc2_core_params *params = hsotg->core_params; 1031 struct dwc2_hw_params *hw = &hsotg->hw_params; 1032 u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size; 1033 1034 total_fifo_size = hw->total_fifo_size; 1035 rxfsiz = params->host_rx_fifo_size; 1036 nptxfsiz = params->host_nperio_tx_fifo_size; 1037 ptxfsiz = params->host_perio_tx_fifo_size; 1038 1039 /* 1040 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth 1041 * allocation with support for high bandwidth endpoints. Synopsys 1042 * defines MPS(Max Packet size) for a periodic EP=1024, and for 1043 * non-periodic as 512. 1044 */ 1045 if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) { 1046 /* 1047 * For Buffer DMA mode/Scatter Gather DMA mode 1048 * 2 * ((Largest Packet size / 4) + 1 + 1) + n 1049 * with n = number of host channel. 
1050 * 2 * ((1024/4) + 2) = 516 1051 */ 1052 rxfsiz = 516 + hw->host_channels; 1053 1054 /* 1055 * min non-periodic tx fifo depth 1056 * 2 * (largest non-periodic USB packet used / 4) 1057 * 2 * (512/4) = 256 1058 */ 1059 nptxfsiz = 256; 1060 1061 /* 1062 * min periodic tx fifo depth 1063 * (largest packet size*MC)/4 1064 * (1024 * 3)/4 = 768 1065 */ 1066 ptxfsiz = 768; 1067 1068 params->host_rx_fifo_size = rxfsiz; 1069 params->host_nperio_tx_fifo_size = nptxfsiz; 1070 params->host_perio_tx_fifo_size = ptxfsiz; 1071 } 1072 1073 /* 1074 * If the summation of RX, NPTX and PTX fifo sizes is still 1075 * bigger than the total_fifo_size, then we have a problem. 1076 * 1077 * We won't be able to allocate as many endpoints. Right now, 1078 * we're just printing an error message, but ideally this FIFO 1079 * allocation algorithm would be improved in the future. 1080 * 1081 * FIXME improve this FIFO allocation algorithm. 1082 */ 1083 if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) 1084 dev_err(hsotg->dev, "invalid fifo sizes\n"); 1085 } 1086 1087 STATIC void dwc2_config_fifos(struct dwc2_hsotg *hsotg) 1088 { 1089 struct dwc2_core_params *params = hsotg->core_params; 1090 u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz; 1091 1092 if (!params->enable_dynamic_fifo) 1093 return; 1094 1095 dwc2_calculate_dynamic_fifo(hsotg); 1096 1097 /* Rx FIFO */ 1098 grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ); 1099 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz); 1100 grxfsiz &= ~GRXFSIZ_DEPTH_MASK; 1101 grxfsiz |= params->host_rx_fifo_size << 1102 GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK; 1103 DWC2_WRITE_4(hsotg, GRXFSIZ, grxfsiz); 1104 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", 1105 DWC2_READ_4(hsotg, GRXFSIZ)); 1106 1107 /* Non-periodic Tx FIFO */ 1108 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n", 1109 DWC2_READ_4(hsotg, GNPTXFSIZ)); 1110 nptxfsiz = params->host_nperio_tx_fifo_size << 1111 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; 1112 nptxfsiz |= params->host_rx_fifo_size << 1113 
FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; 1114 DWC2_WRITE_4(hsotg, GNPTXFSIZ, nptxfsiz); 1115 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n", 1116 DWC2_READ_4(hsotg, GNPTXFSIZ)); 1117 1118 /* Periodic Tx FIFO */ 1119 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n", 1120 DWC2_READ_4(hsotg, HPTXFSIZ)); 1121 hptxfsiz = params->host_perio_tx_fifo_size << 1122 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; 1123 hptxfsiz |= (params->host_rx_fifo_size + 1124 params->host_nperio_tx_fifo_size) << 1125 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; 1126 DWC2_WRITE_4(hsotg, HPTXFSIZ, hptxfsiz); 1127 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n", 1128 DWC2_READ_4(hsotg, HPTXFSIZ)); 1129 1130 if (hsotg->core_params->en_multiple_tx_fifo > 0 && 1131 hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) { 1132 /* 1133 * Global DFIFOCFG calculation for Host mode - 1134 * include RxFIFO, NPTXFIFO and HPTXFIFO 1135 */ 1136 dfifocfg = DWC2_READ_4(hsotg, GDFIFOCFG); 1137 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK; 1138 dfifocfg |= (params->host_rx_fifo_size + 1139 params->host_nperio_tx_fifo_size + 1140 params->host_perio_tx_fifo_size) << 1141 GDFIFOCFG_EPINFOBASE_SHIFT & 1142 GDFIFOCFG_EPINFOBASE_MASK; 1143 DWC2_WRITE_4(hsotg, GDFIFOCFG, dfifocfg); 1144 } 1145 } 1146 1147 /** 1148 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for 1149 * Host mode 1150 * 1151 * @hsotg: Programming view of DWC_otg controller 1152 * 1153 * This function flushes the Tx and Rx FIFOs and flushes any entries in the 1154 * request queues. Host channels are reset to ensure that they are ready for 1155 * performing transfers. 
1156 */ 1157 void dwc2_core_host_init(struct dwc2_hsotg *hsotg) 1158 { 1159 u32 hcfg, hfir, otgctl; 1160 1161 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); 1162 1163 /* Restart the Phy Clock */ 1164 DWC2_WRITE_4(hsotg, PCGCTL, 0); 1165 1166 /* Initialize Host Configuration Register */ 1167 dwc2_init_fs_ls_pclk_sel(hsotg); 1168 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) { 1169 hcfg = DWC2_READ_4(hsotg, HCFG); 1170 hcfg |= HCFG_FSLSSUPP; 1171 DWC2_WRITE_4(hsotg, HCFG, hcfg); 1172 } 1173 1174 /* 1175 * This bit allows dynamic reloading of the HFIR register during 1176 * runtime. This bit needs to be programmed during initial configuration 1177 * and its value must not be changed during runtime. 1178 */ 1179 if (hsotg->core_params->reload_ctl > 0) { 1180 hfir = DWC2_READ_4(hsotg, HFIR); 1181 hfir |= HFIR_RLDCTRL; 1182 DWC2_WRITE_4(hsotg, HFIR, hfir); 1183 } 1184 1185 if (hsotg->core_params->dma_desc_enable > 0) { 1186 u32 op_mode = hsotg->hw_params.op_mode; 1187 if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a || 1188 !hsotg->hw_params.dma_desc_enable || 1189 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE || 1190 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE || 1191 op_mode == GHWCFG2_OP_MODE_UNDEFINED) { 1192 dev_err(hsotg->dev, 1193 "Hardware does not support descriptor DMA mode -\n"); 1194 dev_err(hsotg->dev, 1195 "falling back to buffer DMA mode.\n"); 1196 hsotg->core_params->dma_desc_enable = 0; 1197 } else { 1198 hcfg = DWC2_READ_4(hsotg, HCFG); 1199 hcfg |= HCFG_DESCDMA; 1200 DWC2_WRITE_4(hsotg, HCFG, hcfg); 1201 } 1202 } 1203 1204 /* Configure data FIFO sizes */ 1205 dwc2_config_fifos(hsotg); 1206 1207 /* TODO - check this */ 1208 /* Clear Host Set HNP Enable in the OTG Control Register */ 1209 otgctl = DWC2_READ_4(hsotg, GOTGCTL); 1210 otgctl &= ~GOTGCTL_HSTSETHNPEN; 1211 DWC2_WRITE_4(hsotg, GOTGCTL, otgctl); 1212 1213 /* Make sure the FIFOs are flushed */ 1214 dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */); 1215 
dwc2_flush_rx_fifo(hsotg); 1216 1217 /* Clear Host Set HNP Enable in the OTG Control Register */ 1218 otgctl = DWC2_READ_4(hsotg, GOTGCTL); 1219 otgctl &= ~GOTGCTL_HSTSETHNPEN; 1220 DWC2_WRITE_4(hsotg, GOTGCTL, otgctl); 1221 1222 if (hsotg->core_params->dma_desc_enable <= 0) { 1223 int num_channels, i; 1224 u32 hcchar; 1225 1226 /* Flush out any leftover queued requests */ 1227 num_channels = hsotg->core_params->host_channels; 1228 for (i = 0; i < num_channels; i++) { 1229 hcchar = DWC2_READ_4(hsotg, HCCHAR(i)); 1230 hcchar &= ~HCCHAR_CHENA; 1231 hcchar |= HCCHAR_CHDIS; 1232 hcchar &= ~HCCHAR_EPDIR; 1233 DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar); 1234 } 1235 1236 /* Halt all channels to put them into a known state */ 1237 for (i = 0; i < num_channels; i++) { 1238 int count = 0; 1239 1240 hcchar = DWC2_READ_4(hsotg, HCCHAR(i)); 1241 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS; 1242 hcchar &= ~HCCHAR_EPDIR; 1243 DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar); 1244 dev_dbg(hsotg->dev, "%s: Halt channel %d\n", 1245 __func__, i); 1246 do { 1247 hcchar = DWC2_READ_4(hsotg, HCCHAR(i)); 1248 if (++count > 1000) { 1249 dev_err(hsotg->dev, 1250 "Unable to clear enable on channel %d\n", 1251 i); 1252 break; 1253 } 1254 udelay(1); 1255 } while (hcchar & HCCHAR_CHENA); 1256 } 1257 } 1258 1259 /* Turn on the vbus power */ 1260 dev_dbg(hsotg->dev, "Init: Port Power? 
op_state=%d\n", hsotg->op_state); 1261 if (hsotg->op_state == OTG_STATE_A_HOST) { 1262 u32 hprt0 = dwc2_read_hprt0(hsotg); 1263 1264 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n", 1265 !!(hprt0 & HPRT0_PWR)); 1266 if (!(hprt0 & HPRT0_PWR)) { 1267 hprt0 |= HPRT0_PWR; 1268 DWC2_WRITE_4(hsotg, HPRT0, hprt0); 1269 } 1270 } 1271 1272 dwc2_enable_host_interrupts(hsotg); 1273 } 1274 1275 STATIC void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg, 1276 struct dwc2_host_chan *chan) 1277 { 1278 u32 hcintmsk = HCINTMSK_CHHLTD; 1279 1280 switch (chan->ep_type) { 1281 case USB_ENDPOINT_XFER_CONTROL: 1282 case USB_ENDPOINT_XFER_BULK: 1283 dev_vdbg(hsotg->dev, "control/bulk\n"); 1284 hcintmsk |= HCINTMSK_XFERCOMPL; 1285 hcintmsk |= HCINTMSK_STALL; 1286 hcintmsk |= HCINTMSK_XACTERR; 1287 hcintmsk |= HCINTMSK_DATATGLERR; 1288 if (chan->ep_is_in) { 1289 hcintmsk |= HCINTMSK_BBLERR; 1290 } else { 1291 hcintmsk |= HCINTMSK_NAK; 1292 hcintmsk |= HCINTMSK_NYET; 1293 if (chan->do_ping) 1294 hcintmsk |= HCINTMSK_ACK; 1295 } 1296 1297 if (chan->do_split) { 1298 hcintmsk |= HCINTMSK_NAK; 1299 if (chan->complete_split) 1300 hcintmsk |= HCINTMSK_NYET; 1301 else 1302 hcintmsk |= HCINTMSK_ACK; 1303 } 1304 1305 if (chan->error_state) 1306 hcintmsk |= HCINTMSK_ACK; 1307 break; 1308 1309 case USB_ENDPOINT_XFER_INT: 1310 if (dbg_perio()) 1311 dev_vdbg(hsotg->dev, "intr\n"); 1312 hcintmsk |= HCINTMSK_XFERCOMPL; 1313 hcintmsk |= HCINTMSK_NAK; 1314 hcintmsk |= HCINTMSK_STALL; 1315 hcintmsk |= HCINTMSK_XACTERR; 1316 hcintmsk |= HCINTMSK_DATATGLERR; 1317 hcintmsk |= HCINTMSK_FRMOVRUN; 1318 1319 if (chan->ep_is_in) 1320 hcintmsk |= HCINTMSK_BBLERR; 1321 if (chan->error_state) 1322 hcintmsk |= HCINTMSK_ACK; 1323 if (chan->do_split) { 1324 if (chan->complete_split) 1325 hcintmsk |= HCINTMSK_NYET; 1326 else 1327 hcintmsk |= HCINTMSK_ACK; 1328 } 1329 break; 1330 1331 case USB_ENDPOINT_XFER_ISOC: 1332 if (dbg_perio()) 1333 dev_vdbg(hsotg->dev, "isoc\n"); 1334 hcintmsk |= HCINTMSK_XFERCOMPL; 1335 hcintmsk 
|= HCINTMSK_FRMOVRUN; 1336 hcintmsk |= HCINTMSK_ACK; 1337 1338 if (chan->ep_is_in) { 1339 hcintmsk |= HCINTMSK_XACTERR; 1340 hcintmsk |= HCINTMSK_BBLERR; 1341 } 1342 break; 1343 default: 1344 dev_err(hsotg->dev, "## Unknown EP type ##\n"); 1345 break; 1346 } 1347 1348 DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk); 1349 if (dbg_hc(chan)) 1350 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); 1351 } 1352 1353 STATIC void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg, 1354 struct dwc2_host_chan *chan) 1355 { 1356 u32 hcintmsk = HCINTMSK_CHHLTD; 1357 1358 /* 1359 * For Descriptor DMA mode core halts the channel on AHB error. 1360 * Interrupt is not required. 1361 */ 1362 if (hsotg->core_params->dma_desc_enable <= 0) { 1363 if (dbg_hc(chan)) 1364 dev_vdbg(hsotg->dev, "desc DMA disabled\n"); 1365 hcintmsk |= HCINTMSK_AHBERR; 1366 } else { 1367 if (dbg_hc(chan)) 1368 dev_vdbg(hsotg->dev, "desc DMA enabled\n"); 1369 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) 1370 hcintmsk |= HCINTMSK_XFERCOMPL; 1371 } 1372 1373 if (chan->error_state && !chan->do_split && 1374 chan->ep_type != USB_ENDPOINT_XFER_ISOC) { 1375 if (dbg_hc(chan)) 1376 dev_vdbg(hsotg->dev, "setting ACK\n"); 1377 hcintmsk |= HCINTMSK_ACK; 1378 if (chan->ep_is_in) { 1379 hcintmsk |= HCINTMSK_DATATGLERR; 1380 if (chan->ep_type != USB_ENDPOINT_XFER_INT) 1381 hcintmsk |= HCINTMSK_NAK; 1382 } 1383 } 1384 1385 DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk); 1386 if (dbg_hc(chan)) 1387 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk); 1388 } 1389 1390 STATIC void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg, 1391 struct dwc2_host_chan *chan) 1392 { 1393 u32 intmsk; 1394 1395 if (hsotg->core_params->dma_enable > 0) { 1396 if (dbg_hc(chan)) 1397 dev_vdbg(hsotg->dev, "DMA enabled\n"); 1398 dwc2_hc_enable_dma_ints(hsotg, chan); 1399 } else { 1400 if (dbg_hc(chan)) 1401 dev_vdbg(hsotg->dev, "DMA disabled\n"); 1402 dwc2_hc_enable_slave_ints(hsotg, chan); 1403 } 1404 1405 /* Enable the top 
level host channel interrupt */ 1406 intmsk = DWC2_READ_4(hsotg, HAINTMSK); 1407 intmsk |= 1 << chan->hc_num; 1408 DWC2_WRITE_4(hsotg, HAINTMSK, intmsk); 1409 if (dbg_hc(chan)) 1410 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk); 1411 1412 /* Make sure host channel interrupts are enabled */ 1413 intmsk = DWC2_READ_4(hsotg, GINTMSK); 1414 intmsk |= GINTSTS_HCHINT; 1415 DWC2_WRITE_4(hsotg, GINTMSK, intmsk); 1416 if (dbg_hc(chan)) 1417 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk); 1418 } 1419 1420 /** 1421 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from 1422 * a specific endpoint 1423 * 1424 * @hsotg: Programming view of DWC_otg controller 1425 * @chan: Information needed to initialize the host channel 1426 * 1427 * The HCCHARn register is set up with the characteristics specified in chan. 1428 * Host channel interrupts that may need to be serviced while this transfer is 1429 * in progress are enabled. 1430 */ 1431 void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) 1432 { 1433 u8 hc_num = chan->hc_num; 1434 u32 hcintmsk; 1435 u32 hcchar; 1436 u32 hcsplt = 0; 1437 1438 if (dbg_hc(chan)) 1439 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1440 1441 /* Clear old interrupt conditions for this host channel */ 1442 hcintmsk = 0xffffffff; 1443 hcintmsk &= ~HCINTMSK_RESERVED14_31; 1444 DWC2_WRITE_4(hsotg, HCINT(hc_num), hcintmsk); 1445 1446 /* Enable channel interrupts required for this transfer */ 1447 dwc2_hc_enable_ints(hsotg, chan); 1448 1449 /* 1450 * Program the HCCHARn register with the endpoint characteristics for 1451 * the current transfer 1452 */ 1453 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK; 1454 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK; 1455 if (chan->ep_is_in) 1456 hcchar |= HCCHAR_EPDIR; 1457 if (chan->speed == USB_SPEED_LOW) 1458 hcchar |= HCCHAR_LSPDDEV; 1459 hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK; 1460 hcchar |= 
chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK; 1461 DWC2_WRITE_4(hsotg, HCCHAR(hc_num), hcchar); 1462 if (dbg_hc(chan)) { 1463 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n", 1464 hc_num, hcchar); 1465 1466 dev_vdbg(hsotg->dev, "%s: Channel %d\n", 1467 __func__, hc_num); 1468 dev_vdbg(hsotg->dev, " Dev Addr: %d\n", 1469 chan->dev_addr); 1470 dev_vdbg(hsotg->dev, " Ep Num: %d\n", 1471 chan->ep_num); 1472 dev_vdbg(hsotg->dev, " Is In: %d\n", 1473 chan->ep_is_in); 1474 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n", 1475 chan->speed == USB_SPEED_LOW); 1476 dev_vdbg(hsotg->dev, " Ep Type: %d\n", 1477 chan->ep_type); 1478 dev_vdbg(hsotg->dev, " Max Pkt: %d\n", 1479 chan->max_packet); 1480 } 1481 1482 /* Program the HCSPLT register for SPLITs */ 1483 if (chan->do_split) { 1484 if (dbg_hc(chan)) 1485 dev_vdbg(hsotg->dev, 1486 "Programming HC %d with split --> %s\n", 1487 hc_num, 1488 chan->complete_split ? "CSPLIT" : "SSPLIT"); 1489 if (chan->complete_split) 1490 hcsplt |= HCSPLT_COMPSPLT; 1491 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT & 1492 HCSPLT_XACTPOS_MASK; 1493 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT & 1494 HCSPLT_HUBADDR_MASK; 1495 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT & 1496 HCSPLT_PRTADDR_MASK; 1497 if (dbg_hc(chan)) { 1498 dev_vdbg(hsotg->dev, " comp split %d\n", 1499 chan->complete_split); 1500 dev_vdbg(hsotg->dev, " xact pos %d\n", 1501 chan->xact_pos); 1502 dev_vdbg(hsotg->dev, " hub addr %d\n", 1503 chan->hub_addr); 1504 dev_vdbg(hsotg->dev, " hub port %d\n", 1505 chan->hub_port); 1506 dev_vdbg(hsotg->dev, " is_in %d\n", 1507 chan->ep_is_in); 1508 dev_vdbg(hsotg->dev, " Max Pkt %d\n", 1509 chan->max_packet); 1510 dev_vdbg(hsotg->dev, " xferlen %d\n", 1511 chan->xfer_len); 1512 } 1513 } 1514 1515 DWC2_WRITE_4(hsotg, HCSPLT(hc_num), hcsplt); 1516 } 1517 1518 /** 1519 * dwc2_hc_halt() - Attempts to halt a host channel 1520 * 1521 * @hsotg: Controller register interface 1522 * @chan: Host channel to halt 1523 * @halt_status: 
Reason for halting the channel 1524 * 1525 * This function should only be called in Slave mode or to abort a transfer in 1526 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the 1527 * controller halts the channel when the transfer is complete or a condition 1528 * occurs that requires application intervention. 1529 * 1530 * In slave mode, checks for a free request queue entry, then sets the Channel 1531 * Enable and Channel Disable bits of the Host Channel Characteristics 1532 * register of the specified channel to intiate the halt. If there is no free 1533 * request queue entry, sets only the Channel Disable bit of the HCCHARn 1534 * register to flush requests for this channel. In the latter case, sets a 1535 * flag to indicate that the host channel needs to be halted when a request 1536 * queue slot is open. 1537 * 1538 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the 1539 * HCCHARn register. The controller ensures there is space in the request 1540 * queue before submitting the halt request. 1541 * 1542 * Some time may elapse before the core flushes any posted requests for this 1543 * host channel and halts. The Channel Halted interrupt handler completes the 1544 * deactivation of the host channel. 1545 */ 1546 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, 1547 enum dwc2_halt_status halt_status) 1548 { 1549 u32 nptxsts, hptxsts, hcchar; 1550 1551 if (dbg_hc(chan)) 1552 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1553 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS) 1554 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status); 1555 1556 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE || 1557 halt_status == DWC2_HC_XFER_AHB_ERR) { 1558 /* 1559 * Disable all channel interrupts except Ch Halted. The QTD 1560 * and QH state associated with this transfer has been cleared 1561 * (in the case of URB_DEQUEUE), so the channel needs to be 1562 * shut down carefully to prevent crashes. 
1563 */ 1564 u32 hcintmsk = HCINTMSK_CHHLTD; 1565 1566 dev_vdbg(hsotg->dev, "dequeue/error\n"); 1567 DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk); 1568 1569 /* 1570 * Make sure no other interrupts besides halt are currently 1571 * pending. Handling another interrupt could cause a crash due 1572 * to the QTD and QH state. 1573 */ 1574 DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), ~hcintmsk); 1575 1576 /* 1577 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR 1578 * even if the channel was already halted for some other 1579 * reason 1580 */ 1581 chan->halt_status = halt_status; 1582 1583 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num)); 1584 if (!(hcchar & HCCHAR_CHENA)) { 1585 /* 1586 * The channel is either already halted or it hasn't 1587 * started yet. In DMA mode, the transfer may halt if 1588 * it finishes normally or a condition occurs that 1589 * requires driver intervention. Don't want to halt 1590 * the channel again. In either Slave or DMA mode, 1591 * it's possible that the transfer has been assigned 1592 * to a channel, but not started yet when an URB is 1593 * dequeued. Don't want to halt a channel that hasn't 1594 * started yet. 1595 */ 1596 return; 1597 } 1598 } 1599 if (chan->halt_pending) { 1600 /* 1601 * A halt has already been issued for this channel. This might 1602 * happen when a transfer is aborted by a higher level in 1603 * the stack. 
1604 */ 1605 dev_vdbg(hsotg->dev, 1606 "*** %s: Channel %d, chan->halt_pending already set ***\n", 1607 __func__, chan->hc_num); 1608 return; 1609 } 1610 1611 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num)); 1612 1613 /* No need to set the bit in DDMA for disabling the channel */ 1614 /* TODO check it everywhere channel is disabled */ 1615 if (hsotg->core_params->dma_desc_enable <= 0) { 1616 if (dbg_hc(chan)) 1617 dev_vdbg(hsotg->dev, "desc DMA disabled\n"); 1618 hcchar |= HCCHAR_CHENA; 1619 } else { 1620 if (dbg_hc(chan)) 1621 dev_dbg(hsotg->dev, "desc DMA enabled\n"); 1622 } 1623 hcchar |= HCCHAR_CHDIS; 1624 1625 if (hsotg->core_params->dma_enable <= 0) { 1626 if (dbg_hc(chan)) 1627 dev_vdbg(hsotg->dev, "DMA not enabled\n"); 1628 hcchar |= HCCHAR_CHENA; 1629 1630 /* Check for space in the request queue to issue the halt */ 1631 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL || 1632 chan->ep_type == USB_ENDPOINT_XFER_BULK) { 1633 dev_vdbg(hsotg->dev, "control/bulk\n"); 1634 nptxsts = DWC2_READ_4(hsotg, GNPTXSTS); 1635 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) { 1636 dev_vdbg(hsotg->dev, "Disabling channel\n"); 1637 hcchar &= ~HCCHAR_CHENA; 1638 } 1639 } else { 1640 if (dbg_perio()) 1641 dev_vdbg(hsotg->dev, "isoc/intr\n"); 1642 hptxsts = DWC2_READ_4(hsotg, HPTXSTS); 1643 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 || 1644 hsotg->queuing_high_bandwidth) { 1645 if (dbg_perio()) 1646 dev_vdbg(hsotg->dev, "Disabling channel\n"); 1647 hcchar &= ~HCCHAR_CHENA; 1648 } 1649 } 1650 } else { 1651 if (dbg_hc(chan)) 1652 dev_vdbg(hsotg->dev, "DMA enabled\n"); 1653 } 1654 1655 DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar); 1656 chan->halt_status = halt_status; 1657 1658 if (hcchar & HCCHAR_CHENA) { 1659 if (dbg_hc(chan)) 1660 dev_vdbg(hsotg->dev, "Channel enabled\n"); 1661 chan->halt_pending = 1; 1662 chan->halt_on_queue = 0; 1663 } else { 1664 if (dbg_hc(chan)) 1665 dev_vdbg(hsotg->dev, "Channel disabled\n"); 1666 chan->halt_on_queue = 1; 1667 } 1668 1669 if 
(dbg_hc(chan)) { 1670 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, 1671 chan->hc_num); 1672 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n", 1673 hcchar); 1674 dev_vdbg(hsotg->dev, " halt_pending: %d\n", 1675 chan->halt_pending); 1676 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n", 1677 chan->halt_on_queue); 1678 dev_vdbg(hsotg->dev, " halt_status: %d\n", 1679 chan->halt_status); 1680 } 1681 } 1682 1683 /** 1684 * dwc2_hc_cleanup() - Clears the transfer state for a host channel 1685 * 1686 * @hsotg: Programming view of DWC_otg controller 1687 * @chan: Identifies the host channel to clean up 1688 * 1689 * This function is normally called after a transfer is done and the host 1690 * channel is being released 1691 */ 1692 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) 1693 { 1694 u32 hcintmsk; 1695 1696 chan->xfer_started = 0; 1697 1698 list_del_init(&chan->split_order_list_entry); 1699 1700 /* 1701 * Clear channel interrupt enables and any unhandled channel interrupt 1702 * conditions 1703 */ 1704 DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), 0); 1705 hcintmsk = 0xffffffff; 1706 hcintmsk &= ~HCINTMSK_RESERVED14_31; 1707 DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), hcintmsk); 1708 } 1709 1710 /** 1711 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in 1712 * which frame a periodic transfer should occur 1713 * 1714 * @hsotg: Programming view of DWC_otg controller 1715 * @chan: Identifies the host channel to set up and its properties 1716 * @hcchar: Current value of the HCCHAR register for the specified host channel 1717 * 1718 * This function has no effect on non-periodic transfers 1719 */ 1720 STATIC void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg, 1721 struct dwc2_host_chan *chan, u32 *hcchar) 1722 { 1723 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1724 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 1725 /* 1 if _next_ frame is odd, 0 if it's even */ 1726 if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1)) 1727 *hcchar 
|= HCCHAR_ODDFRM; 1728 } 1729 } 1730 1731 STATIC void dwc2_set_pid_isoc(struct dwc2_host_chan *chan) 1732 { 1733 /* Set up the initial PID for the transfer */ 1734 if (chan->speed == USB_SPEED_HIGH) { 1735 if (chan->ep_is_in) { 1736 if (chan->multi_count == 1) 1737 chan->data_pid_start = DWC2_HC_PID_DATA0; 1738 else if (chan->multi_count == 2) 1739 chan->data_pid_start = DWC2_HC_PID_DATA1; 1740 else 1741 chan->data_pid_start = DWC2_HC_PID_DATA2; 1742 } else { 1743 if (chan->multi_count == 1) 1744 chan->data_pid_start = DWC2_HC_PID_DATA0; 1745 else 1746 chan->data_pid_start = DWC2_HC_PID_MDATA; 1747 } 1748 } else { 1749 chan->data_pid_start = DWC2_HC_PID_DATA0; 1750 } 1751 } 1752 1753 /** 1754 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with 1755 * the Host Channel 1756 * 1757 * @hsotg: Programming view of DWC_otg controller 1758 * @chan: Information needed to initialize the host channel 1759 * 1760 * This function should only be called in Slave mode. For a channel associated 1761 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel 1762 * associated with a periodic EP, the periodic Tx FIFO is written. 1763 * 1764 * Upon return the xfer_buf and xfer_count fields in chan are incremented by 1765 * the number of bytes written to the Tx FIFO. 
1766 */ 1767 STATIC void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg, 1768 struct dwc2_host_chan *chan) 1769 { 1770 u32 i; 1771 u32 remaining_count; 1772 u32 byte_count; 1773 u32 dword_count; 1774 u32 *data_buf = (u32 *)chan->xfer_buf; 1775 u32 data_fifo; 1776 1777 if (dbg_hc(chan)) 1778 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1779 1780 data_fifo = HCFIFO(chan->hc_num); 1781 1782 remaining_count = chan->xfer_len - chan->xfer_count; 1783 if (remaining_count > chan->max_packet) 1784 byte_count = chan->max_packet; 1785 else 1786 byte_count = remaining_count; 1787 1788 dword_count = (byte_count + 3) / 4; 1789 1790 if (((unsigned long)data_buf & 0x3) == 0) { 1791 /* xfer_buf is DWORD aligned */ 1792 for (i = 0; i < dword_count; i++, data_buf++) 1793 DWC2_WRITE_4(hsotg, data_fifo, *data_buf); 1794 } else { 1795 /* xfer_buf is not DWORD aligned */ 1796 for (i = 0; i < dword_count; i++, data_buf++) { 1797 u32 data = data_buf[0] | data_buf[1] << 8 | 1798 data_buf[2] << 16 | data_buf[3] << 24; 1799 DWC2_WRITE_4(hsotg, data_fifo, data); 1800 } 1801 } 1802 1803 chan->xfer_count += byte_count; 1804 chan->xfer_buf += byte_count; 1805 } 1806 1807 /** 1808 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host 1809 * channel and starts the transfer 1810 * 1811 * @hsotg: Programming view of DWC_otg controller 1812 * @chan: Information needed to initialize the host channel. The xfer_len value 1813 * may be reduced to accommodate the max widths of the XferSize and 1814 * PktCnt fields in the HCTSIZn register. The multi_count value may be 1815 * changed to reflect the final xfer_len value. 1816 * 1817 * This function may be called in either Slave mode or DMA mode. In Slave mode, 1818 * the caller must ensure that there is sufficient space in the request queue 1819 * and Tx Data FIFO. 1820 * 1821 * For an OUT transfer in Slave mode, it loads a data packet into the 1822 * appropriate FIFO. If necessary, additional data packets are loaded in the 1823 * Host ISR. 
1824 * 1825 * For an IN transfer in Slave mode, a data packet is requested. The data 1826 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, 1827 * additional data packets are requested in the Host ISR. 1828 * 1829 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ 1830 * register along with a packet count of 1 and the channel is enabled. This 1831 * causes a single PING transaction to occur. Other fields in HCTSIZ are 1832 * simply set to 0 since no data transfer occurs in this case. 1833 * 1834 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with 1835 * all the information required to perform the subsequent data transfer. In 1836 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the 1837 * controller performs the entire PING protocol, then starts the data 1838 * transfer. 1839 */ 1840 void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, 1841 struct dwc2_host_chan *chan) 1842 { 1843 u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size; 1844 u16 max_hc_pkt_count = hsotg->core_params->max_packet_count; 1845 u32 hcchar; 1846 u32 hctsiz = 0; 1847 u16 num_packets; 1848 u32 ec_mc; 1849 1850 if (dbg_hc(chan)) 1851 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1852 1853 if (chan->do_ping) { 1854 if (hsotg->core_params->dma_enable <= 0) { 1855 if (dbg_hc(chan)) 1856 dev_vdbg(hsotg->dev, "ping, no DMA\n"); 1857 dwc2_hc_do_ping(hsotg, chan); 1858 chan->xfer_started = 1; 1859 return; 1860 } else { 1861 if (dbg_hc(chan)) 1862 dev_vdbg(hsotg->dev, "ping, DMA\n"); 1863 hctsiz |= TSIZ_DOPNG; 1864 } 1865 } 1866 1867 if (chan->do_split) { 1868 if (dbg_hc(chan)) 1869 dev_vdbg(hsotg->dev, "split\n"); 1870 num_packets = 1; 1871 1872 if (chan->complete_split && !chan->ep_is_in) 1873 /* 1874 * For CSPLIT OUT Transfer, set the size to 0 so the 1875 * core doesn't expect any data written to the FIFO 1876 */ 1877 chan->xfer_len = 0; 1878 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet) 
1879 chan->xfer_len = chan->max_packet; 1880 else if (!chan->ep_is_in && chan->xfer_len > 188) 1881 chan->xfer_len = 188; 1882 1883 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & 1884 TSIZ_XFERSIZE_MASK; 1885 1886 /* For split set ec_mc for immediate retries */ 1887 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1888 chan->ep_type == USB_ENDPOINT_XFER_ISOC) 1889 ec_mc = 3; 1890 else 1891 ec_mc = 1; 1892 } else { 1893 if (dbg_hc(chan)) 1894 dev_vdbg(hsotg->dev, "no split\n"); 1895 /* 1896 * Ensure that the transfer length and packet count will fit 1897 * in the widths allocated for them in the HCTSIZn register 1898 */ 1899 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1900 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 1901 /* 1902 * Make sure the transfer size is no larger than one 1903 * (micro)frame's worth of data. (A check was done 1904 * when the periodic transfer was accepted to ensure 1905 * that a (micro)frame's worth of data can be 1906 * programmed into a channel.) 1907 */ 1908 u32 max_periodic_len = 1909 chan->multi_count * chan->max_packet; 1910 1911 if (chan->xfer_len > max_periodic_len) 1912 chan->xfer_len = max_periodic_len; 1913 } else if (chan->xfer_len > max_hc_xfer_size) { 1914 /* 1915 * Make sure that xfer_len is a multiple of max packet 1916 * size 1917 */ 1918 chan->xfer_len = 1919 max_hc_xfer_size - chan->max_packet + 1; 1920 } 1921 1922 if (chan->xfer_len > 0) { 1923 num_packets = (chan->xfer_len + chan->max_packet - 1) / 1924 chan->max_packet; 1925 if (num_packets > max_hc_pkt_count) { 1926 num_packets = max_hc_pkt_count; 1927 chan->xfer_len = num_packets * chan->max_packet; 1928 } 1929 } else { 1930 /* Need 1 packet for transfer length of 0 */ 1931 num_packets = 1; 1932 } 1933 1934 if (chan->ep_is_in) 1935 /* 1936 * Always program an integral # of max packets for IN 1937 * transfers 1938 */ 1939 chan->xfer_len = num_packets * chan->max_packet; 1940 1941 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1942 chan->ep_type == 
USB_ENDPOINT_XFER_ISOC) 1943 /* 1944 * Make sure that the multi_count field matches the 1945 * actual transfer length 1946 */ 1947 chan->multi_count = num_packets; 1948 1949 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) 1950 dwc2_set_pid_isoc(chan); 1951 1952 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & 1953 TSIZ_XFERSIZE_MASK; 1954 1955 /* The ec_mc gets the multi_count for non-split */ 1956 ec_mc = chan->multi_count; 1957 } 1958 1959 chan->start_pkt_count = num_packets; 1960 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK; 1961 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & 1962 TSIZ_SC_MC_PID_MASK; 1963 DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz); 1964 if (dbg_hc(chan)) { 1965 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n", 1966 hctsiz, chan->hc_num); 1967 1968 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, 1969 chan->hc_num); 1970 dev_vdbg(hsotg->dev, " Xfer Size: %d\n", 1971 (hctsiz & TSIZ_XFERSIZE_MASK) >> 1972 TSIZ_XFERSIZE_SHIFT); 1973 dev_vdbg(hsotg->dev, " Num Pkts: %d\n", 1974 (hctsiz & TSIZ_PKTCNT_MASK) >> 1975 TSIZ_PKTCNT_SHIFT); 1976 dev_vdbg(hsotg->dev, " Start PID: %d\n", 1977 (hctsiz & TSIZ_SC_MC_PID_MASK) >> 1978 TSIZ_SC_MC_PID_SHIFT); 1979 } 1980 1981 if (hsotg->core_params->dma_enable > 0) { 1982 dma_addr_t dma_addr; 1983 1984 if (chan->align_buf) { 1985 if (dbg_hc(chan)) 1986 dev_vdbg(hsotg->dev, "align_buf\n"); 1987 dma_addr = chan->align_buf; 1988 } else { 1989 dma_addr = chan->xfer_dma; 1990 } 1991 if (hsotg->hsotg_sc->sc_set_dma_addr == NULL) { 1992 DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num), 1993 (u32)dma_addr); 1994 if (dbg_hc(chan)) 1995 dev_vdbg(hsotg->dev, 1996 "Wrote %08lx to HCDMA(%d)\n", 1997 (unsigned long)dma_addr, 1998 chan->hc_num); 1999 } else { 2000 (void)(*hsotg->hsotg_sc->sc_set_dma_addr)( 2001 hsotg->dev, dma_addr, chan->hc_num); 2002 } 2003 } 2004 2005 /* Start the split */ 2006 if (chan->do_split) { 2007 u32 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chan->hc_num)); 2008 2009 hcsplt |= 
HCSPLT_SPLTENA; 2010 DWC2_WRITE_4(hsotg, HCSPLT(chan->hc_num), hcsplt); 2011 } 2012 2013 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num)); 2014 hcchar &= ~HCCHAR_MULTICNT_MASK; 2015 hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK; 2016 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); 2017 2018 if (hcchar & HCCHAR_CHDIS) 2019 dev_warn(hsotg->dev, 2020 "%s: chdis set, channel %d, hcchar 0x%08x\n", 2021 __func__, chan->hc_num, hcchar); 2022 2023 /* Set host channel enable after all other setup is complete */ 2024 hcchar |= HCCHAR_CHENA; 2025 hcchar &= ~HCCHAR_CHDIS; 2026 2027 if (dbg_hc(chan)) 2028 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", 2029 (hcchar & HCCHAR_MULTICNT_MASK) >> 2030 HCCHAR_MULTICNT_SHIFT); 2031 2032 DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar); 2033 if (dbg_hc(chan)) 2034 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, 2035 chan->hc_num); 2036 2037 chan->xfer_started = 1; 2038 chan->requests++; 2039 2040 if (hsotg->core_params->dma_enable <= 0 && 2041 !chan->ep_is_in && chan->xfer_len > 0) 2042 /* Load OUT packet into the appropriate Tx FIFO */ 2043 dwc2_hc_write_packet(hsotg, chan); 2044 } 2045 2046 /** 2047 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a 2048 * host channel and starts the transfer in Descriptor DMA mode 2049 * 2050 * @hsotg: Programming view of DWC_otg controller 2051 * @chan: Information needed to initialize the host channel 2052 * 2053 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set. 2054 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field 2055 * with micro-frame bitmap. 2056 * 2057 * Initializes HCDMA register with descriptor list address and CTD value then 2058 * starts the transfer via enabling the channel. 
2059 */ 2060 void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, 2061 struct dwc2_host_chan *chan) 2062 { 2063 u32 hcchar; 2064 u32 hctsiz = 0; 2065 2066 if (chan->do_ping) 2067 hctsiz |= TSIZ_DOPNG; 2068 2069 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) 2070 dwc2_set_pid_isoc(chan); 2071 2072 /* Packet Count and Xfer Size are not used in Descriptor DMA mode */ 2073 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & 2074 TSIZ_SC_MC_PID_MASK; 2075 2076 /* 0 - 1 descriptor, 1 - 2 descriptors, etc */ 2077 hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK; 2078 2079 /* Non-zero only for high-speed interrupt endpoints */ 2080 hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK; 2081 2082 if (dbg_hc(chan)) { 2083 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, 2084 chan->hc_num); 2085 dev_vdbg(hsotg->dev, " Start PID: %d\n", 2086 chan->data_pid_start); 2087 dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1); 2088 } 2089 2090 DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz); 2091 2092 usb_syncmem(&chan->desc_list_usbdma, 0, chan->desc_list_sz, 2093 BUS_DMASYNC_PREWRITE); 2094 2095 if (hsotg->hsotg_sc->sc_set_dma_addr == NULL) { 2096 DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num), chan->desc_list_addr); 2097 if (dbg_hc(chan)) 2098 dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n", 2099 &chan->desc_list_addr, chan->hc_num); 2100 } else { 2101 (void)(*hsotg->hsotg_sc->sc_set_dma_addr)( 2102 hsotg->dev, chan->desc_list_addr, chan->hc_num); 2103 if (dbg_hc(chan)) 2104 dev_vdbg(hsotg->dev, "Wrote %pad to ext dma(%d)\n", 2105 &chan->desc_list_addr, chan->hc_num); 2106 } 2107 2108 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num)); 2109 hcchar &= ~HCCHAR_MULTICNT_MASK; 2110 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT & 2111 HCCHAR_MULTICNT_MASK; 2112 2113 if (hcchar & HCCHAR_CHDIS) 2114 dev_warn(hsotg->dev, 2115 "%s: chdis set, channel %d, hcchar 0x%08x\n", 2116 __func__, chan->hc_num, hcchar); 2117 2118 /* Set host channel enable after all 
other setup is complete */ 2119 hcchar |= HCCHAR_CHENA; 2120 hcchar &= ~HCCHAR_CHDIS; 2121 2122 if (dbg_hc(chan)) 2123 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n", 2124 (hcchar & HCCHAR_MULTICNT_MASK) >> 2125 HCCHAR_MULTICNT_SHIFT); 2126 2127 DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar); 2128 if (dbg_hc(chan)) 2129 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, 2130 chan->hc_num); 2131 2132 chan->xfer_started = 1; 2133 chan->requests++; 2134 } 2135 2136 /** 2137 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by 2138 * a previous call to dwc2_hc_start_transfer() 2139 * 2140 * @hsotg: Programming view of DWC_otg controller 2141 * @chan: Information needed to initialize the host channel 2142 * 2143 * The caller must ensure there is sufficient space in the request queue and Tx 2144 * Data FIFO. This function should only be called in Slave mode. In DMA mode, 2145 * the controller acts autonomously to complete transfers programmed to a host 2146 * channel. 2147 * 2148 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO 2149 * if there is any data remaining to be queued. For an IN transfer, another 2150 * data packet is always requested. For the SETUP phase of a control transfer, 2151 * this function does nothing. 2152 * 2153 * Return: 1 if a new request is queued, 0 if no more requests are required 2154 * for this transfer 2155 */ 2156 int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg, 2157 struct dwc2_host_chan *chan) 2158 { 2159 if (dbg_hc(chan)) 2160 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, 2161 chan->hc_num); 2162 2163 if (chan->do_split) 2164 /* SPLITs always queue just once per channel */ 2165 return 0; 2166 2167 if (chan->data_pid_start == DWC2_HC_PID_SETUP) 2168 /* SETUPs are queued only once since they can't be NAK'd */ 2169 return 0; 2170 2171 if (chan->ep_is_in) { 2172 /* 2173 * Always queue another request for other IN transfers. 
If 2174 * back-to-back INs are issued and NAKs are received for both, 2175 * the driver may still be processing the first NAK when the 2176 * second NAK is received. When the interrupt handler clears 2177 * the NAK interrupt for the first NAK, the second NAK will 2178 * not be seen. So we can't depend on the NAK interrupt 2179 * handler to requeue a NAK'd request. Instead, IN requests 2180 * are issued each time this function is called. When the 2181 * transfer completes, the extra requests for the channel will 2182 * be flushed. 2183 */ 2184 u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num)); 2185 2186 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); 2187 hcchar |= HCCHAR_CHENA; 2188 hcchar &= ~HCCHAR_CHDIS; 2189 if (dbg_hc(chan)) 2190 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n", 2191 hcchar); 2192 DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar); 2193 chan->requests++; 2194 return 1; 2195 } 2196 2197 /* OUT transfers */ 2198 2199 if (chan->xfer_count < chan->xfer_len) { 2200 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 2201 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 2202 u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num)); 2203 2204 dwc2_hc_set_even_odd_frame(hsotg, chan, 2205 &hcchar); 2206 } 2207 2208 /* Load OUT packet into the appropriate Tx FIFO */ 2209 dwc2_hc_write_packet(hsotg, chan); 2210 chan->requests++; 2211 return 1; 2212 } 2213 2214 return 0; 2215 } 2216 2217 /** 2218 * dwc2_hc_do_ping() - Starts a PING transfer 2219 * 2220 * @hsotg: Programming view of DWC_otg controller 2221 * @chan: Information needed to initialize the host channel 2222 * 2223 * This function should only be called in Slave mode. The Do Ping bit is set in 2224 * the HCTSIZ register, then the channel is enabled. 
2225 */ 2226 void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) 2227 { 2228 u32 hcchar; 2229 u32 hctsiz; 2230 2231 if (dbg_hc(chan)) 2232 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, 2233 chan->hc_num); 2234 2235 2236 hctsiz = TSIZ_DOPNG; 2237 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT; 2238 DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz); 2239 2240 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num)); 2241 hcchar |= HCCHAR_CHENA; 2242 hcchar &= ~HCCHAR_CHDIS; 2243 DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar); 2244 } 2245 2246 /** 2247 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for 2248 * the HFIR register according to PHY type and speed 2249 * 2250 * @hsotg: Programming view of DWC_otg controller 2251 * 2252 * NOTE: The caller can modify the value of the HFIR register only after the 2253 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort) 2254 * has been set 2255 */ 2256 u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg) 2257 { 2258 u32 usbcfg; 2259 u32 hprt0; 2260 int clock = 60; /* default value */ 2261 2262 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 2263 hprt0 = DWC2_READ_4(hsotg, HPRT0); 2264 2265 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) && 2266 !(usbcfg & GUSBCFG_PHYIF16)) 2267 clock = 60; 2268 if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type == 2269 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI) 2270 clock = 48; 2271 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && 2272 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) 2273 clock = 30; 2274 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && 2275 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16)) 2276 clock = 60; 2277 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) && 2278 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16)) 2279 clock = 48; 2280 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & 
GUSBCFG_PHYIF16) && 2281 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI) 2282 clock = 48; 2283 if ((usbcfg & GUSBCFG_PHYSEL) && 2284 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) 2285 clock = 48; 2286 2287 if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED) 2288 /* High speed case */ 2289 return 125 * clock; 2290 else 2291 /* FS/LS case */ 2292 return 1000 * clock; 2293 } 2294 2295 /** 2296 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination 2297 * buffer 2298 * 2299 * @core_if: Programming view of DWC_otg controller 2300 * @dest: Destination buffer for the packet 2301 * @bytes: Number of bytes to copy to the destination 2302 */ 2303 void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes) 2304 { 2305 bus_size_t fifo = HCFIFO(0); 2306 u32 *data_buf = (u32 *)dest; 2307 int word_count = (bytes + 3) / 4; 2308 int i; 2309 2310 /* 2311 * Todo: Account for the case where dest is not dword aligned. This 2312 * requires reading data from the FIFO into a u32 temp buffer, then 2313 * moving it into the data buffer. 
2314 */ 2315 2316 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes); 2317 2318 for (i = 0; i < word_count; i++, data_buf++) 2319 *data_buf = DWC2_READ_4(hsotg, fifo); 2320 } 2321 2322 /** 2323 * dwc2_dump_host_registers() - Prints the host registers 2324 * 2325 * @hsotg: Programming view of DWC_otg controller 2326 * 2327 * NOTE: This function will be removed once the peripheral controller code 2328 * is integrated and the driver is stable 2329 */ 2330 void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg) 2331 { 2332 #ifdef DWC2_DEBUG 2333 bus_size_t addr; 2334 int i; 2335 2336 dev_dbg(hsotg->dev, "Host Global Registers\n"); 2337 addr = HCFG; 2338 dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n", 2339 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2340 addr = HFIR; 2341 dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n", 2342 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2343 addr = HFNUM; 2344 dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n", 2345 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2346 addr = HPTXSTS; 2347 dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n", 2348 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2349 addr = HAINT; 2350 dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n", 2351 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2352 addr = HAINTMSK; 2353 dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n", 2354 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2355 if (hsotg->core_params->dma_desc_enable > 0) { 2356 addr = HFLBADDR; 2357 dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n", 2358 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2359 } 2360 2361 addr = HPRT0; 2362 dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n", 2363 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2364 2365 for (i = 0; i < hsotg->core_params->host_channels; i++) { 2366 dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i); 2367 addr = HCCHAR(i); 2368 dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n", 2369 (unsigned long)addr, DWC2_READ_4(hsotg, 
addr)); 2370 addr = HCSPLT(i); 2371 dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n", 2372 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2373 addr = HCINT(i); 2374 dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n", 2375 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2376 addr = HCINTMSK(i); 2377 dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n", 2378 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2379 addr = HCTSIZ(i); 2380 dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n", 2381 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2382 addr = HCDMA(i); 2383 dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n", 2384 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2385 if (hsotg->core_params->dma_desc_enable > 0) { 2386 addr = HCDMAB(i); 2387 dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n", 2388 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2389 } 2390 } 2391 #endif 2392 } 2393 2394 /** 2395 * dwc2_dump_global_registers() - Prints the core global registers 2396 * 2397 * @hsotg: Programming view of DWC_otg controller 2398 * 2399 * NOTE: This function will be removed once the peripheral controller code 2400 * is integrated and the driver is stable 2401 */ 2402 void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg) 2403 { 2404 #ifdef DWC2_DEBUG 2405 bus_size_t addr; 2406 2407 dev_dbg(hsotg->dev, "Core Global Registers\n"); 2408 addr = GOTGCTL; 2409 dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n", 2410 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2411 addr = GOTGINT; 2412 dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n", 2413 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2414 addr = GAHBCFG; 2415 dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n", 2416 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2417 addr = GUSBCFG; 2418 dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n", 2419 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2420 addr = GRSTCTL; 2421 dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n", 2422 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2423 addr = 
GINTSTS; 2424 dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n", 2425 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2426 addr = GINTMSK; 2427 dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n", 2428 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2429 addr = GRXSTSR; 2430 dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n", 2431 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2432 addr = GRXFSIZ; 2433 dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n", 2434 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2435 addr = GNPTXFSIZ; 2436 dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n", 2437 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2438 addr = GNPTXSTS; 2439 dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n", 2440 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2441 addr = GI2CCTL; 2442 dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n", 2443 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2444 addr = GPVNDCTL; 2445 dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n", 2446 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2447 addr = GGPIO; 2448 dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n", 2449 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2450 addr = GUID; 2451 dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n", 2452 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2453 addr = GSNPSID; 2454 dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n", 2455 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2456 addr = GHWCFG1; 2457 dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n", 2458 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2459 addr = GHWCFG2; 2460 dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n", 2461 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2462 addr = GHWCFG3; 2463 dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n", 2464 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2465 addr = GHWCFG4; 2466 dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n", 2467 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2468 addr = GLPMCFG; 2469 dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX 
: 0x%08X\n", 2470 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2471 addr = GPWRDN; 2472 dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n", 2473 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2474 addr = GDFIFOCFG; 2475 dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n", 2476 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2477 addr = HPTXFSIZ; 2478 dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n", 2479 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2480 2481 addr = PCGCTL; 2482 dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n", 2483 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2484 #endif 2485 } 2486 2487 /** 2488 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO 2489 * 2490 * @hsotg: Programming view of DWC_otg controller 2491 * @num: Tx FIFO to flush 2492 */ 2493 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num) 2494 { 2495 u32 greset; 2496 int count = 0; 2497 2498 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num); 2499 2500 greset = GRSTCTL_TXFFLSH; 2501 greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK; 2502 DWC2_WRITE_4(hsotg, GRSTCTL, greset); 2503 2504 do { 2505 greset = DWC2_READ_4(hsotg, GRSTCTL); 2506 if (++count > 10000) { 2507 dev_warn(hsotg->dev, 2508 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n", 2509 __func__, greset, 2510 DWC2_READ_4(hsotg, GNPTXSTS)); 2511 break; 2512 } 2513 udelay(1); 2514 } while (greset & GRSTCTL_TXFFLSH); 2515 2516 /* Wait for at least 3 PHY Clocks */ 2517 udelay(1); 2518 } 2519 2520 /** 2521 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO 2522 * 2523 * @hsotg: Programming view of DWC_otg controller 2524 */ 2525 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg) 2526 { 2527 u32 greset; 2528 int count = 0; 2529 2530 dev_vdbg(hsotg->dev, "%s()\n", __func__); 2531 2532 greset = GRSTCTL_RXFFLSH; 2533 DWC2_WRITE_4(hsotg, GRSTCTL, greset); 2534 2535 do { 2536 greset = DWC2_READ_4(hsotg, GRSTCTL); 2537 if (++count > 10000) { 2538 dev_warn(hsotg->dev, "%s() HANG! 
GRSTCTL=%0x\n", 2539 __func__, greset); 2540 break; 2541 } 2542 udelay(1); 2543 } while (greset & GRSTCTL_RXFFLSH); 2544 2545 /* Wait for at least 3 PHY Clocks */ 2546 udelay(1); 2547 } 2548 2549 #define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c)) 2550 2551 /* Parameter access functions */ 2552 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val) 2553 { 2554 int valid = 1; 2555 2556 switch (val) { 2557 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE: 2558 if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) 2559 valid = 0; 2560 break; 2561 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE: 2562 switch (hsotg->hw_params.op_mode) { 2563 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 2564 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 2565 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 2566 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 2567 break; 2568 default: 2569 valid = 0; 2570 break; 2571 } 2572 break; 2573 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE: 2574 /* always valid */ 2575 break; 2576 default: 2577 valid = 0; 2578 break; 2579 } 2580 2581 if (!valid) { 2582 if (val >= 0) 2583 dev_err(hsotg->dev, 2584 "%d invalid for otg_cap parameter. 
Check HW configuration.\n", 2585 val); 2586 switch (hsotg->hw_params.op_mode) { 2587 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 2588 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE; 2589 break; 2590 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 2591 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 2592 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 2593 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE; 2594 break; 2595 default: 2596 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; 2597 break; 2598 } 2599 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val); 2600 } 2601 2602 hsotg->core_params->otg_cap = val; 2603 } 2604 2605 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val) 2606 { 2607 int valid = 1; 2608 2609 if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH) 2610 valid = 0; 2611 if (val < 0) 2612 valid = 0; 2613 2614 if (!valid) { 2615 if (val >= 0) 2616 dev_err(hsotg->dev, 2617 "%d invalid for dma_enable parameter. Check HW configuration.\n", 2618 val); 2619 val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH; 2620 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val); 2621 } 2622 2623 hsotg->core_params->dma_enable = val; 2624 } 2625 2626 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val) 2627 { 2628 int valid = 1; 2629 2630 if (val > 0 && (hsotg->core_params->dma_enable <= 0 || 2631 !hsotg->hw_params.dma_desc_enable)) 2632 valid = 0; 2633 if (val < 0) 2634 valid = 0; 2635 2636 if (!valid) { 2637 if (val >= 0) 2638 dev_err(hsotg->dev, 2639 "%d invalid for dma_desc_enable parameter. 
Check HW configuration.\n", 2640 val); 2641 val = (hsotg->core_params->dma_enable > 0 && 2642 hsotg->hw_params.dma_desc_enable); 2643 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val); 2644 } 2645 2646 hsotg->core_params->dma_desc_enable = val; 2647 } 2648 2649 void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val) 2650 { 2651 int valid = 1; 2652 2653 if (val > 0 && (hsotg->core_params->dma_enable <= 0 || 2654 !hsotg->hw_params.dma_desc_enable)) 2655 valid = 0; 2656 if (val < 0) 2657 valid = 0; 2658 2659 if (!valid) { 2660 if (val >= 0) 2661 dev_err(hsotg->dev, 2662 "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n", 2663 val); 2664 val = (hsotg->core_params->dma_enable > 0 && 2665 hsotg->hw_params.dma_desc_enable); 2666 } 2667 2668 hsotg->core_params->dma_desc_fs_enable = val; 2669 dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val); 2670 } 2671 2672 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg, 2673 int val) 2674 { 2675 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2676 if (val >= 0) { 2677 dev_err(hsotg->dev, 2678 "Wrong value for host_support_fs_low_power\n"); 2679 dev_err(hsotg->dev, 2680 "host_support_fs_low_power must be 0 or 1\n"); 2681 } 2682 val = 0; 2683 dev_dbg(hsotg->dev, 2684 "Setting host_support_fs_low_power to %d\n", val); 2685 } 2686 2687 hsotg->core_params->host_support_fs_ls_low_power = val; 2688 } 2689 2690 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val) 2691 { 2692 int valid = 1; 2693 2694 if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo) 2695 valid = 0; 2696 if (val < 0) 2697 valid = 0; 2698 2699 if (!valid) { 2700 if (val >= 0) 2701 dev_err(hsotg->dev, 2702 "%d invalid for enable_dynamic_fifo parameter. 
Check HW configuration.\n", 2703 val); 2704 val = hsotg->hw_params.enable_dynamic_fifo; 2705 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val); 2706 } 2707 2708 hsotg->core_params->enable_dynamic_fifo = val; 2709 } 2710 2711 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2712 { 2713 int valid = 1; 2714 2715 if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size) 2716 valid = 0; 2717 2718 if (!valid) { 2719 if (val >= 0) 2720 dev_err(hsotg->dev, 2721 "%d invalid for host_rx_fifo_size. Check HW configuration.\n", 2722 val); 2723 val = hsotg->hw_params.host_rx_fifo_size; 2724 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val); 2725 } 2726 2727 hsotg->core_params->host_rx_fifo_size = val; 2728 } 2729 2730 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2731 { 2732 int valid = 1; 2733 2734 if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size) 2735 valid = 0; 2736 2737 if (!valid) { 2738 if (val >= 0) 2739 dev_err(hsotg->dev, 2740 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n", 2741 val); 2742 val = hsotg->hw_params.host_nperio_tx_fifo_size; 2743 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n", 2744 val); 2745 } 2746 2747 hsotg->core_params->host_nperio_tx_fifo_size = val; 2748 } 2749 2750 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2751 { 2752 int valid = 1; 2753 2754 if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size) 2755 valid = 0; 2756 2757 if (!valid) { 2758 if (val >= 0) 2759 dev_err(hsotg->dev, 2760 "%d invalid for host_perio_tx_fifo_size. 
Check HW configuration.\n", 2761 val); 2762 val = hsotg->hw_params.host_perio_tx_fifo_size; 2763 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n", 2764 val); 2765 } 2766 2767 hsotg->core_params->host_perio_tx_fifo_size = val; 2768 } 2769 2770 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val) 2771 { 2772 int valid = 1; 2773 2774 if (val < 2047 || val > hsotg->hw_params.max_transfer_size) 2775 valid = 0; 2776 2777 if (!valid) { 2778 if (val >= 0) 2779 dev_err(hsotg->dev, 2780 "%d invalid for max_transfer_size. Check HW configuration.\n", 2781 val); 2782 val = hsotg->hw_params.max_transfer_size; 2783 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val); 2784 } 2785 2786 hsotg->core_params->max_transfer_size = val; 2787 } 2788 2789 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val) 2790 { 2791 int valid = 1; 2792 2793 if (val < 15 || val > hsotg->hw_params.max_packet_count) 2794 valid = 0; 2795 2796 if (!valid) { 2797 if (val >= 0) 2798 dev_err(hsotg->dev, 2799 "%d invalid for max_packet_count. Check HW configuration.\n", 2800 val); 2801 val = hsotg->hw_params.max_packet_count; 2802 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val); 2803 } 2804 2805 hsotg->core_params->max_packet_count = val; 2806 } 2807 2808 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val) 2809 { 2810 int valid = 1; 2811 2812 if (val < 1 || val > hsotg->hw_params.host_channels) 2813 valid = 0; 2814 2815 if (!valid) { 2816 if (val >= 0) 2817 dev_err(hsotg->dev, 2818 "%d invalid for host_channels. 
Check HW configuration.\n", 2819 val); 2820 val = hsotg->hw_params.host_channels; 2821 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val); 2822 } 2823 2824 hsotg->core_params->host_channels = val; 2825 } 2826 2827 void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val) 2828 { 2829 int valid = 0; 2830 u32 hs_phy_type, fs_phy_type; 2831 2832 if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS, 2833 DWC2_PHY_TYPE_PARAM_ULPI)) { 2834 if (val >= 0) { 2835 dev_err(hsotg->dev, "Wrong value for phy_type\n"); 2836 dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n"); 2837 } 2838 2839 valid = 0; 2840 } 2841 2842 hs_phy_type = hsotg->hw_params.hs_phy_type; 2843 fs_phy_type = hsotg->hw_params.fs_phy_type; 2844 if (val == DWC2_PHY_TYPE_PARAM_UTMI && 2845 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || 2846 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 2847 valid = 1; 2848 else if (val == DWC2_PHY_TYPE_PARAM_ULPI && 2849 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI || 2850 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 2851 valid = 1; 2852 else if (val == DWC2_PHY_TYPE_PARAM_FS && 2853 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) 2854 valid = 1; 2855 2856 if (!valid) { 2857 if (val >= 0) 2858 dev_err(hsotg->dev, 2859 "%d invalid for phy_type. 
Check HW configuration.\n", 2860 val); 2861 val = DWC2_PHY_TYPE_PARAM_FS; 2862 if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) { 2863 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || 2864 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI) 2865 val = DWC2_PHY_TYPE_PARAM_UTMI; 2866 else 2867 val = DWC2_PHY_TYPE_PARAM_ULPI; 2868 } 2869 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val); 2870 } 2871 2872 hsotg->core_params->phy_type = val; 2873 } 2874 2875 STATIC int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg) 2876 { 2877 return hsotg->core_params->phy_type; 2878 } 2879 2880 void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val) 2881 { 2882 int valid = 1; 2883 2884 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2885 if (val >= 0) { 2886 dev_err(hsotg->dev, "Wrong value for speed parameter\n"); 2887 dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n"); 2888 } 2889 valid = 0; 2890 } 2891 2892 if (val == DWC2_SPEED_PARAM_HIGH && 2893 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) 2894 valid = 0; 2895 2896 if (!valid) { 2897 if (val >= 0) 2898 dev_err(hsotg->dev, 2899 "%d invalid for speed parameter. Check HW configuration.\n", 2900 val); 2901 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ? 
2902 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH; 2903 dev_dbg(hsotg->dev, "Setting speed to %d\n", val); 2904 } 2905 2906 hsotg->core_params->speed = val; 2907 } 2908 2909 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val) 2910 { 2911 int valid = 1; 2912 2913 if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ, 2914 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) { 2915 if (val >= 0) { 2916 dev_err(hsotg->dev, 2917 "Wrong value for host_ls_low_power_phy_clk parameter\n"); 2918 dev_err(hsotg->dev, 2919 "host_ls_low_power_phy_clk must be 0 or 1\n"); 2920 } 2921 valid = 0; 2922 } 2923 2924 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ && 2925 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) 2926 valid = 0; 2927 2928 if (!valid) { 2929 if (val >= 0) 2930 dev_err(hsotg->dev, 2931 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n", 2932 val); 2933 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS 2934 ? 
DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 2935 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ; 2936 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n", 2937 val); 2938 } 2939 2940 hsotg->core_params->host_ls_low_power_phy_clk = val; 2941 } 2942 2943 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val) 2944 { 2945 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2946 if (val >= 0) { 2947 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n"); 2948 dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n"); 2949 } 2950 val = 0; 2951 dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val); 2952 } 2953 2954 hsotg->core_params->phy_ulpi_ddr = val; 2955 } 2956 2957 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val) 2958 { 2959 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2960 if (val >= 0) { 2961 dev_err(hsotg->dev, 2962 "Wrong value for phy_ulpi_ext_vbus\n"); 2963 dev_err(hsotg->dev, 2964 "phy_ulpi_ext_vbus must be 0 or 1\n"); 2965 } 2966 val = 0; 2967 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val); 2968 } 2969 2970 hsotg->core_params->phy_ulpi_ext_vbus = val; 2971 } 2972 2973 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val) 2974 { 2975 int valid = 0; 2976 2977 switch (hsotg->hw_params.utmi_phy_data_width) { 2978 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8: 2979 valid = (val == 8); 2980 break; 2981 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16: 2982 valid = (val == 16); 2983 break; 2984 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16: 2985 valid = (val == 8 || val == 16); 2986 break; 2987 } 2988 2989 if (!valid) { 2990 if (val >= 0) { 2991 dev_err(hsotg->dev, 2992 "%d invalid for phy_utmi_width. Check HW configuration.\n", 2993 val); 2994 } 2995 val = (hsotg->hw_params.utmi_phy_data_width == 2996 GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 
8 : 16; 2997 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val); 2998 } 2999 3000 hsotg->core_params->phy_utmi_width = val; 3001 } 3002 3003 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val) 3004 { 3005 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 3006 if (val >= 0) { 3007 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n"); 3008 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n"); 3009 } 3010 val = 0; 3011 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val); 3012 } 3013 3014 hsotg->core_params->ulpi_fs_ls = val; 3015 } 3016 3017 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val) 3018 { 3019 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 3020 if (val >= 0) { 3021 dev_err(hsotg->dev, "Wrong value for ts_dline\n"); 3022 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n"); 3023 } 3024 val = 0; 3025 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val); 3026 } 3027 3028 hsotg->core_params->ts_dline = val; 3029 } 3030 3031 void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val) 3032 { 3033 int valid = 1; 3034 3035 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 3036 if (val >= 0) { 3037 dev_err(hsotg->dev, "Wrong value for i2c_enable\n"); 3038 dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n"); 3039 } 3040 3041 valid = 0; 3042 } 3043 3044 if (val == 1 && !(hsotg->hw_params.i2c_enable)) 3045 valid = 0; 3046 3047 if (!valid) { 3048 if (val >= 0) 3049 dev_err(hsotg->dev, 3050 "%d invalid for i2c_enable. 
Check HW configuration.\n", 3051 val); 3052 val = hsotg->hw_params.i2c_enable; 3053 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val); 3054 } 3055 3056 hsotg->core_params->i2c_enable = val; 3057 } 3058 3059 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val) 3060 { 3061 int valid = 1; 3062 3063 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 3064 if (val >= 0) { 3065 dev_err(hsotg->dev, 3066 "Wrong value for en_multiple_tx_fifo,\n"); 3067 dev_err(hsotg->dev, 3068 "en_multiple_tx_fifo must be 0 or 1\n"); 3069 } 3070 valid = 0; 3071 } 3072 3073 if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo) 3074 valid = 0; 3075 3076 if (!valid) { 3077 if (val >= 0) 3078 dev_err(hsotg->dev, 3079 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n", 3080 val); 3081 val = hsotg->hw_params.en_multiple_tx_fifo; 3082 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val); 3083 } 3084 3085 hsotg->core_params->en_multiple_tx_fifo = val; 3086 } 3087 3088 void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val) 3089 { 3090 int valid = 1; 3091 3092 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 3093 if (val >= 0) { 3094 dev_err(hsotg->dev, 3095 "'%d' invalid for parameter reload_ctl\n", val); 3096 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n"); 3097 } 3098 valid = 0; 3099 } 3100 3101 if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a) 3102 valid = 0; 3103 3104 if (!valid) { 3105 if (val >= 0) 3106 dev_err(hsotg->dev, 3107 "%d invalid for parameter reload_ctl. 
Check HW configuration.\n", 3108 val); 3109 val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a; 3110 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val); 3111 } 3112 3113 hsotg->core_params->reload_ctl = val; 3114 } 3115 3116 void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val) 3117 { 3118 if (val != -1) 3119 hsotg->core_params->ahbcfg = val; 3120 else 3121 hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 << 3122 GAHBCFG_HBSTLEN_SHIFT; 3123 } 3124 3125 void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val) 3126 { 3127 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 3128 if (val >= 0) { 3129 dev_err(hsotg->dev, 3130 "'%d' invalid for parameter otg_ver\n", val); 3131 dev_err(hsotg->dev, 3132 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n"); 3133 } 3134 val = 0; 3135 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val); 3136 } 3137 3138 hsotg->core_params->otg_ver = val; 3139 } 3140 3141 STATIC void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val) 3142 { 3143 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 3144 if (val >= 0) { 3145 dev_err(hsotg->dev, 3146 "'%d' invalid for parameter uframe_sched\n", 3147 val); 3148 dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n"); 3149 } 3150 val = 1; 3151 dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val); 3152 } 3153 3154 hsotg->core_params->uframe_sched = val; 3155 } 3156 3157 STATIC void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg, 3158 int val) 3159 { 3160 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 3161 if (val >= 0) { 3162 dev_err(hsotg->dev, 3163 "'%d' invalid for parameter external_id_pin_ctl\n", 3164 val); 3165 dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n"); 3166 } 3167 val = 0; 3168 dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val); 3169 } 3170 3171 hsotg->core_params->external_id_pin_ctl = val; 3172 } 3173 3174 STATIC void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg, 3175 int val) 3176 { 3177 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 
3178 if (val >= 0) { 3179 dev_err(hsotg->dev, 3180 "'%d' invalid for parameter hibernation\n", 3181 val); 3182 dev_err(hsotg->dev, "hibernation must be 0 or 1\n"); 3183 } 3184 val = 0; 3185 dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val); 3186 } 3187 3188 hsotg->core_params->hibernation = val; 3189 } 3190 3191 /* 3192 * This function is called during module intialization to pass module parameters 3193 * for the DWC_otg core. 3194 */ 3195 void dwc2_set_parameters(struct dwc2_hsotg *hsotg, 3196 const struct dwc2_core_params *params) 3197 { 3198 dev_dbg(hsotg->dev, "%s()\n", __func__); 3199 3200 dwc2_set_param_otg_cap(hsotg, params->otg_cap); 3201 dwc2_set_param_dma_enable(hsotg, params->dma_enable); 3202 dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable); 3203 dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable); 3204 dwc2_set_param_host_support_fs_ls_low_power(hsotg, 3205 params->host_support_fs_ls_low_power); 3206 dwc2_set_param_enable_dynamic_fifo(hsotg, 3207 params->enable_dynamic_fifo); 3208 dwc2_set_param_host_rx_fifo_size(hsotg, 3209 params->host_rx_fifo_size); 3210 dwc2_set_param_host_nperio_tx_fifo_size(hsotg, 3211 params->host_nperio_tx_fifo_size); 3212 dwc2_set_param_host_perio_tx_fifo_size(hsotg, 3213 params->host_perio_tx_fifo_size); 3214 dwc2_set_param_max_transfer_size(hsotg, 3215 params->max_transfer_size); 3216 dwc2_set_param_max_packet_count(hsotg, 3217 params->max_packet_count); 3218 dwc2_set_param_host_channels(hsotg, params->host_channels); 3219 dwc2_set_param_phy_type(hsotg, params->phy_type); 3220 dwc2_set_param_speed(hsotg, params->speed); 3221 dwc2_set_param_host_ls_low_power_phy_clk(hsotg, 3222 params->host_ls_low_power_phy_clk); 3223 dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr); 3224 dwc2_set_param_phy_ulpi_ext_vbus(hsotg, 3225 params->phy_ulpi_ext_vbus); 3226 dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width); 3227 dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls); 3228 
dwc2_set_param_ts_dline(hsotg, params->ts_dline); 3229 dwc2_set_param_i2c_enable(hsotg, params->i2c_enable); 3230 dwc2_set_param_en_multiple_tx_fifo(hsotg, 3231 params->en_multiple_tx_fifo); 3232 dwc2_set_param_reload_ctl(hsotg, params->reload_ctl); 3233 dwc2_set_param_ahbcfg(hsotg, params->ahbcfg); 3234 dwc2_set_param_otg_ver(hsotg, params->otg_ver); 3235 dwc2_set_param_uframe_sched(hsotg, params->uframe_sched); 3236 dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl); 3237 dwc2_set_param_hibernation(hsotg, params->hibernation); 3238 } 3239 3240 /* 3241 * Forces either host or device mode if the controller is not 3242 * currently in that mode. 3243 * 3244 * Returns true if the mode was forced. 3245 */ 3246 STATIC bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host) 3247 { 3248 if (host && dwc2_is_host_mode(hsotg)) 3249 return false; 3250 else if (!host && dwc2_is_device_mode(hsotg)) 3251 return false; 3252 3253 return dwc2_force_mode(hsotg, host); 3254 } 3255 3256 /* 3257 * Gets host hardware parameters. Forces host mode if not currently in 3258 * host mode. Should be called immediately after a core soft reset in 3259 * order to get the reset values. 
3260 */ 3261 STATIC void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg) 3262 { 3263 struct dwc2_hw_params *hw = &hsotg->hw_params; 3264 u32 gnptxfsiz; 3265 u32 hptxfsiz; 3266 bool forced; 3267 3268 if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) 3269 return; 3270 3271 forced = dwc2_force_mode_if_needed(hsotg, true); 3272 3273 gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ); 3274 hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ); 3275 dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); 3276 dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz); 3277 3278 if (forced) 3279 dwc2_clear_force_mode(hsotg); 3280 3281 hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> 3282 FIFOSIZE_DEPTH_SHIFT; 3283 hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >> 3284 FIFOSIZE_DEPTH_SHIFT; 3285 } 3286 3287 /* 3288 * Gets device hardware parameters. Forces device mode if not 3289 * currently in device mode. Should be called immediately after a core 3290 * soft reset in order to get the reset values. 3291 */ 3292 STATIC void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg) 3293 { 3294 struct dwc2_hw_params *hw = &hsotg->hw_params; 3295 bool forced; 3296 u32 gnptxfsiz; 3297 3298 if (hsotg->dr_mode == USB_DR_MODE_HOST) 3299 return; 3300 3301 forced = dwc2_force_mode_if_needed(hsotg, false); 3302 3303 gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ); 3304 dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); 3305 3306 if (forced) 3307 dwc2_clear_force_mode(hsotg); 3308 3309 hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> 3310 FIFOSIZE_DEPTH_SHIFT; 3311 } 3312 3313 /** 3314 * During device initialization, read various hardware configuration 3315 * registers and interpret the contents. 3316 */ 3317 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) 3318 { 3319 struct dwc2_hw_params *hw = &hsotg->hw_params; 3320 unsigned width; 3321 u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4; 3322 u32 grxfsiz; 3323 3324 /* 3325 * Attempt to ensure this device is really a DWC_otg Controller. 
	 * Read and verify the GSNPSID register contents. The value should be
	 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or
	 * "OT3", as in "OTG version 2.xx" or "OTG version 3.xx".
	 */
	hw->snpsid = DWC2_READ_4(hsotg, GSNPSID);
	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
			hw->snpsid);
		return -ENODEV;
	}

	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);

	hwcfg1 = DWC2_READ_4(hsotg, GHWCFG1);
	hwcfg2 = DWC2_READ_4(hsotg, GHWCFG2);
	hwcfg3 = DWC2_READ_4(hsotg, GHWCFG3);
	hwcfg4 = DWC2_READ_4(hsotg, GHWCFG4);
	grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);

	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);

	/*
	 * Host specific hardware parameters. Reading these parameters
	 * requires the controller to be in host mode. The mode will
	 * be forced, if necessary, to read these values.
	 */
	dwc2_get_host_hwparams(hsotg);
	dwc2_get_dev_hwparams(hsotg);

	/* hwcfg1 */
	hw->dev_ep_dirs = hwcfg1;

	/* hwcfg2 */
	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
		      GHWCFG2_OP_MODE_SHIFT;
	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
		   GHWCFG2_ARCHITECTURE_SHIFT;
	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
				 GHWCFG2_NUM_HOST_CHAN_SHIFT);
	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
			  GHWCFG2_HS_PHY_TYPE_SHIFT;
	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
			  GHWCFG2_FS_PHY_TYPE_SHIFT;
	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
			 GHWCFG2_NUM_DEV_EP_SHIFT;
	/* Note: the extracted TX queue depth fields are doubled (<< 1). */
	hw->nperio_tx_q_depth =
		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
	hw->host_perio_tx_q_depth =
		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
	hw->dev_token_q_depth =
		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;

	/* hwcfg3 */
	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
	hw->max_transfer_size = (1 << (width + 11)) - 1;
	/*
	 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
	 * coherent buffers with this size, and if it's too large we can
	 * exhaust the coherent DMA pool.
	 */
	if (hw->max_transfer_size > 65535)
		hw->max_transfer_size = 65535;
	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
	hw->max_packet_count = (1 << (width + 4)) - 1;
	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
			      GHWCFG3_DFIFO_DEPTH_SHIFT;

	/* hwcfg4 */
	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;

	/* fifo sizes */
	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
				GRXFSIZ_DEPTH_SHIFT;

	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
	dev_dbg(hsotg->dev, "  op_mode=%d\n",
		hw->op_mode);
	dev_dbg(hsotg->dev, "  arch=%d\n",
		hw->arch);
	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
		hw->dma_desc_enable);
	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
		hw->power_optimized);
	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
		hw->i2c_enable);
	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
		hw->hs_phy_type);
	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
		hw->fs_phy_type);
	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
		hw->utmi_phy_data_width);
	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
		hw->num_dev_ep);
	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
		hw->num_dev_perio_in_ep);
	dev_dbg(hsotg->dev, "  host_channels=%d\n",
		hw->host_channels);
	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
		hw->max_transfer_size);
	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
		hw->max_packet_count);
	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
		hw->nperio_tx_q_depth);
	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
		hw->host_perio_tx_q_depth);
	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
		hw->dev_token_q_depth);
	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
		hw->enable_dynamic_fifo);
	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
		hw->en_multiple_tx_fifo);
	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
		hw->total_fifo_size);
	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
		hw->host_rx_fifo_size);
	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
		hw->host_nperio_tx_fifo_size);
	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
		hw->host_perio_tx_fifo_size);
	dev_dbg(hsotg->dev, "\n");

	return 0;
}

/*
 * Sets all parameters to the given value.
 *
 * Assumes that the dwc2_core_params struct contains only integers.
 * NOTE(review): the int-vs-size_t comparison below is signed/unsigned;
 * harmless here, but worth tightening when the function is next touched.
 */
void dwc2_set_all_params(struct dwc2_core_params *params, int value)
{
	int *p = (int *)params;
	size_t size = sizeof(*params) / sizeof(*p);
	int i;

	for (i = 0; i < size; i++)
		p[i] = value;
}


/* Return the OTG version selected by core_params, in BCD (2.0 or 1.3). */
u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
{
	return hsotg->core_params->otg_ver == 1 ?
0x0200 : 0x0103; 3489 } 3490 3491 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg) 3492 { 3493 if (DWC2_READ_4(hsotg, GSNPSID) == 0xffffffff) 3494 return false; 3495 else 3496 return true; 3497 } 3498 3499 /** 3500 * dwc2_enable_global_interrupts() - Enables the controller's Global 3501 * Interrupt in the AHB Config register 3502 * 3503 * @hsotg: Programming view of DWC_otg controller 3504 */ 3505 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg) 3506 { 3507 u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG); 3508 3509 ahbcfg |= GAHBCFG_GLBL_INTR_EN; 3510 DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg); 3511 } 3512 3513 /** 3514 * dwc2_disable_global_interrupts() - Disables the controller's Global 3515 * Interrupt in the AHB Config register 3516 * 3517 * @hsotg: Programming view of DWC_otg controller 3518 */ 3519 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg) 3520 { 3521 u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG); 3522 3523 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN; 3524 DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg); 3525 } 3526 3527 /* Returns the controller's GHWCFG2.OTG_MODE. */ 3528 unsigned dwc2_op_mode(struct dwc2_hsotg *hsotg) 3529 { 3530 u32 ghwcfg2 = DWC2_READ_4(hsotg, GHWCFG2); 3531 3532 return (ghwcfg2 & GHWCFG2_OP_MODE_MASK) >> 3533 GHWCFG2_OP_MODE_SHIFT; 3534 } 3535 3536 /* Returns true if the controller is capable of DRD. */ 3537 bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg) 3538 { 3539 unsigned op_mode = dwc2_op_mode(hsotg); 3540 3541 return (op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) || 3542 (op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE) || 3543 (op_mode == GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE); 3544 } 3545 3546 /* Returns true if the controller is host-only. */ 3547 bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg) 3548 { 3549 unsigned op_mode = dwc2_op_mode(hsotg); 3550 3551 return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST) || 3552 (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST); 3553 } 3554 3555 /* Returns true if the controller is device-only. 
*/ 3556 bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg) 3557 { 3558 unsigned op_mode = dwc2_op_mode(hsotg); 3559 3560 return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || 3561 (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE); 3562 } 3563