1 /* $NetBSD: dwc2_core.c,v 1.10 2015/09/01 14:03:00 skrll Exp $ */ 2 3 /* 4 * core.c - DesignWare HS OTG Controller common routines 5 * 6 * Copyright (C) 2004-2013 Synopsys, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. The names of the above-listed copyright holders may not be used 18 * to endorse or promote products derived from this software without 19 * specific prior written permission. 20 * 21 * ALTERNATIVELY, this software may be distributed under the terms of the 22 * GNU General Public License ("GPL") as published by the Free Software 23 * Foundation; either version 2 of the License, or (at your option) any 24 * later version. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
37 */ 38 39 /* 40 * The Core code provides basic services for accessing and managing the 41 * DWC_otg hardware. These services are used by both the Host Controller 42 * Driver and the Peripheral Controller Driver. 43 */ 44 45 #include <sys/cdefs.h> 46 __KERNEL_RCSID(0, "$NetBSD: dwc2_core.c,v 1.10 2015/09/01 14:03:00 skrll Exp $"); 47 48 #include <sys/types.h> 49 #include <sys/bus.h> 50 #include <sys/proc.h> 51 #include <sys/callout.h> 52 #include <sys/mutex.h> 53 #include <sys/pool.h> 54 #include <sys/workqueue.h> 55 56 #include <dev/usb/usb.h> 57 #include <dev/usb/usbdi.h> 58 #include <dev/usb/usbdivar.h> 59 #include <dev/usb/usb_mem.h> 60 61 #include <linux/kernel.h> 62 #include <linux/list.h> 63 64 #include <dwc2/dwc2.h> 65 #include <dwc2/dwc2var.h> 66 67 #include "dwc2_core.h" 68 #include "dwc2_hcd.h" 69 70 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 71 /** 72 * dwc2_backup_host_registers() - Backup controller host registers. 73 * When suspending usb bus, registers needs to be backuped 74 * if controller power is disabled once suspended. 75 * 76 * @hsotg: Programming view of the DWC_otg controller 77 */ 78 static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) 79 { 80 struct dwc2_hregs_backup *hr; 81 int i; 82 83 dev_dbg(hsotg->dev, "%s\n", __func__); 84 85 /* Backup Host regs */ 86 hr = &hsotg->hr_backup; 87 hr->hcfg = DWC2_READ_4(hsotg, HCFG); 88 hr->haintmsk = DWC2_READ_4(hsotg, HAINTMSK); 89 for (i = 0; i < hsotg->core_params->host_channels; ++i) 90 hr->hcintmsk[i] = DWC2_READ_4(hsotg, HCINTMSK(i)); 91 92 hr->hprt0 = DWC2_READ_4(hsotg, HPRT0); 93 hr->hfir = DWC2_READ_4(hsotg, HFIR); 94 hr->valid = true; 95 96 return 0; 97 } 98 99 /** 100 * dwc2_restore_host_registers() - Restore controller host registers. 101 * When resuming usb bus, device registers needs to be restored 102 * if controller power were disabled. 
103 * 104 * @hsotg: Programming view of the DWC_otg controller 105 */ 106 static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) 107 { 108 struct dwc2_hregs_backup *hr; 109 int i; 110 111 dev_dbg(hsotg->dev, "%s\n", __func__); 112 113 /* Restore host regs */ 114 hr = &hsotg->hr_backup; 115 if (!hr->valid) { 116 dev_err(hsotg->dev, "%s: no host registers to restore\n", 117 __func__); 118 return -EINVAL; 119 } 120 hr->valid = false; 121 122 DWC2_WRITE_4(hsotg, HCFG, hr->hcfg); 123 DWC2_WRITE_4(hsotg, HAINTMSK, hr->haintmsk); 124 125 for (i = 0; i < hsotg->core_params->host_channels; ++i) 126 DWC2_WRITE_4(hsotg, HCINTMSK(i), hr->hcintmsk[i]); 127 128 DWC2_WRITE_4(hsotg, HPRT0, hr->hprt0); 129 DWC2_WRITE_4(hsotg, HFIR, hr->hfir); 130 131 return 0; 132 } 133 #else 134 static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) 135 { return 0; } 136 137 static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) 138 { return 0; } 139 #endif 140 141 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ 142 IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 143 /** 144 * dwc2_backup_device_registers() - Backup controller device registers. 145 * When suspending usb bus, registers needs to be backuped 146 * if controller power is disabled once suspended. 
147 * 148 * @hsotg: Programming view of the DWC_otg controller 149 */ 150 static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) 151 { 152 struct dwc2_dregs_backup *dr; 153 int i; 154 155 dev_dbg(hsotg->dev, "%s\n", __func__); 156 157 /* Backup dev regs */ 158 dr = &hsotg->dr_backup; 159 160 dr->dcfg = DWC2_READ_4(hsotg, DCFG); 161 dr->dctl = DWC2_READ_4(hsotg, DCTL); 162 dr->daintmsk = DWC2_READ_4(hsotg, DAINTMSK); 163 dr->diepmsk = DWC2_READ_4(hsotg, DIEPMSK); 164 dr->doepmsk = DWC2_READ_4(hsotg, DOEPMSK); 165 166 for (i = 0; i < hsotg->num_of_eps; i++) { 167 /* Backup IN EPs */ 168 dr->diepctl[i] = DWC2_READ_4(hsotg, DIEPCTL(i)); 169 170 /* Ensure DATA PID is correctly configured */ 171 if (dr->diepctl[i] & DXEPCTL_DPID) 172 dr->diepctl[i] |= DXEPCTL_SETD1PID; 173 else 174 dr->diepctl[i] |= DXEPCTL_SETD0PID; 175 176 dr->dieptsiz[i] = DWC2_READ_4(hsotg, DIEPTSIZ(i)); 177 dr->diepdma[i] = DWC2_READ_4(hsotg, DIEPDMA(i)); 178 179 /* Backup OUT EPs */ 180 dr->doepctl[i] = DWC2_READ_4(hsotg, DOEPCTL(i)); 181 182 /* Ensure DATA PID is correctly configured */ 183 if (dr->doepctl[i] & DXEPCTL_DPID) 184 dr->doepctl[i] |= DXEPCTL_SETD1PID; 185 else 186 dr->doepctl[i] |= DXEPCTL_SETD0PID; 187 188 dr->doeptsiz[i] = DWC2_READ_4(hsotg, DOEPTSIZ(i)); 189 dr->doepdma[i] = DWC2_READ_4(hsotg, DOEPDMA(i)); 190 } 191 dr->valid = true; 192 return 0; 193 } 194 195 /** 196 * dwc2_restore_device_registers() - Restore controller device registers. 197 * When resuming usb bus, device registers needs to be restored 198 * if controller power were disabled. 
199 * 200 * @hsotg: Programming view of the DWC_otg controller 201 */ 202 static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) 203 { 204 struct dwc2_dregs_backup *dr; 205 u32 dctl; 206 int i; 207 208 dev_dbg(hsotg->dev, "%s\n", __func__); 209 210 /* Restore dev regs */ 211 dr = &hsotg->dr_backup; 212 if (!dr->valid) { 213 dev_err(hsotg->dev, "%s: no device registers to restore\n", 214 __func__); 215 return -EINVAL; 216 } 217 dr->valid = false; 218 219 DWC2_WRITE_4(hsotg, DCFG, dr->dcfg); 220 DWC2_WRITE_4(hsotg, DCTL, dr->dctl); 221 DWC2_WRITE_4(hsotg, DAINTMSK, dr->daintmsk); 222 DWC2_WRITE_4(hsotg, DIEPMSK, dr->diepmsk); 223 DWC2_WRITE_4(hsotg, DOEPMSK, dr->doepmsk); 224 225 for (i = 0; i < hsotg->num_of_eps; i++) { 226 /* Restore IN EPs */ 227 DWC2_WRITE_4(hsotg, DIEPCTL(i), dr->diepctl[i]); 228 DWC2_WRITE_4(hsotg, DIEPTSIZ(i), dr->dieptsiz[i]); 229 DWC2_WRITE_4(hsotg, DIEPDMA(i), dr->diepdma[i]); 230 231 /* Restore OUT EPs */ 232 DWC2_WRITE_4(hsotg, DOEPCTL(i), dr->doepctl[i]); 233 DWC2_WRITE_4(hsotg, DOEPTSIZ(i), dr->doeptsiz[i]); 234 DWC2_WRITE_4(hsotg, DOEPDMA(i), dr->doepdma[i]); 235 } 236 237 /* Set the Power-On Programming done bit */ 238 dctl = DWC2_READ_4(hsotg, DCTL); 239 dctl |= DCTL_PWRONPRGDONE; 240 DWC2_WRITE_4(hsotg, DCTL, dctl); 241 242 return 0; 243 } 244 #else 245 static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) 246 { return 0; } 247 248 static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) 249 { return 0; } 250 #endif 251 252 /** 253 * dwc2_backup_global_registers() - Backup global controller registers. 254 * When suspending usb bus, registers needs to be backuped 255 * if controller power is disabled once suspended. 
256 * 257 * @hsotg: Programming view of the DWC_otg controller 258 */ 259 static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg) 260 { 261 struct dwc2_gregs_backup *gr; 262 int i; 263 264 /* Backup global regs */ 265 gr = &hsotg->gr_backup; 266 267 gr->gotgctl = DWC2_READ_4(hsotg, GOTGCTL); 268 gr->gintmsk = DWC2_READ_4(hsotg, GINTMSK); 269 gr->gahbcfg = DWC2_READ_4(hsotg, GAHBCFG); 270 gr->gusbcfg = DWC2_READ_4(hsotg, GUSBCFG); 271 gr->grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ); 272 gr->gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ); 273 gr->hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ); 274 gr->gdfifocfg = DWC2_READ_4(hsotg, GDFIFOCFG); 275 for (i = 0; i < MAX_EPS_CHANNELS; i++) 276 gr->dtxfsiz[i] = DWC2_READ_4(hsotg, DPTXFSIZN(i)); 277 278 gr->valid = true; 279 return 0; 280 } 281 282 /** 283 * dwc2_restore_global_registers() - Restore controller global registers. 284 * When resuming usb bus, device registers needs to be restored 285 * if controller power were disabled. 286 * 287 * @hsotg: Programming view of the DWC_otg controller 288 */ 289 static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg) 290 { 291 struct dwc2_gregs_backup *gr; 292 int i; 293 294 dev_dbg(hsotg->dev, "%s\n", __func__); 295 296 /* Restore global regs */ 297 gr = &hsotg->gr_backup; 298 if (!gr->valid) { 299 dev_err(hsotg->dev, "%s: no global registers to restore\n", 300 __func__); 301 return -EINVAL; 302 } 303 gr->valid = false; 304 305 DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff); 306 DWC2_WRITE_4(hsotg, GOTGCTL, gr->gotgctl); 307 DWC2_WRITE_4(hsotg, GINTMSK, gr->gintmsk); 308 DWC2_WRITE_4(hsotg, GUSBCFG, gr->gusbcfg); 309 DWC2_WRITE_4(hsotg, GAHBCFG, gr->gahbcfg); 310 DWC2_WRITE_4(hsotg, GRXFSIZ, gr->grxfsiz); 311 DWC2_WRITE_4(hsotg, GNPTXFSIZ, gr->gnptxfsiz); 312 DWC2_WRITE_4(hsotg, HPTXFSIZ, gr->hptxfsiz); 313 DWC2_WRITE_4(hsotg, GDFIFOCFG, gr->gdfifocfg); 314 for (i = 0; i < MAX_EPS_CHANNELS; i++) 315 DWC2_WRITE_4(hsotg, DPTXFSIZN(i), gr->dtxfsiz[i]); 316 317 return 0; 318 } 319 320 
/** 321 * dwc2_exit_hibernation() - Exit controller from Partial Power Down. 322 * 323 * @hsotg: Programming view of the DWC_otg controller 324 * @restore: Controller registers need to be restored 325 */ 326 int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore) 327 { 328 u32 pcgcctl; 329 int ret = 0; 330 331 if (!hsotg->core_params->hibernation) 332 return -ENOTSUPP; 333 334 pcgcctl = DWC2_READ_4(hsotg, PCGCTL); 335 pcgcctl &= ~PCGCTL_STOPPCLK; 336 DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl); 337 338 pcgcctl = DWC2_READ_4(hsotg, PCGCTL); 339 pcgcctl &= ~PCGCTL_PWRCLMP; 340 DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl); 341 342 pcgcctl = DWC2_READ_4(hsotg, PCGCTL); 343 pcgcctl &= ~PCGCTL_RSTPDWNMODULE; 344 DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl); 345 346 udelay(100); 347 if (restore) { 348 ret = dwc2_restore_global_registers(hsotg); 349 if (ret) { 350 dev_err(hsotg->dev, "%s: failed to restore registers\n", 351 __func__); 352 return ret; 353 } 354 if (dwc2_is_host_mode(hsotg)) { 355 ret = dwc2_restore_host_registers(hsotg); 356 if (ret) { 357 dev_err(hsotg->dev, "%s: failed to restore host registers\n", 358 __func__); 359 return ret; 360 } 361 } else { 362 ret = dwc2_restore_device_registers(hsotg); 363 if (ret) { 364 dev_err(hsotg->dev, "%s: failed to restore device registers\n", 365 __func__); 366 return ret; 367 } 368 } 369 } 370 371 return ret; 372 } 373 374 /** 375 * dwc2_enter_hibernation() - Put controller in Partial Power Down. 
376 * 377 * @hsotg: Programming view of the DWC_otg controller 378 */ 379 int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg) 380 { 381 u32 pcgcctl; 382 int ret = 0; 383 384 if (!hsotg->core_params->hibernation) 385 return -ENOTSUPP; 386 387 /* Backup all registers */ 388 ret = dwc2_backup_global_registers(hsotg); 389 if (ret) { 390 dev_err(hsotg->dev, "%s: failed to backup global registers\n", 391 __func__); 392 return ret; 393 } 394 395 if (dwc2_is_host_mode(hsotg)) { 396 ret = dwc2_backup_host_registers(hsotg); 397 if (ret) { 398 dev_err(hsotg->dev, "%s: failed to backup host registers\n", 399 __func__); 400 return ret; 401 } 402 } else { 403 ret = dwc2_backup_device_registers(hsotg); 404 if (ret) { 405 dev_err(hsotg->dev, "%s: failed to backup device registers\n", 406 __func__); 407 return ret; 408 } 409 } 410 411 /* Put the controller in low power state */ 412 pcgcctl = DWC2_READ_4(hsotg, PCGCTL); 413 414 pcgcctl |= PCGCTL_PWRCLMP; 415 DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl); 416 ndelay(20); 417 418 pcgcctl |= PCGCTL_RSTPDWNMODULE; 419 DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl); 420 ndelay(20); 421 422 pcgcctl |= PCGCTL_STOPPCLK; 423 DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl); 424 425 return ret; 426 } 427 428 /** 429 * dwc2_enable_common_interrupts() - Initializes the commmon interrupts, 430 * used in both device and host modes 431 * 432 * @hsotg: Programming view of the DWC_otg controller 433 */ 434 static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg) 435 { 436 u32 intmsk; 437 438 /* Clear any pending OTG Interrupts */ 439 DWC2_WRITE_4(hsotg, GOTGINT, 0xffffffff); 440 441 /* Clear any pending interrupts */ 442 DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff); 443 444 /* Enable the interrupts in the GINTMSK */ 445 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT; 446 447 if (hsotg->core_params->dma_enable <= 0) 448 intmsk |= GINTSTS_RXFLVL; 449 if (hsotg->core_params->external_id_pin_ctl <= 0) 450 intmsk |= GINTSTS_CONIDSTSCHNG; 451 452 intmsk |= GINTSTS_WKUPINT | 
GINTSTS_USBSUSP | 453 GINTSTS_SESSREQINT; 454 455 DWC2_WRITE_4(hsotg, GINTMSK, intmsk); 456 } 457 458 /* 459 * Initializes the FSLSPClkSel field of the HCFG register depending on the 460 * PHY type 461 */ 462 static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg) 463 { 464 u32 hcfg, val; 465 466 if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && 467 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && 468 hsotg->core_params->ulpi_fs_ls > 0) || 469 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { 470 /* Full speed PHY */ 471 val = HCFG_FSLSPCLKSEL_48_MHZ; 472 } else { 473 /* High speed PHY running at full speed or high speed */ 474 val = HCFG_FSLSPCLKSEL_30_60_MHZ; 475 } 476 477 dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val); 478 hcfg = DWC2_READ_4(hsotg, HCFG); 479 hcfg &= ~HCFG_FSLSPCLKSEL_MASK; 480 hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT; 481 DWC2_WRITE_4(hsotg, HCFG, hcfg); 482 } 483 484 /* 485 * Do core a soft reset of the core. Be careful with this because it 486 * resets all the internal state machines of the core. 487 */ 488 static int dwc2_core_reset(struct dwc2_hsotg *hsotg) 489 { 490 u32 greset; 491 int count = 0; 492 u32 gusbcfg; 493 494 dev_vdbg(hsotg->dev, "%s()\n", __func__); 495 496 /* Wait for AHB master IDLE state */ 497 do { 498 usleep_range(20000, 40000); 499 greset = DWC2_READ_4(hsotg, GRSTCTL); 500 if (++count > 50) { 501 dev_warn(hsotg->dev, 502 "%s() HANG! AHB Idle GRSTCTL=%0x\n", 503 __func__, greset); 504 return -EBUSY; 505 } 506 } while (!(greset & GRSTCTL_AHBIDLE)); 507 508 /* Core Soft Reset */ 509 count = 0; 510 greset |= GRSTCTL_CSFTRST; 511 DWC2_WRITE_4(hsotg, GRSTCTL, greset); 512 do { 513 usleep_range(20000, 40000); 514 greset = DWC2_READ_4(hsotg, GRSTCTL); 515 if (++count > 50) { 516 dev_warn(hsotg->dev, 517 "%s() HANG! 
Soft Reset GRSTCTL=%0x\n", 518 __func__, greset); 519 return -EBUSY; 520 } 521 } while (greset & GRSTCTL_CSFTRST); 522 523 if (hsotg->dr_mode == USB_DR_MODE_HOST) { 524 gusbcfg = DWC2_READ_4(hsotg, GUSBCFG); 525 gusbcfg &= ~GUSBCFG_FORCEDEVMODE; 526 gusbcfg |= GUSBCFG_FORCEHOSTMODE; 527 DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg); 528 } else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) { 529 gusbcfg = DWC2_READ_4(hsotg, GUSBCFG); 530 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; 531 gusbcfg |= GUSBCFG_FORCEDEVMODE; 532 DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg); 533 } else if (hsotg->dr_mode == USB_DR_MODE_OTG) { 534 gusbcfg = DWC2_READ_4(hsotg, GUSBCFG); 535 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; 536 gusbcfg &= ~GUSBCFG_FORCEDEVMODE; 537 DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg); 538 } 539 540 /* 541 * NOTE: This long sleep is _very_ important, otherwise the core will 542 * not stay in host mode after a connector ID change! 543 */ 544 usleep_range(150000, 200000); 545 546 return 0; 547 } 548 549 static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) 550 { 551 u32 usbcfg, i2cctl; 552 int retval = 0; 553 554 /* 555 * core_init() is now called on every switch so only call the 556 * following for the first time through 557 */ 558 if (select_phy) { 559 dev_dbg(hsotg->dev, "FS PHY selected\n"); 560 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 561 usbcfg |= GUSBCFG_PHYSEL; 562 DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg); 563 564 /* Reset after a PHY select */ 565 retval = dwc2_core_reset(hsotg); 566 if (retval) { 567 dev_err(hsotg->dev, "%s() Reset failed, aborting", 568 __func__); 569 return retval; 570 } 571 } 572 573 /* 574 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also 575 * do this on HNP Dev/Host mode switches (done in dev_init and 576 * host_init). 
577 */ 578 if (dwc2_is_host_mode(hsotg)) 579 dwc2_init_fs_ls_pclk_sel(hsotg); 580 581 if (hsotg->core_params->i2c_enable > 0) { 582 dev_dbg(hsotg->dev, "FS PHY enabling I2C\n"); 583 584 /* Program GUSBCFG.OtgUtmiFsSel to I2C */ 585 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 586 usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL; 587 DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg); 588 589 /* Program GI2CCTL.I2CEn */ 590 i2cctl = DWC2_READ_4(hsotg, GI2CCTL); 591 i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK; 592 i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT; 593 i2cctl &= ~GI2CCTL_I2CEN; 594 DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl); 595 i2cctl |= GI2CCTL_I2CEN; 596 DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl); 597 } 598 599 return retval; 600 } 601 602 static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) 603 { 604 u32 usbcfg; 605 int retval = 0; 606 607 if (!select_phy) 608 return 0; 609 610 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 611 612 /* 613 * HS PHY parameters. These parameters are preserved during soft reset 614 * so only program the first time. Do a soft reset immediately after 615 * setting phyif. 
616 */ 617 switch (hsotg->core_params->phy_type) { 618 case DWC2_PHY_TYPE_PARAM_ULPI: 619 /* ULPI interface */ 620 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n"); 621 usbcfg |= GUSBCFG_ULPI_UTMI_SEL; 622 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL); 623 if (hsotg->core_params->phy_ulpi_ddr > 0) 624 usbcfg |= GUSBCFG_DDRSEL; 625 break; 626 case DWC2_PHY_TYPE_PARAM_UTMI: 627 /* UTMI+ interface */ 628 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n"); 629 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16); 630 if (hsotg->core_params->phy_utmi_width == 16) 631 usbcfg |= GUSBCFG_PHYIF16; 632 break; 633 default: 634 dev_err(hsotg->dev, "FS PHY selected at HS!\n"); 635 break; 636 } 637 638 DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg); 639 640 /* Reset after setting the PHY parameters */ 641 retval = dwc2_core_reset(hsotg); 642 if (retval) { 643 dev_err(hsotg->dev, "%s() Reset failed, aborting", 644 __func__); 645 return retval; 646 } 647 648 return retval; 649 } 650 651 static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) 652 { 653 u32 usbcfg; 654 int retval = 0; 655 656 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL && 657 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { 658 /* If FS mode with FS PHY */ 659 retval = dwc2_fs_phy_init(hsotg, select_phy); 660 if (retval) 661 return retval; 662 } else { 663 /* High speed PHY */ 664 retval = dwc2_hs_phy_init(hsotg, select_phy); 665 if (retval) 666 return retval; 667 } 668 669 if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && 670 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && 671 hsotg->core_params->ulpi_fs_ls > 0) { 672 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n"); 673 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 674 usbcfg |= GUSBCFG_ULPI_FS_LS; 675 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M; 676 DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg); 677 } else { 678 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 679 usbcfg &= ~GUSBCFG_ULPI_FS_LS; 680 usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M; 681 DWC2_WRITE_4(hsotg, 
GUSBCFG, usbcfg); 682 } 683 684 return retval; 685 } 686 687 static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg) 688 { 689 struct dwc2_softc *sc = hsotg->hsotg_sc; 690 u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG); 691 692 switch (hsotg->hw_params.arch) { 693 case GHWCFG2_EXT_DMA_ARCH: 694 dev_dbg(hsotg->dev, "External DMA Mode\n"); 695 if (!sc->sc_set_dma_addr) { 696 dev_err(hsotg->dev, "External DMA Mode not supported\n"); 697 return -EINVAL; 698 } 699 if (hsotg->core_params->ahbcfg != -1) { 700 ahbcfg &= GAHBCFG_CTRL_MASK; 701 ahbcfg |= hsotg->core_params->ahbcfg & 702 ~GAHBCFG_CTRL_MASK; 703 } 704 break; 705 706 case GHWCFG2_INT_DMA_ARCH: 707 dev_dbg(hsotg->dev, "Internal DMA Mode\n"); 708 if (hsotg->core_params->ahbcfg != -1) { 709 ahbcfg &= GAHBCFG_CTRL_MASK; 710 ahbcfg |= hsotg->core_params->ahbcfg & 711 ~GAHBCFG_CTRL_MASK; 712 } 713 break; 714 715 case GHWCFG2_SLAVE_ONLY_ARCH: 716 default: 717 dev_dbg(hsotg->dev, "Slave Only Mode\n"); 718 break; 719 } 720 721 dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n", 722 hsotg->core_params->dma_enable, 723 hsotg->core_params->dma_desc_enable); 724 725 if (hsotg->core_params->dma_enable > 0) { 726 if (hsotg->core_params->dma_desc_enable > 0) 727 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n"); 728 else 729 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n"); 730 } else { 731 dev_dbg(hsotg->dev, "Using Slave mode\n"); 732 hsotg->core_params->dma_desc_enable = 0; 733 } 734 735 if (hsotg->core_params->dma_enable > 0) 736 ahbcfg |= GAHBCFG_DMA_EN; 737 738 DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg); 739 740 return 0; 741 } 742 743 static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) 744 { 745 u32 usbcfg; 746 747 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 748 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP); 749 750 switch (hsotg->hw_params.op_mode) { 751 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 752 if (hsotg->core_params->otg_cap == 753 DWC2_CAP_PARAM_HNP_SRP_CAPABLE) 754 usbcfg |= GUSBCFG_HNPCAP; 755 if (hsotg->core_params->otg_cap 
!= 756 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) 757 usbcfg |= GUSBCFG_SRPCAP; 758 break; 759 760 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 761 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 762 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 763 if (hsotg->core_params->otg_cap != 764 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) 765 usbcfg |= GUSBCFG_SRPCAP; 766 break; 767 768 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE: 769 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE: 770 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST: 771 default: 772 break; 773 } 774 775 DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg); 776 } 777 778 /** 779 * dwc2_core_init() - Initializes the DWC_otg controller registers and 780 * prepares the core for device mode or host mode operation 781 * 782 * @hsotg: Programming view of the DWC_otg controller 783 * @select_phy: If true then also set the Phy type 784 * @irq: If >= 0, the irq to register 785 */ 786 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy) 787 { 788 u32 usbcfg, otgctl; 789 int retval; 790 791 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); 792 793 usbcfg = DWC2_READ_4(hsotg, GUSBCFG); 794 795 /* Set ULPI External VBUS bit if needed */ 796 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV; 797 if (hsotg->core_params->phy_ulpi_ext_vbus == 798 DWC2_PHY_ULPI_EXTERNAL_VBUS) 799 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV; 800 801 /* Set external TS Dline pulsing bit if needed */ 802 usbcfg &= ~GUSBCFG_TERMSELDLPULSE; 803 if (hsotg->core_params->ts_dline > 0) 804 usbcfg |= GUSBCFG_TERMSELDLPULSE; 805 806 DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg); 807 808 /* Reset the Controller */ 809 retval = dwc2_core_reset(hsotg); 810 if (retval) { 811 dev_err(hsotg->dev, "%s(): Reset failed, aborting\n", 812 __func__); 813 return retval; 814 } 815 816 /* 817 * This needs to happen in FS mode before any other programming occurs 818 */ 819 retval = dwc2_phy_init(hsotg, select_phy); 820 if (retval) 821 return retval; 822 823 /* Program the GAHBCFG Register */ 824 retval = dwc2_gahbcfg_init(hsotg); 825 if (retval) 826 
return retval; 827 828 /* Program the GUSBCFG register */ 829 dwc2_gusbcfg_init(hsotg); 830 831 /* Program the GOTGCTL register */ 832 otgctl = DWC2_READ_4(hsotg, GOTGCTL); 833 otgctl &= ~GOTGCTL_OTGVER; 834 if (hsotg->core_params->otg_ver > 0) 835 otgctl |= GOTGCTL_OTGVER; 836 DWC2_WRITE_4(hsotg, GOTGCTL, otgctl); 837 dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver); 838 839 /* Clear the SRP success bit for FS-I2c */ 840 hsotg->srp_success = 0; 841 842 /* Enable common interrupts */ 843 dwc2_enable_common_interrupts(hsotg); 844 845 /* 846 * Do device or host initialization based on mode during PCD and 847 * HCD initialization 848 */ 849 if (dwc2_is_host_mode(hsotg)) { 850 dev_dbg(hsotg->dev, "Host Mode\n"); 851 hsotg->op_state = OTG_STATE_A_HOST; 852 } else { 853 dev_dbg(hsotg->dev, "Device Mode\n"); 854 hsotg->op_state = OTG_STATE_B_PERIPHERAL; 855 } 856 857 return 0; 858 } 859 860 /** 861 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts 862 * 863 * @hsotg: Programming view of DWC_otg controller 864 */ 865 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg) 866 { 867 u32 intmsk; 868 869 dev_dbg(hsotg->dev, "%s()\n", __func__); 870 871 /* Disable all interrupts */ 872 DWC2_WRITE_4(hsotg, GINTMSK, 0); 873 DWC2_WRITE_4(hsotg, HAINTMSK, 0); 874 875 /* Enable the common interrupts */ 876 dwc2_enable_common_interrupts(hsotg); 877 878 /* Enable host mode interrupts without disturbing common interrupts */ 879 intmsk = DWC2_READ_4(hsotg, GINTMSK); 880 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT; 881 DWC2_WRITE_4(hsotg, GINTMSK, intmsk); 882 } 883 884 /** 885 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts 886 * 887 * @hsotg: Programming view of DWC_otg controller 888 */ 889 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg) 890 { 891 u32 intmsk = DWC2_READ_4(hsotg, GINTMSK); 892 893 /* Disable host mode interrupts without disturbing common interrupts */ 894 intmsk &= 
~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT | 895 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP); 896 DWC2_WRITE_4(hsotg, GINTMSK, intmsk); 897 } 898 899 /* 900 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size 901 * For system that have a total fifo depth that is smaller than the default 902 * RX + TX fifo size. 903 * 904 * @hsotg: Programming view of DWC_otg controller 905 */ 906 static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg) 907 { 908 struct dwc2_core_params *params = hsotg->core_params; 909 struct dwc2_hw_params *hw = &hsotg->hw_params; 910 u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size; 911 912 total_fifo_size = hw->total_fifo_size; 913 rxfsiz = params->host_rx_fifo_size; 914 nptxfsiz = params->host_nperio_tx_fifo_size; 915 ptxfsiz = params->host_perio_tx_fifo_size; 916 917 /* 918 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth 919 * allocation with support for high bandwidth endpoints. Synopsys 920 * defines MPS(Max Packet size) for a periodic EP=1024, and for 921 * non-periodic as 512. 922 */ 923 if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) { 924 /* 925 * For Buffer DMA mode/Scatter Gather DMA mode 926 * 2 * ((Largest Packet size / 4) + 1 + 1) + n 927 * with n = number of host channel. 928 * 2 * ((1024/4) + 2) = 516 929 */ 930 rxfsiz = 516 + hw->host_channels; 931 932 /* 933 * min non-periodic tx fifo depth 934 * 2 * (largest non-periodic USB packet used / 4) 935 * 2 * (512/4) = 256 936 */ 937 nptxfsiz = 256; 938 939 /* 940 * min periodic tx fifo depth 941 * (largest packet size*MC)/4 942 * (1024 * 3)/4 = 768 943 */ 944 ptxfsiz = 768; 945 946 params->host_rx_fifo_size = rxfsiz; 947 params->host_nperio_tx_fifo_size = nptxfsiz; 948 params->host_perio_tx_fifo_size = ptxfsiz; 949 } 950 951 /* 952 * If the summation of RX, NPTX and PTX fifo sizes is still 953 * bigger than the total_fifo_size, then we have a problem. 954 * 955 * We won't be able to allocate as many endpoints. 
Right now, 956 * we're just printing an error message, but ideally this FIFO 957 * allocation algorithm would be improved in the future. 958 * 959 * FIXME improve this FIFO allocation algorithm. 960 */ 961 if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz))) 962 dev_err(hsotg->dev, "invalid fifo sizes\n"); 963 } 964 965 static void dwc2_config_fifos(struct dwc2_hsotg *hsotg) 966 { 967 struct dwc2_core_params *params = hsotg->core_params; 968 u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz; 969 970 if (!params->enable_dynamic_fifo) 971 return; 972 973 dwc2_calculate_dynamic_fifo(hsotg); 974 975 /* Rx FIFO */ 976 grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ); 977 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz); 978 grxfsiz &= ~GRXFSIZ_DEPTH_MASK; 979 grxfsiz |= params->host_rx_fifo_size << 980 GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK; 981 DWC2_WRITE_4(hsotg, GRXFSIZ, grxfsiz); 982 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", DWC2_READ_4(hsotg, GRXFSIZ)); 983 984 /* Non-periodic Tx FIFO */ 985 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n", 986 DWC2_READ_4(hsotg, GNPTXFSIZ)); 987 nptxfsiz = params->host_nperio_tx_fifo_size << 988 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; 989 nptxfsiz |= params->host_rx_fifo_size << 990 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; 991 DWC2_WRITE_4(hsotg, GNPTXFSIZ, nptxfsiz); 992 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n", 993 DWC2_READ_4(hsotg, GNPTXFSIZ)); 994 995 /* Periodic Tx FIFO */ 996 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n", 997 DWC2_READ_4(hsotg, HPTXFSIZ)); 998 hptxfsiz = params->host_perio_tx_fifo_size << 999 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; 1000 hptxfsiz |= (params->host_rx_fifo_size + 1001 params->host_nperio_tx_fifo_size) << 1002 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; 1003 DWC2_WRITE_4(hsotg, HPTXFSIZ, hptxfsiz); 1004 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n", 1005 DWC2_READ_4(hsotg, HPTXFSIZ)); 1006 1007 if (hsotg->core_params->en_multiple_tx_fifo > 0 && 1008 hsotg->hw_params.snpsid 
<= DWC2_CORE_REV_2_94a) { 1009 /* 1010 * Global DFIFOCFG calculation for Host mode - 1011 * include RxFIFO, NPTXFIFO and HPTXFIFO 1012 */ 1013 dfifocfg = DWC2_READ_4(hsotg, GDFIFOCFG); 1014 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK; 1015 dfifocfg |= (params->host_rx_fifo_size + 1016 params->host_nperio_tx_fifo_size + 1017 params->host_perio_tx_fifo_size) << 1018 GDFIFOCFG_EPINFOBASE_SHIFT & 1019 GDFIFOCFG_EPINFOBASE_MASK; 1020 DWC2_WRITE_4(hsotg, GDFIFOCFG, dfifocfg); 1021 } 1022 } 1023 1024 /** 1025 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for 1026 * Host mode 1027 * 1028 * @hsotg: Programming view of DWC_otg controller 1029 * 1030 * This function flushes the Tx and Rx FIFOs and flushes any entries in the 1031 * request queues. Host channels are reset to ensure that they are ready for 1032 * performing transfers. 1033 */ 1034 void dwc2_core_host_init(struct dwc2_hsotg *hsotg) 1035 { 1036 u32 hcfg, hfir, otgctl; 1037 1038 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); 1039 1040 /* Restart the Phy Clock */ 1041 DWC2_WRITE_4(hsotg, PCGCTL, 0); 1042 1043 /* Initialize Host Configuration Register */ 1044 dwc2_init_fs_ls_pclk_sel(hsotg); 1045 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) { 1046 hcfg = DWC2_READ_4(hsotg, HCFG); 1047 hcfg |= HCFG_FSLSSUPP; 1048 DWC2_WRITE_4(hsotg, HCFG, hcfg); 1049 } 1050 1051 /* 1052 * This bit allows dynamic reloading of the HFIR register during 1053 * runtime. This bit needs to be programmed during initial configuration 1054 * and its value must not be changed during runtime. 
	 */
	if (hsotg->core_params->reload_ctl > 0) {
		hfir = DWC2_READ_4(hsotg, HFIR);
		hfir |= HFIR_RLDCTRL;
		DWC2_WRITE_4(hsotg, HFIR, hfir);
	}

	if (hsotg->core_params->dma_desc_enable > 0) {
		u32 op_mode = hsotg->hw_params.op_mode;
		/*
		 * Descriptor DMA requires core rev 2.90a or later, hardware
		 * support, and a host-capable operating mode; otherwise fall
		 * back to plain buffer DMA.
		 */
		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
		    !hsotg->hw_params.dma_desc_enable ||
		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
			dev_err(hsotg->dev,
				"Hardware does not support descriptor DMA mode -\n");
			dev_err(hsotg->dev,
				"falling back to buffer DMA mode.\n");
			hsotg->core_params->dma_desc_enable = 0;
		} else {
			hcfg = DWC2_READ_4(hsotg, HCFG);
			hcfg |= HCFG_DESCDMA;
			DWC2_WRITE_4(hsotg, HCFG, hcfg);
		}
	}

	/* Configure data FIFO sizes */
	dwc2_config_fifos(hsotg);

	/* TODO - check this */
	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);

	/* Make sure the FIFOs are flushed */
	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
	dwc2_flush_rx_fifo(hsotg);

	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);

	/* In Descriptor DMA mode the core disables channels itself */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		int num_channels, i;
		u32 hcchar;

		/* Flush out any leftover queued requests */
		num_channels = hsotg->core_params->host_channels;
		for (i = 0; i < num_channels; i++) {
			hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
			hcchar &= ~HCCHAR_CHENA;
			hcchar |= HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar);
		}

		/* Halt all channels to put them into a known state */
		for (i = 0; i < num_channels; i++) {
			int count = 0;

			/* CHENA + CHDIS together request a channel halt */
			hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar);
			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
				__func__, i);
			/* Poll until the core clears CHENA (~1ms timeout) */
			do {
				hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
				if (++count > 1000) {
					dev_err(hsotg->dev,
						"Unable to clear enable on channel %d\n",
						i);
					break;
				}
				udelay(1);
			} while (hcchar & HCCHAR_CHENA);
		}
	}

	/* Turn on the vbus power */
	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
	if (hsotg->op_state == OTG_STATE_A_HOST) {
		u32 hprt0 = dwc2_read_hprt0(hsotg);

		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
			!!(hprt0 & HPRT0_PWR));
		if (!(hprt0 & HPRT0_PWR)) {
			hprt0 |= HPRT0_PWR;
			DWC2_WRITE_4(hsotg, HPRT0, hprt0);
		}
	}

	dwc2_enable_host_interrupts(hsotg);
}

/*
 * dwc2_hc_enable_slave_ints() - Program HCINTMSK for a channel operating
 * in Slave (PIO) mode, based on endpoint type and transfer state.
 *
 * Channel Halted is always enabled; the rest of the mask depends on
 * direction, split state, ping state and whether the channel is in an
 * error-recovery state.
 */
static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, "control/bulk\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_BBLERR;
		} else {
			hcintmsk |= HCINTMSK_NAK;
			hcintmsk |= HCINTMSK_NYET;
			if (chan->do_ping)
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->do_split) {
			hcintmsk |= HCINTMSK_NAK;
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "intr\n");
		hcintmsk |=
		    HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_NAK;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		hcintmsk |= HCINTMSK_FRMOVRUN;

		if (chan->ep_is_in)
			hcintmsk |= HCINTMSK_BBLERR;
		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		if (chan->do_split) {
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "isoc\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_FRMOVRUN;
		hcintmsk |= HCINTMSK_ACK;

		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_XACTERR;
			hcintmsk |= HCINTMSK_BBLERR;
		}
		break;
	default:
		dev_err(hsotg->dev, "## Unknown EP type ##\n");
		break;
	}

	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}

/*
 * dwc2_hc_enable_dma_ints() - Program HCINTMSK for a channel when the core
 * is operating in (buffer or descriptor) DMA mode.
 *
 * In DMA mode the core services most conditions autonomously, so far fewer
 * interrupts are unmasked than in Slave mode.
 */
static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	/*
	 * For Descriptor DMA mode core halts the channel on AHB error.
	 * Interrupt is not required.
	 */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcintmsk |= HCINTMSK_AHBERR;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			hcintmsk |= HCINTMSK_XFERCOMPL;
	}

	/* While recovering from an error, also watch for ACK/NAK/toggle */
	if (chan->error_state && !chan->do_split &&
	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "setting ACK\n");
		hcintmsk |= HCINTMSK_ACK;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_DATATGLERR;
			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
				hcintmsk |= HCINTMSK_NAK;
		}
	}

	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}

/*
 * dwc2_hc_enable_ints() - Enable the channel-level (HCINTMSK), host-level
 * (HAINTMSK) and global (GINTMSK) interrupts needed for a transfer on the
 * given host channel.
 */
static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan)
{
	u32 intmsk;

	if (hsotg->core_params->dma_enable > 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_hc_enable_dma_ints(hsotg, chan);
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA disabled\n");
		dwc2_hc_enable_slave_ints(hsotg, chan);
	}

	/* Enable the top level host channel interrupt */
	intmsk = DWC2_READ_4(hsotg, HAINTMSK);
	intmsk |= 1 << chan->hc_num;
	DWC2_WRITE_4(hsotg, HAINTMSK, intmsk);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);

	/* Make sure host channel interrupts are enabled */
	intmsk = DWC2_READ_4(hsotg, GINTMSK);
	intmsk |= GINTSTS_HCHINT;
	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
}

/**
 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
 * a specific endpoint
 *
 * @hsotg: Programming view of DWC_otg
 *         controller
 * @chan: Information needed to initialize the host channel
 *
 * The HCCHARn register is set up with the characteristics specified in chan.
 * Host channel interrupts that may need to be serviced while this transfer is
 * in progress are enabled.
 */
void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u8 hc_num = chan->hc_num;
	u32 hcintmsk;
	u32 hcchar;
	u32 hcsplt = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Clear old interrupt conditions for this host channel */
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	DWC2_WRITE_4(hsotg, HCINT(hc_num), hcintmsk);

	/* Enable channel interrupts required for this transfer */
	dwc2_hc_enable_ints(hsotg, chan);

	/*
	 * Program the HCCHARn register with the endpoint characteristics for
	 * the current transfer
	 */
	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
	if (chan->ep_is_in)
		hcchar |= HCCHAR_EPDIR;
	if (chan->speed == USB_SPEED_LOW)
		hcchar |= HCCHAR_LSPDDEV;
	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
	DWC2_WRITE_4(hsotg, HCCHAR(hc_num), hcchar);
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
			 hc_num, hcchar);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
			 __func__, hc_num);
		dev_vdbg(hsotg->dev, "  Dev Addr: %d\n",
			 chan->dev_addr);
		dev_vdbg(hsotg->dev, "  Ep Num: %d\n",
			 chan->ep_num);
		dev_vdbg(hsotg->dev, "  Is In: %d\n",
			 chan->ep_is_in);
		dev_vdbg(hsotg->dev, "  Is Low Speed: %d\n",
			 chan->speed == USB_SPEED_LOW);
		dev_vdbg(hsotg->dev, "  Ep Type: %d\n",
			 chan->ep_type);
		dev_vdbg(hsotg->dev, "  Max Pkt: %d\n",
			 chan->max_packet);
	}

	/* Program the HCSPLT register for SPLITs */
	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev,
				 "Programming HC %d with split --> %s\n",
				 hc_num,
				 chan->complete_split ? "CSPLIT" : "SSPLIT");
		if (chan->complete_split)
			hcsplt |= HCSPLT_COMPSPLT;
		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
			  HCSPLT_XACTPOS_MASK;
		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
			  HCSPLT_HUBADDR_MASK;
		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
			  HCSPLT_PRTADDR_MASK;
		if (dbg_hc(chan)) {
			dev_vdbg(hsotg->dev, "	  comp split %d\n",
				 chan->complete_split);
			dev_vdbg(hsotg->dev, "	  xact pos %d\n",
				 chan->xact_pos);
			dev_vdbg(hsotg->dev, "	  hub addr %d\n",
				 chan->hub_addr);
			dev_vdbg(hsotg->dev, "	  hub port %d\n",
				 chan->hub_port);
			dev_vdbg(hsotg->dev, "	  is_in %d\n",
				 chan->ep_is_in);
			dev_vdbg(hsotg->dev, "	  Max Pkt %d\n",
				 chan->max_packet);
			dev_vdbg(hsotg->dev, "	  xferlen %d\n",
				 chan->xfer_len);
		}
	}

	/* hcsplt stays 0 (split disabled) for non-split transfers */
	DWC2_WRITE_4(hsotg, HCSPLT(hc_num), hcsplt);
}

/**
 * dwc2_hc_halt() - Attempts to halt a host channel
 *
 * @hsotg: Controller register interface
 * @chan: Host channel to halt
 * @halt_status: Reason for halting the channel
 *
 * This function should only be called in Slave mode or to abort a transfer in
 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
 * controller halts the channel when the transfer is complete or a condition
 * occurs that requires application intervention.
 *
 * In slave mode, checks for a free request queue entry, then sets the Channel
 * Enable and Channel Disable bits of the Host Channel Characteristics
 * register of the specified channel to initiate the halt.
 * If there is no free
 * request queue entry, sets only the Channel Disable bit of the HCCHARn
 * register to flush requests for this channel. In the latter case, sets a
 * flag to indicate that the host channel needs to be halted when a request
 * queue slot is open.
 *
 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
 * HCCHARn register. The controller ensures there is space in the request
 * queue before submitting the halt request.
 *
 * Some time may elapse before the core flushes any posted requests for this
 * host channel and halts. The Channel Halted interrupt handler completes the
 * deactivation of the host channel.
 */
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		  enum dwc2_halt_status halt_status)
{
	u32 nptxsts, hptxsts, hcchar;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);
	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);

	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    halt_status == DWC2_HC_XFER_AHB_ERR) {
		/*
		 * Disable all channel interrupts except Ch Halted. The QTD
		 * and QH state associated with this transfer has been cleared
		 * (in the case of URB_DEQUEUE), so the channel needs to be
		 * shut down carefully to prevent crashes.
		 */
		u32 hcintmsk = HCINTMSK_CHHLTD;

		dev_vdbg(hsotg->dev, "dequeue/error\n");
		DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);

		/*
		 * Make sure no other interrupts besides halt are currently
		 * pending. Handling another interrupt could cause a crash due
		 * to the QTD and QH state.
		 */
		DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), ~hcintmsk);

		/*
		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
		 * even if the channel was already halted for some other
		 * reason
		 */
		chan->halt_status = halt_status;

		hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
		if (!(hcchar & HCCHAR_CHENA)) {
			/*
			 * The channel is either already halted or it hasn't
			 * started yet. In DMA mode, the transfer may halt if
			 * it finishes normally or a condition occurs that
			 * requires driver intervention. Don't want to halt
			 * the channel again. In either Slave or DMA mode,
			 * it's possible that the transfer has been assigned
			 * to a channel, but not started yet when an URB is
			 * dequeued. Don't want to halt a channel that hasn't
			 * started yet.
			 */
			return;
		}
	}
	if (chan->halt_pending) {
		/*
		 * A halt has already been issued for this channel. This might
		 * happen when a transfer is aborted by a higher level in
		 * the stack.
		 */
		dev_vdbg(hsotg->dev,
			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
			 __func__, chan->hc_num);
		return;
	}

	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));

	/* No need to set the bit in DDMA for disabling the channel */
	/* TODO check it everywhere channel is disabled */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcchar |= HCCHAR_CHENA;
	} else {
		if (dbg_hc(chan))
			dev_dbg(hsotg->dev, "desc DMA enabled\n");
	}
	hcchar |= HCCHAR_CHDIS;

	if (hsotg->core_params->dma_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA not enabled\n");
		hcchar |= HCCHAR_CHENA;

		/* Check for space in the request queue to issue the halt */
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			nptxsts = DWC2_READ_4(hsotg, GNPTXSTS);
			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
				dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		} else {
			if (dbg_perio())
				dev_vdbg(hsotg->dev, "isoc/intr\n");
			hptxsts = DWC2_READ_4(hsotg, HPTXSTS);
			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
			    hsotg->queuing_high_bandwidth) {
				if (dbg_perio())
					dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		}
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
	}

	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
	chan->halt_status = halt_status;

	if (hcchar & HCCHAR_CHENA) {
		/* Halt request was issued; wait for the Channel Halted irq */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel enabled\n");
		chan->halt_pending = 1;
		chan->halt_on_queue = 0;
	} else {
		/* No queue space: halt again when a request slot opens */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel disabled\n");
		chan->halt_on_queue = 1;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
			 hcchar);
		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
			 chan->halt_pending);
		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
			 chan->halt_on_queue);
		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
			 chan->halt_status);
	}
}

/**
 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Identifies the host channel to clean up
 *
 * This function is normally called after a transfer is done and the host
 * channel is being released
 */
void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u32 hcintmsk;

	chan->xfer_started = 0;

	/*
	 * Clear channel interrupt enables and any unhandled channel interrupt
	 * conditions
	 */
	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), 0);
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), hcintmsk);
}

/**
 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
 * which frame a periodic transfer should occur
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Identifies the host channel to set up and its properties
 * @hcchar: Current value of the HCCHAR register for the specified host channel
 *
 * This function has no effect on non-periodic transfers
 */
static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, u32 *hcchar)
{
	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* 1 if _next_ frame is odd, 0 if it's even */
		if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
			*hcchar |= HCCHAR_ODDFRM;
	}
}

/*
 * dwc2_set_pid_isoc() - Choose the initial data PID for a (high-bandwidth)
 * isochronous transfer based on direction and packets per (micro)frame.
 */
static void
dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
{
	/* Set up the initial PID for the transfer */
	if (chan->speed == USB_SPEED_HIGH) {
		if (chan->ep_is_in) {
			/* IN: PID encodes packets expected per microframe */
			if (chan->multi_count == 1)
				chan->data_pid_start = DWC2_HC_PID_DATA0;
			else if (chan->multi_count == 2)
				chan->data_pid_start = DWC2_HC_PID_DATA1;
			else
				chan->data_pid_start = DWC2_HC_PID_DATA2;
		} else {
			/* OUT: multi-packet microframes start with MDATA */
			if (chan->multi_count == 1)
				chan->data_pid_start = DWC2_HC_PID_DATA0;
			else
				chan->data_pid_start = DWC2_HC_PID_MDATA;
		}
	} else {
		/* Full/low speed isoc always uses DATA0 */
		chan->data_pid_start = DWC2_HC_PID_DATA0;
	}
}

/**
 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
 * the Host Channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * This function should only be called in Slave mode. For a channel associated
 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
 * associated with a periodic EP, the periodic Tx FIFO is written.
 *
 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
 * the number of bytes written to the Tx FIFO.
1641 */ 1642 static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg, 1643 struct dwc2_host_chan *chan) 1644 { 1645 u32 i; 1646 u32 remaining_count; 1647 u32 byte_count; 1648 u32 dword_count; 1649 u32 *data_buf = (u32 *)chan->xfer_buf; 1650 u32 data_fifo; 1651 1652 if (dbg_hc(chan)) 1653 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1654 1655 data_fifo = HCFIFO(chan->hc_num); 1656 1657 remaining_count = chan->xfer_len - chan->xfer_count; 1658 if (remaining_count > chan->max_packet) 1659 byte_count = chan->max_packet; 1660 else 1661 byte_count = remaining_count; 1662 1663 dword_count = (byte_count + 3) / 4; 1664 1665 if (((unsigned long)data_buf & 0x3) == 0) { 1666 /* xfer_buf is DWORD aligned */ 1667 for (i = 0; i < dword_count; i++, data_buf++) 1668 DWC2_WRITE_4(hsotg, data_fifo, *data_buf); 1669 } else { 1670 /* xfer_buf is not DWORD aligned */ 1671 for (i = 0; i < dword_count; i++, data_buf++) { 1672 u32 data = data_buf[0] | data_buf[1] << 8 | 1673 data_buf[2] << 16 | data_buf[3] << 24; 1674 DWC2_WRITE_4(hsotg, data_fifo, data); 1675 } 1676 } 1677 1678 chan->xfer_count += byte_count; 1679 chan->xfer_buf += byte_count; 1680 } 1681 1682 /** 1683 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host 1684 * channel and starts the transfer 1685 * 1686 * @hsotg: Programming view of DWC_otg controller 1687 * @chan: Information needed to initialize the host channel. The xfer_len value 1688 * may be reduced to accommodate the max widths of the XferSize and 1689 * PktCnt fields in the HCTSIZn register. The multi_count value may be 1690 * changed to reflect the final xfer_len value. 1691 * 1692 * This function may be called in either Slave mode or DMA mode. In Slave mode, 1693 * the caller must ensure that there is sufficient space in the request queue 1694 * and Tx Data FIFO. 1695 * 1696 * For an OUT transfer in Slave mode, it loads a data packet into the 1697 * appropriate FIFO. If necessary, additional data packets are loaded in the 1698 * Host ISR. 
 *
 * For an IN transfer in Slave mode, a data packet is requested. The data
 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
 * additional data packets are requested in the Host ISR.
 *
 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
 * register along with a packet count of 1 and the channel is enabled. This
 * causes a single PING transaction to occur. Other fields in HCTSIZ are
 * simply set to 0 since no data transfer occurs in this case.
 *
 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
 * all the information required to perform the subsequent data transfer. In
 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
 * controller performs the entire PING protocol, then starts the data
 * transfer.
 */
void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
			    struct dwc2_host_chan *chan)
{
	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
	u32 hcchar;
	u32 hctsiz = 0;
	u16 num_packets;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (chan->do_ping) {
		if (hsotg->core_params->dma_enable <= 0) {
			/* Slave-mode PING is a dedicated single transaction */
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, no DMA\n");
			dwc2_hc_do_ping(hsotg, chan);
			chan->xfer_started = 1;
			return;
		} else {
			/* DMA mode: core runs the PING protocol itself */
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, DMA\n");
			hctsiz |= TSIZ_DOPNG;
		}
	}

	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "split\n");
		/* A split transaction carries at most one packet */
		num_packets = 1;

		if (chan->complete_split && !chan->ep_is_in)
			/*
			 * For CSPLIT OUT Transfer, set the size to 0 so the
			 * core doesn't expect any data written to the FIFO
			 */
			chan->xfer_len = 0;
		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
			chan->xfer_len = chan->max_packet;
		else if (!chan->ep_is_in && chan->xfer_len > 188)
			/* SSPLIT OUT payload is capped at 188 bytes */
			chan->xfer_len = 188;

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "no split\n");
		/*
		 * Ensure that the transfer length and packet count will fit
		 * in the widths allocated for them in the HCTSIZn register
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			/*
			 * Make sure the transfer size is no larger than one
			 * (micro)frame's worth of data. (A check was done
			 * when the periodic transfer was accepted to ensure
			 * that a (micro)frame's worth of data can be
			 * programmed into a channel.)
			 */
			u32 max_periodic_len =
				chan->multi_count * chan->max_packet;

			if (chan->xfer_len > max_periodic_len)
				chan->xfer_len = max_periodic_len;
		} else if (chan->xfer_len > max_hc_xfer_size) {
			/*
			 * Make sure that xfer_len is a multiple of max packet
			 * size
			 */
			chan->xfer_len =
				max_hc_xfer_size - chan->max_packet + 1;
		}

		if (chan->xfer_len > 0) {
			num_packets = (chan->xfer_len + chan->max_packet - 1) /
				      chan->max_packet;
			if (num_packets > max_hc_pkt_count) {
				num_packets = max_hc_pkt_count;
				chan->xfer_len = num_packets * chan->max_packet;
			}
		} else {
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;
		}

		if (chan->ep_is_in)
			/*
			 * Always program an integral # of max packets for IN
			 * transfers
			 */
			chan->xfer_len = num_packets * chan->max_packet;

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Make sure that the multi_count field matches the
			 * actual transfer length
			 */
			chan->multi_count = num_packets;

		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			dwc2_set_pid_isoc(chan);

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;
	}

	chan->start_pkt_count = num_packets;
	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;
	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
			 hctsiz, chan->hc_num);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n",
			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
			 TSIZ_XFERSIZE_SHIFT);
		dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n",
			 (hctsiz & TSIZ_PKTCNT_MASK) >>
			 TSIZ_PKTCNT_SHIFT);
		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
			 TSIZ_SC_MC_PID_SHIFT);
	}

	if (hsotg->core_params->dma_enable > 0) {
		dma_addr_t dma_addr;

		/* Use the bounce buffer when the URB buffer was misaligned */
		if (chan->align_buf) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "align_buf\n");
			dma_addr = chan->align_buf;
		} else {
			dma_addr = chan->xfer_dma;
		}
		/* NetBSD: allow the bus glue to override HCDMA programming */
		if (hsotg->hsotg_sc->sc_set_dma_addr == NULL) {
			DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num),
				     (u32)dma_addr);
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev,
					 "Wrote %08lx to HCDMA(%d)\n",
					 (unsigned long)dma_addr,
					 chan->hc_num);
		} else {
			(void)(*hsotg->hsotg_sc->sc_set_dma_addr)(
			    hsotg->dev, dma_addr, chan->hc_num);
		}
	}

	/* Start the split */
	if (chan->do_split) {
		u32 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chan->hc_num));

		hcsplt |= HCSPLT_SPLTENA;
		DWC2_WRITE_4(hsotg, HCSPLT(chan->hc_num), hcsplt);
	}

	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;
	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;

	if (hsotg->core_params->dma_enable <= 0 &&
	    !chan->ep_is_in && chan->xfer_len > 0)
		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
}

/**
 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
 * host channel and starts the transfer in Descriptor DMA mode
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
 * with micro-frame bitmap.
 *
 * Initializes HCDMA register with descriptor list address and CTD value then
 * starts the transfer via enabling the channel.
 */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hc_dma;
	u32 hctsiz = 0;

	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;

	/* Non-zero only for high-speed interrupt endpoints */
	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);
	}

	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);

	/* Low bits of HCDMA carry the Current Transfer Descriptor index */
	hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;

	/* Always start from first descriptor */
	hc_dma &= ~HCDMA_CTD_MASK;
	DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num), hc_dma);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
			 hc_dma, chan->hc_num);

	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;
}

/**
 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
 * a previous call to dwc2_hc_start_transfer()
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * The caller must ensure there is sufficient space in the request queue and Tx
 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
 * the controller acts autonomously to complete transfers programmed to a host
 * channel.
 *
 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
 * if there is any data remaining to be queued. For an IN transfer, another
 * data packet is always requested. For the SETUP phase of a control transfer,
 * this function does nothing.
 *
 * Return: 1 if a new request is queued, 0 if no more requests are required
 * for this transfer
 */
int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	if (chan->do_split)
		/* SPLITs always queue just once per channel */
		return 0;

	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
		/* SETUPs are queued only once since they can't be NAK'd */
		return 0;

	if (chan->ep_is_in) {
		/*
		 * Always queue another request for other IN transfers. If
		 * back-to-back INs are issued and NAKs are received for both,
		 * the driver may still be processing the first NAK when the
		 * second NAK is received. When the interrupt handler clears
		 * the NAK interrupt for the first NAK, the second NAK will
		 * not be seen.
So we can't depend on the NAK interrupt 2038 * handler to requeue a NAK'd request. Instead, IN requests 2039 * are issued each time this function is called. When the 2040 * transfer completes, the extra requests for the channel will 2041 * be flushed. 2042 */ 2043 u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num)); 2044 2045 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); 2046 hcchar |= HCCHAR_CHENA; 2047 hcchar &= ~HCCHAR_CHDIS; 2048 if (dbg_hc(chan)) 2049 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n", 2050 hcchar); 2051 DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar); 2052 chan->requests++; 2053 return 1; 2054 } 2055 2056 /* OUT transfers */ 2057 2058 if (chan->xfer_count < chan->xfer_len) { 2059 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 2060 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 2061 u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num)); 2062 2063 dwc2_hc_set_even_odd_frame(hsotg, chan, 2064 &hcchar); 2065 } 2066 2067 /* Load OUT packet into the appropriate Tx FIFO */ 2068 dwc2_hc_write_packet(hsotg, chan); 2069 chan->requests++; 2070 return 1; 2071 } 2072 2073 return 0; 2074 } 2075 2076 /** 2077 * dwc2_hc_do_ping() - Starts a PING transfer 2078 * 2079 * @hsotg: Programming view of DWC_otg controller 2080 * @chan: Information needed to initialize the host channel 2081 * 2082 * This function should only be called in Slave mode. The Do Ping bit is set in 2083 * the HCTSIZ register, then the channel is enabled. 
void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hctsiz;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	/* Do Ping with a packet count of 1, then enable the channel */
	hctsiz = TSIZ_DOPNG;
	hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);

	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;
	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
}

/**
 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
 * the HFIR register according to PHY type and speed
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: The caller can modify the value of the HFIR register only after the
 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
 * has been set
 */
u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;
	u32 hprt0;
	int clock = 60;	/* default value */

	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
	hprt0 = DWC2_READ_4(hsotg, HPRT0);

	/*
	 * Select the PHY clock (presumably in MHz — confirm against the
	 * databook) from the PHY selection bits in GUSBCFG and the FS PHY
	 * type. The conditions are evaluated in order; later matches win.
	 */
	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
	    !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
		clock = 48;
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 30;
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 48;
	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
		clock = 48;
	if ((usbcfg & GUSBCFG_PHYSEL) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		clock = 48;

	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
		/* High speed case */
		return 125 * clock;
	else
		/* FS/LS case */
		return 1000 * clock;
}

/**
 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
 * buffer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @dest:  Destination buffer for the packet
 * @bytes: Number of bytes to copy to the destination
 */
void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
{
	bus_size_t fifo = HCFIFO(0);
	u32 *data_buf = (u32 *)dest;
	/* Round up to whole 32-bit words; the FIFO is read a word at a time */
	int word_count = (bytes + 3) / 4;
	int i;

	/*
	 * Todo: Account for the case where dest is not dword aligned. This
	 * requires reading data from the FIFO into a u32 temp buffer, then
	 * moving it into the data buffer.
	 */

	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);

	for (i = 0; i < word_count; i++, data_buf++)
		*data_buf = DWC2_READ_4(hsotg, fifo);
}

/**
 * dwc2_dump_host_registers() - Prints the host registers
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
{
	/* Compiles to an empty function unless DWC2_DEBUG is defined */
#ifdef DWC2_DEBUG
	bus_size_t addr;
	int i;

	dev_dbg(hsotg->dev, "Host Global Registers\n");
	addr = HCFG;
	dev_dbg(hsotg->dev, "HCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HFIR;
	dev_dbg(hsotg->dev, "HFIR	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HFNUM;
	dev_dbg(hsotg->dev, "HFNUM	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HPTXSTS;
	dev_dbg(hsotg->dev, "HPTXSTS	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HAINT;
	dev_dbg(hsotg->dev, "HAINT	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HAINTMSK;
	dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	if (hsotg->core_params->dma_desc_enable > 0) {
		addr = HFLBADDR;
		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	}

	addr = HPRT0;
	dev_dbg(hsotg->dev, "HPRT0	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));

	for (i = 0; i < hsotg->core_params->host_channels; i++) {
		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
		addr = HCCHAR(i);
		dev_dbg(hsotg->dev, "HCCHAR	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCSPLT(i);
		dev_dbg(hsotg->dev, "HCSPLT	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCINT(i);
		dev_dbg(hsotg->dev, "HCINT	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCINTMSK(i);
		dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCTSIZ(i);
		dev_dbg(hsotg->dev, "HCTSIZ	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCDMA(i);
		dev_dbg(hsotg->dev, "HCDMA	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		if (hsotg->core_params->dma_desc_enable > 0) {
			addr = HCDMAB(i);
			dev_dbg(hsotg->dev, "HCDMAB	 @0x%08lX : 0x%08X\n",
				(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		}
	}
#endif
}
addr)); 2229 addr = HCSPLT(i); 2230 dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n", 2231 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2232 addr = HCINT(i); 2233 dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n", 2234 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2235 addr = HCINTMSK(i); 2236 dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n", 2237 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2238 addr = HCTSIZ(i); 2239 dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n", 2240 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2241 addr = HCDMA(i); 2242 dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n", 2243 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2244 if (hsotg->core_params->dma_desc_enable > 0) { 2245 addr = HCDMAB(i); 2246 dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n", 2247 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2248 } 2249 } 2250 #endif 2251 } 2252 2253 /** 2254 * dwc2_dump_global_registers() - Prints the core global registers 2255 * 2256 * @hsotg: Programming view of DWC_otg controller 2257 * 2258 * NOTE: This function will be removed once the peripheral controller code 2259 * is integrated and the driver is stable 2260 */ 2261 void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg) 2262 { 2263 #ifdef DWC2_DEBUG 2264 bus_size_t addr; 2265 2266 dev_dbg(hsotg->dev, "Core Global Registers\n"); 2267 addr = GOTGCTL; 2268 dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n", 2269 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2270 addr = GOTGINT; 2271 dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n", 2272 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2273 addr = GAHBCFG; 2274 dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n", 2275 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2276 addr = GUSBCFG; 2277 dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n", 2278 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2279 addr = GRSTCTL; 2280 dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n", 2281 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2282 addr = 
GINTSTS; 2283 dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n", 2284 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2285 addr = GINTMSK; 2286 dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n", 2287 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2288 addr = GRXSTSR; 2289 dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n", 2290 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2291 addr = GRXFSIZ; 2292 dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n", 2293 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2294 addr = GNPTXFSIZ; 2295 dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n", 2296 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2297 addr = GNPTXSTS; 2298 dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n", 2299 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2300 addr = GI2CCTL; 2301 dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n", 2302 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2303 addr = GPVNDCTL; 2304 dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n", 2305 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2306 addr = GGPIO; 2307 dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n", 2308 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2309 addr = GUID; 2310 dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n", 2311 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2312 addr = GSNPSID; 2313 dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n", 2314 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2315 addr = GHWCFG1; 2316 dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n", 2317 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2318 addr = GHWCFG2; 2319 dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n", 2320 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2321 addr = GHWCFG3; 2322 dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n", 2323 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2324 addr = GHWCFG4; 2325 dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n", 2326 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2327 addr = GLPMCFG; 2328 dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX 
: 0x%08X\n", 2329 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2330 addr = GPWRDN; 2331 dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n", 2332 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2333 addr = GDFIFOCFG; 2334 dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n", 2335 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2336 addr = HPTXFSIZ; 2337 dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n", 2338 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2339 2340 addr = PCGCTL; 2341 dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n", 2342 (unsigned long)addr, DWC2_READ_4(hsotg, addr)); 2343 #endif 2344 } 2345 2346 /** 2347 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO 2348 * 2349 * @hsotg: Programming view of DWC_otg controller 2350 * @num: Tx FIFO to flush 2351 */ 2352 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num) 2353 { 2354 u32 greset; 2355 int count = 0; 2356 2357 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num); 2358 2359 greset = GRSTCTL_TXFFLSH; 2360 greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK; 2361 DWC2_WRITE_4(hsotg, GRSTCTL, greset); 2362 2363 do { 2364 greset = DWC2_READ_4(hsotg, GRSTCTL); 2365 if (++count > 10000) { 2366 dev_warn(hsotg->dev, 2367 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n", 2368 __func__, greset, 2369 DWC2_READ_4(hsotg, GNPTXSTS)); 2370 break; 2371 } 2372 udelay(1); 2373 } while (greset & GRSTCTL_TXFFLSH); 2374 2375 /* Wait for at least 3 PHY Clocks */ 2376 udelay(1); 2377 } 2378 2379 /** 2380 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO 2381 * 2382 * @hsotg: Programming view of DWC_otg controller 2383 */ 2384 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg) 2385 { 2386 u32 greset; 2387 int count = 0; 2388 2389 dev_vdbg(hsotg->dev, "%s()\n", __func__); 2390 2391 greset = GRSTCTL_RXFFLSH; 2392 DWC2_WRITE_4(hsotg, GRSTCTL, greset); 2393 2394 do { 2395 greset = DWC2_READ_4(hsotg, GRSTCTL); 2396 if (++count > 10000) { 2397 dev_warn(hsotg->dev, "%s() HANG! 
GRSTCTL=%0x\n", 2398 __func__, greset); 2399 break; 2400 } 2401 udelay(1); 2402 } while (greset & GRSTCTL_RXFFLSH); 2403 2404 /* Wait for at least 3 PHY Clocks */ 2405 udelay(1); 2406 } 2407 2408 #define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c)) 2409 2410 /* Parameter access functions */ 2411 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val) 2412 { 2413 int valid = 1; 2414 2415 switch (val) { 2416 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE: 2417 if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) 2418 valid = 0; 2419 break; 2420 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE: 2421 switch (hsotg->hw_params.op_mode) { 2422 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 2423 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 2424 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 2425 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 2426 break; 2427 default: 2428 valid = 0; 2429 break; 2430 } 2431 break; 2432 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE: 2433 /* always valid */ 2434 break; 2435 default: 2436 valid = 0; 2437 break; 2438 } 2439 2440 if (!valid) { 2441 if (val >= 0) 2442 dev_err(hsotg->dev, 2443 "%d invalid for otg_cap parameter. 
Check HW configuration.\n", 2444 val); 2445 switch (hsotg->hw_params.op_mode) { 2446 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 2447 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE; 2448 break; 2449 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 2450 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 2451 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 2452 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE; 2453 break; 2454 default: 2455 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; 2456 break; 2457 } 2458 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val); 2459 } 2460 2461 hsotg->core_params->otg_cap = val; 2462 } 2463 2464 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val) 2465 { 2466 int valid = 1; 2467 2468 if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH) 2469 valid = 0; 2470 if (val < 0) 2471 valid = 0; 2472 2473 if (!valid) { 2474 if (val >= 0) 2475 dev_err(hsotg->dev, 2476 "%d invalid for dma_enable parameter. Check HW configuration.\n", 2477 val); 2478 val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH; 2479 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val); 2480 } 2481 2482 hsotg->core_params->dma_enable = val; 2483 } 2484 2485 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val) 2486 { 2487 int valid = 1; 2488 2489 if (val > 0 && (hsotg->core_params->dma_enable <= 0 || 2490 !hsotg->hw_params.dma_desc_enable)) 2491 valid = 0; 2492 if (val < 0) 2493 valid = 0; 2494 2495 if (!valid) { 2496 if (val >= 0) 2497 dev_err(hsotg->dev, 2498 "%d invalid for dma_desc_enable parameter. 
Check HW configuration.\n", 2499 val); 2500 val = (hsotg->core_params->dma_enable > 0 && 2501 hsotg->hw_params.dma_desc_enable); 2502 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val); 2503 } 2504 2505 hsotg->core_params->dma_desc_enable = val; 2506 } 2507 2508 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg, 2509 int val) 2510 { 2511 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2512 if (val >= 0) { 2513 dev_err(hsotg->dev, 2514 "Wrong value for host_support_fs_low_power\n"); 2515 dev_err(hsotg->dev, 2516 "host_support_fs_low_power must be 0 or 1\n"); 2517 } 2518 val = 0; 2519 dev_dbg(hsotg->dev, 2520 "Setting host_support_fs_low_power to %d\n", val); 2521 } 2522 2523 hsotg->core_params->host_support_fs_ls_low_power = val; 2524 } 2525 2526 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val) 2527 { 2528 int valid = 1; 2529 2530 if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo) 2531 valid = 0; 2532 if (val < 0) 2533 valid = 0; 2534 2535 if (!valid) { 2536 if (val >= 0) 2537 dev_err(hsotg->dev, 2538 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n", 2539 val); 2540 val = hsotg->hw_params.enable_dynamic_fifo; 2541 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val); 2542 } 2543 2544 hsotg->core_params->enable_dynamic_fifo = val; 2545 } 2546 2547 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2548 { 2549 int valid = 1; 2550 2551 if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size) 2552 valid = 0; 2553 2554 if (!valid) { 2555 if (val >= 0) 2556 dev_err(hsotg->dev, 2557 "%d invalid for host_rx_fifo_size. 
Check HW configuration.\n", 2558 val); 2559 val = hsotg->hw_params.host_rx_fifo_size; 2560 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val); 2561 } 2562 2563 hsotg->core_params->host_rx_fifo_size = val; 2564 } 2565 2566 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2567 { 2568 int valid = 1; 2569 2570 if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size) 2571 valid = 0; 2572 2573 if (!valid) { 2574 if (val >= 0) 2575 dev_err(hsotg->dev, 2576 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n", 2577 val); 2578 val = hsotg->hw_params.host_nperio_tx_fifo_size; 2579 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n", 2580 val); 2581 } 2582 2583 hsotg->core_params->host_nperio_tx_fifo_size = val; 2584 } 2585 2586 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) 2587 { 2588 int valid = 1; 2589 2590 if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size) 2591 valid = 0; 2592 2593 if (!valid) { 2594 if (val >= 0) 2595 dev_err(hsotg->dev, 2596 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n", 2597 val); 2598 val = hsotg->hw_params.host_perio_tx_fifo_size; 2599 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n", 2600 val); 2601 } 2602 2603 hsotg->core_params->host_perio_tx_fifo_size = val; 2604 } 2605 2606 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val) 2607 { 2608 int valid = 1; 2609 2610 if (val < 2047 || val > hsotg->hw_params.max_transfer_size) 2611 valid = 0; 2612 2613 if (!valid) { 2614 if (val >= 0) 2615 dev_err(hsotg->dev, 2616 "%d invalid for max_transfer_size. 
Check HW configuration.\n", 2617 val); 2618 val = hsotg->hw_params.max_transfer_size; 2619 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val); 2620 } 2621 2622 hsotg->core_params->max_transfer_size = val; 2623 } 2624 2625 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val) 2626 { 2627 int valid = 1; 2628 2629 if (val < 15 || val > hsotg->hw_params.max_packet_count) 2630 valid = 0; 2631 2632 if (!valid) { 2633 if (val >= 0) 2634 dev_err(hsotg->dev, 2635 "%d invalid for max_packet_count. Check HW configuration.\n", 2636 val); 2637 val = hsotg->hw_params.max_packet_count; 2638 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val); 2639 } 2640 2641 hsotg->core_params->max_packet_count = val; 2642 } 2643 2644 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val) 2645 { 2646 int valid = 1; 2647 2648 if (val < 1 || val > hsotg->hw_params.host_channels) 2649 valid = 0; 2650 2651 if (!valid) { 2652 if (val >= 0) 2653 dev_err(hsotg->dev, 2654 "%d invalid for host_channels. 
Check HW configuration.\n", 2655 val); 2656 val = hsotg->hw_params.host_channels; 2657 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val); 2658 } 2659 2660 hsotg->core_params->host_channels = val; 2661 } 2662 2663 void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val) 2664 { 2665 int valid = 0; 2666 u32 hs_phy_type, fs_phy_type; 2667 2668 if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS, 2669 DWC2_PHY_TYPE_PARAM_ULPI)) { 2670 if (val >= 0) { 2671 dev_err(hsotg->dev, "Wrong value for phy_type\n"); 2672 dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n"); 2673 } 2674 2675 valid = 0; 2676 } 2677 2678 hs_phy_type = hsotg->hw_params.hs_phy_type; 2679 fs_phy_type = hsotg->hw_params.fs_phy_type; 2680 if (val == DWC2_PHY_TYPE_PARAM_UTMI && 2681 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || 2682 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 2683 valid = 1; 2684 else if (val == DWC2_PHY_TYPE_PARAM_ULPI && 2685 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI || 2686 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 2687 valid = 1; 2688 else if (val == DWC2_PHY_TYPE_PARAM_FS && 2689 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) 2690 valid = 1; 2691 2692 if (!valid) { 2693 if (val >= 0) 2694 dev_err(hsotg->dev, 2695 "%d invalid for phy_type. 
Check HW configuration.\n", 2696 val); 2697 val = DWC2_PHY_TYPE_PARAM_FS; 2698 if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) { 2699 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || 2700 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI) 2701 val = DWC2_PHY_TYPE_PARAM_UTMI; 2702 else 2703 val = DWC2_PHY_TYPE_PARAM_ULPI; 2704 } 2705 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val); 2706 } 2707 2708 hsotg->core_params->phy_type = val; 2709 } 2710 2711 static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg) 2712 { 2713 return hsotg->core_params->phy_type; 2714 } 2715 2716 void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val) 2717 { 2718 int valid = 1; 2719 2720 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2721 if (val >= 0) { 2722 dev_err(hsotg->dev, "Wrong value for speed parameter\n"); 2723 dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n"); 2724 } 2725 valid = 0; 2726 } 2727 2728 if (val == DWC2_SPEED_PARAM_HIGH && 2729 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) 2730 valid = 0; 2731 2732 if (!valid) { 2733 if (val >= 0) 2734 dev_err(hsotg->dev, 2735 "%d invalid for speed parameter. Check HW configuration.\n", 2736 val); 2737 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ? 
2738 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH; 2739 dev_dbg(hsotg->dev, "Setting speed to %d\n", val); 2740 } 2741 2742 hsotg->core_params->speed = val; 2743 } 2744 2745 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val) 2746 { 2747 int valid = 1; 2748 2749 if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ, 2750 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) { 2751 if (val >= 0) { 2752 dev_err(hsotg->dev, 2753 "Wrong value for host_ls_low_power_phy_clk parameter\n"); 2754 dev_err(hsotg->dev, 2755 "host_ls_low_power_phy_clk must be 0 or 1\n"); 2756 } 2757 valid = 0; 2758 } 2759 2760 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ && 2761 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) 2762 valid = 0; 2763 2764 if (!valid) { 2765 if (val >= 0) 2766 dev_err(hsotg->dev, 2767 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n", 2768 val); 2769 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS 2770 ? 
DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 2771 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ; 2772 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n", 2773 val); 2774 } 2775 2776 hsotg->core_params->host_ls_low_power_phy_clk = val; 2777 } 2778 2779 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val) 2780 { 2781 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2782 if (val >= 0) { 2783 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n"); 2784 dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n"); 2785 } 2786 val = 0; 2787 dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val); 2788 } 2789 2790 hsotg->core_params->phy_ulpi_ddr = val; 2791 } 2792 2793 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val) 2794 { 2795 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2796 if (val >= 0) { 2797 dev_err(hsotg->dev, 2798 "Wrong value for phy_ulpi_ext_vbus\n"); 2799 dev_err(hsotg->dev, 2800 "phy_ulpi_ext_vbus must be 0 or 1\n"); 2801 } 2802 val = 0; 2803 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val); 2804 } 2805 2806 hsotg->core_params->phy_ulpi_ext_vbus = val; 2807 } 2808 2809 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val) 2810 { 2811 int valid = 0; 2812 2813 switch (hsotg->hw_params.utmi_phy_data_width) { 2814 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8: 2815 valid = (val == 8); 2816 break; 2817 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16: 2818 valid = (val == 16); 2819 break; 2820 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16: 2821 valid = (val == 8 || val == 16); 2822 break; 2823 } 2824 2825 if (!valid) { 2826 if (val >= 0) { 2827 dev_err(hsotg->dev, 2828 "%d invalid for phy_utmi_width. Check HW configuration.\n", 2829 val); 2830 } 2831 val = (hsotg->hw_params.utmi_phy_data_width == 2832 GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 
8 : 16; 2833 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val); 2834 } 2835 2836 hsotg->core_params->phy_utmi_width = val; 2837 } 2838 2839 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val) 2840 { 2841 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2842 if (val >= 0) { 2843 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n"); 2844 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n"); 2845 } 2846 val = 0; 2847 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val); 2848 } 2849 2850 hsotg->core_params->ulpi_fs_ls = val; 2851 } 2852 2853 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val) 2854 { 2855 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2856 if (val >= 0) { 2857 dev_err(hsotg->dev, "Wrong value for ts_dline\n"); 2858 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n"); 2859 } 2860 val = 0; 2861 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val); 2862 } 2863 2864 hsotg->core_params->ts_dline = val; 2865 } 2866 2867 void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val) 2868 { 2869 int valid = 1; 2870 2871 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2872 if (val >= 0) { 2873 dev_err(hsotg->dev, "Wrong value for i2c_enable\n"); 2874 dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n"); 2875 } 2876 2877 valid = 0; 2878 } 2879 2880 if (val == 1 && !(hsotg->hw_params.i2c_enable)) 2881 valid = 0; 2882 2883 if (!valid) { 2884 if (val >= 0) 2885 dev_err(hsotg->dev, 2886 "%d invalid for i2c_enable. 
Check HW configuration.\n", 2887 val); 2888 val = hsotg->hw_params.i2c_enable; 2889 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val); 2890 } 2891 2892 hsotg->core_params->i2c_enable = val; 2893 } 2894 2895 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val) 2896 { 2897 int valid = 1; 2898 2899 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2900 if (val >= 0) { 2901 dev_err(hsotg->dev, 2902 "Wrong value for en_multiple_tx_fifo,\n"); 2903 dev_err(hsotg->dev, 2904 "en_multiple_tx_fifo must be 0 or 1\n"); 2905 } 2906 valid = 0; 2907 } 2908 2909 if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo) 2910 valid = 0; 2911 2912 if (!valid) { 2913 if (val >= 0) 2914 dev_err(hsotg->dev, 2915 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n", 2916 val); 2917 val = hsotg->hw_params.en_multiple_tx_fifo; 2918 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val); 2919 } 2920 2921 hsotg->core_params->en_multiple_tx_fifo = val; 2922 } 2923 2924 void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val) 2925 { 2926 int valid = 1; 2927 2928 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2929 if (val >= 0) { 2930 dev_err(hsotg->dev, 2931 "'%d' invalid for parameter reload_ctl\n", val); 2932 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n"); 2933 } 2934 valid = 0; 2935 } 2936 2937 if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a) 2938 valid = 0; 2939 2940 if (!valid) { 2941 if (val >= 0) 2942 dev_err(hsotg->dev, 2943 "%d invalid for parameter reload_ctl. 
Check HW configuration.\n", 2944 val); 2945 val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a; 2946 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val); 2947 } 2948 2949 hsotg->core_params->reload_ctl = val; 2950 } 2951 2952 void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val) 2953 { 2954 if (val != -1) 2955 hsotg->core_params->ahbcfg = val; 2956 else 2957 hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 << 2958 GAHBCFG_HBSTLEN_SHIFT; 2959 } 2960 2961 void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val) 2962 { 2963 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2964 if (val >= 0) { 2965 dev_err(hsotg->dev, 2966 "'%d' invalid for parameter otg_ver\n", val); 2967 dev_err(hsotg->dev, 2968 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n"); 2969 } 2970 val = 0; 2971 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val); 2972 } 2973 2974 hsotg->core_params->otg_ver = val; 2975 } 2976 2977 static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val) 2978 { 2979 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2980 if (val >= 0) { 2981 dev_err(hsotg->dev, 2982 "'%d' invalid for parameter uframe_sched\n", 2983 val); 2984 dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n"); 2985 } 2986 val = 1; 2987 dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val); 2988 } 2989 2990 hsotg->core_params->uframe_sched = val; 2991 } 2992 2993 static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg, 2994 int val) 2995 { 2996 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 2997 if (val >= 0) { 2998 dev_err(hsotg->dev, 2999 "'%d' invalid for parameter external_id_pin_ctl\n", 3000 val); 3001 dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n"); 3002 } 3003 val = 0; 3004 dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val); 3005 } 3006 3007 hsotg->core_params->external_id_pin_ctl = val; 3008 } 3009 3010 static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg, 3011 int val) 3012 { 3013 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 
/*
 * This function is called during module initialization to pass module
 * parameters for the DWC_otg core.
 *
 * NOTE: ordering matters — e.g. dma_desc_enable validates against the
 * dma_enable value set just before it, and speed/host_ls_low_power_phy_clk
 * validate against the phy_type set earlier.
 */
void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
			 const struct dwc2_core_params *params)
{
	dev_dbg(hsotg->dev, "%s()\n", __func__);

	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
			params->host_support_fs_ls_low_power);
	dwc2_set_param_enable_dynamic_fifo(hsotg,
			params->enable_dynamic_fifo);
	dwc2_set_param_host_rx_fifo_size(hsotg,
			params->host_rx_fifo_size);
	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
			params->host_nperio_tx_fifo_size);
	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
			params->host_perio_tx_fifo_size);
	dwc2_set_param_max_transfer_size(hsotg,
			params->max_transfer_size);
	dwc2_set_param_max_packet_count(hsotg,
			params->max_packet_count);
	dwc2_set_param_host_channels(hsotg, params->host_channels);
	dwc2_set_param_phy_type(hsotg, params->phy_type);
	dwc2_set_param_speed(hsotg, params->speed);
	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
			params->host_ls_low_power_phy_clk);
	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
			params->phy_ulpi_ext_vbus);
	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
	dwc2_set_param_en_multiple_tx_fifo(hsotg,
			params->en_multiple_tx_fifo);
	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
	dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
	dwc2_set_param_hibernation(hsotg, params->hibernation);
}

/**
 * During device initialization, read various hardware configuration
 * registers and interpret the contents.
 */
int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	unsigned width;
	u32 hwcfg2, hwcfg3, hwcfg4;
	u32 hptxfsiz, grxfsiz, gnptxfsiz;
	u32 gusbcfg;

	/*
	 * Attempt to ensure this device is really a DWC_otg Controller.
	 * Read and verify the GSNPSID register contents. The value should be
	 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or
	 * "OT3", as in "OTG version 2.xx" or "OTG version 3.xx".
3092 */ 3093 hw->snpsid = DWC2_READ_4(hsotg, GSNPSID); 3094 if ((hw->snpsid & 0xfffff000) != 0x4f542000 && 3095 (hw->snpsid & 0xfffff000) != 0x4f543000) { 3096 dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n", 3097 hw->snpsid); 3098 return -ENODEV; 3099 } 3100 3101 dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n", 3102 hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf, 3103 hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid); 3104 3105 hwcfg2 = DWC2_READ_4(hsotg, GHWCFG2); 3106 hwcfg3 = DWC2_READ_4(hsotg, GHWCFG3); 3107 hwcfg4 = DWC2_READ_4(hsotg, GHWCFG4); 3108 grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ); 3109 3110 dev_dbg(hsotg->dev, "hwcfg1=%08x\n", DWC2_READ_4(hsotg, GHWCFG1)); 3111 dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2); 3112 dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3); 3113 dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4); 3114 dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz); 3115 3116 /* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */ 3117 gusbcfg = DWC2_READ_4(hsotg, GUSBCFG); 3118 gusbcfg |= GUSBCFG_FORCEHOSTMODE; 3119 DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg); 3120 usleep_range(100000, 150000); 3121 3122 gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ); 3123 hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ); 3124 dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); 3125 dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz); 3126 gusbcfg = DWC2_READ_4(hsotg, GUSBCFG); 3127 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE; 3128 DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg); 3129 usleep_range(100000, 150000); 3130 3131 /* hwcfg2 */ 3132 hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >> 3133 GHWCFG2_OP_MODE_SHIFT; 3134 hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >> 3135 GHWCFG2_ARCHITECTURE_SHIFT; 3136 hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO); 3137 hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >> 3138 GHWCFG2_NUM_HOST_CHAN_SHIFT); 3139 hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >> 3140 GHWCFG2_HS_PHY_TYPE_SHIFT; 3141 
hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >> 3142 GHWCFG2_FS_PHY_TYPE_SHIFT; 3143 hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >> 3144 GHWCFG2_NUM_DEV_EP_SHIFT; 3145 hw->nperio_tx_q_depth = 3146 (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >> 3147 GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1; 3148 hw->host_perio_tx_q_depth = 3149 (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >> 3150 GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1; 3151 hw->dev_token_q_depth = 3152 (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >> 3153 GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT; 3154 3155 /* hwcfg3 */ 3156 width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >> 3157 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT; 3158 hw->max_transfer_size = (1 << (width + 11)) - 1; 3159 /* 3160 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates 3161 * coherent buffers with this size, and if it's too large we can 3162 * exhaust the coherent DMA pool. 3163 */ 3164 if (hw->max_transfer_size > 65535) 3165 hw->max_transfer_size = 65535; 3166 width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >> 3167 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT; 3168 hw->max_packet_count = (1 << (width + 4)) - 1; 3169 hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C); 3170 hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >> 3171 GHWCFG3_DFIFO_DEPTH_SHIFT; 3172 3173 /* hwcfg4 */ 3174 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN); 3175 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >> 3176 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT; 3177 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA); 3178 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ); 3179 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >> 3180 GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT; 3181 3182 /* fifo sizes */ 3183 hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> 3184 GRXFSIZ_DEPTH_SHIFT; 3185 hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> 3186 FIFOSIZE_DEPTH_SHIFT; 3187 
hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >> 3188 FIFOSIZE_DEPTH_SHIFT; 3189 3190 dev_dbg(hsotg->dev, "Detected values from hardware:\n"); 3191 dev_dbg(hsotg->dev, " op_mode=%d\n", 3192 hw->op_mode); 3193 dev_dbg(hsotg->dev, " arch=%d\n", 3194 hw->arch); 3195 dev_dbg(hsotg->dev, " dma_desc_enable=%d\n", 3196 hw->dma_desc_enable); 3197 dev_dbg(hsotg->dev, " power_optimized=%d\n", 3198 hw->power_optimized); 3199 dev_dbg(hsotg->dev, " i2c_enable=%d\n", 3200 hw->i2c_enable); 3201 dev_dbg(hsotg->dev, " hs_phy_type=%d\n", 3202 hw->hs_phy_type); 3203 dev_dbg(hsotg->dev, " fs_phy_type=%d\n", 3204 hw->fs_phy_type); 3205 dev_dbg(hsotg->dev, " utmi_phy_data_wdith=%d\n", 3206 hw->utmi_phy_data_width); 3207 dev_dbg(hsotg->dev, " num_dev_ep=%d\n", 3208 hw->num_dev_ep); 3209 dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n", 3210 hw->num_dev_perio_in_ep); 3211 dev_dbg(hsotg->dev, " host_channels=%d\n", 3212 hw->host_channels); 3213 dev_dbg(hsotg->dev, " max_transfer_size=%d\n", 3214 hw->max_transfer_size); 3215 dev_dbg(hsotg->dev, " max_packet_count=%d\n", 3216 hw->max_packet_count); 3217 dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n", 3218 hw->nperio_tx_q_depth); 3219 dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n", 3220 hw->host_perio_tx_q_depth); 3221 dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n", 3222 hw->dev_token_q_depth); 3223 dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n", 3224 hw->enable_dynamic_fifo); 3225 dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n", 3226 hw->en_multiple_tx_fifo); 3227 dev_dbg(hsotg->dev, " total_fifo_size=%d\n", 3228 hw->total_fifo_size); 3229 dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n", 3230 hw->host_rx_fifo_size); 3231 dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n", 3232 hw->host_nperio_tx_fifo_size); 3233 dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n", 3234 hw->host_perio_tx_fifo_size); 3235 dev_dbg(hsotg->dev, "\n"); 3236 3237 return 0; 3238 } 3239 3240 /* 3241 * Sets all parameters to the given value. 
3242 * 3243 * Assumes that the dwc2_core_params struct contains only integers. 3244 */ 3245 void dwc2_set_all_params(struct dwc2_core_params *params, int value) 3246 { 3247 int *p = (int *)params; 3248 size_t size = sizeof(*params) / sizeof(*p); 3249 int i; 3250 3251 for (i = 0; i < size; i++) 3252 p[i] = value; 3253 } 3254 3255 3256 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg) 3257 { 3258 return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103; 3259 } 3260 3261 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg) 3262 { 3263 if (DWC2_READ_4(hsotg, GSNPSID) == 0xffffffff) 3264 return false; 3265 else 3266 return true; 3267 } 3268 3269 /** 3270 * dwc2_enable_global_interrupts() - Enables the controller's Global 3271 * Interrupt in the AHB Config register 3272 * 3273 * @hsotg: Programming view of DWC_otg controller 3274 */ 3275 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg) 3276 { 3277 u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG); 3278 3279 ahbcfg |= GAHBCFG_GLBL_INTR_EN; 3280 DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg); 3281 } 3282 3283 /** 3284 * dwc2_disable_global_interrupts() - Disables the controller's Global 3285 * Interrupt in the AHB Config register 3286 * 3287 * @hsotg: Programming view of DWC_otg controller 3288 */ 3289 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg) 3290 { 3291 u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG); 3292 3293 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN; 3294 DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg); 3295 } 3296