1 /*	$OpenBSD: dwc2_core.c,v 1.9 2017/09/08 05:36:52 deraadt Exp $	*/
2 /*	$NetBSD: dwc2_core.c,v 1.6 2014/04/03 06:34:58 skrll Exp $	*/
3 
4 /*
5  * core.c - DesignWare HS OTG Controller common routines
6  *
7  * Copyright (C) 2004-2013 Synopsys, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions, and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. The names of the above-listed copyright holders may not be used
19  *    to endorse or promote products derived from this software without
20  *    specific prior written permission.
21  *
22  * ALTERNATIVELY, this software may be distributed under the terms of the
23  * GNU General Public License ("GPL") as published by the Free Software
24  * Foundation; either version 2 of the License, or (at your option) any
25  * later version.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
28  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
29  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * The Core code provides basic services for accessing and managing the
42  * DWC_otg hardware. These services are used by both the Host Controller
43  * Driver and the Peripheral Controller Driver.
44  */
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/signal.h>
49 #include <sys/proc.h>
50 #include <sys/timeout.h>
51 #include <sys/mutex.h>
52 #include <sys/pool.h>
53 #include <sys/task.h>
54 
55 #include <machine/bus.h>
56 
57 #include <dev/usb/usb.h>
58 #include <dev/usb/usbdi.h>
59 #include <dev/usb/usbdivar.h>
60 #include <dev/usb/usb_mem.h>
61 
62 #include <dev/usb/dwc2/dwc2.h>
63 #include <dev/usb/dwc2/dwc2var.h>
64 
65 #include <dev/usb/dwc2/dwc2_core.h>
66 #include <dev/usb/dwc2/dwc2_hcd.h>
67 
68 /**
69  * dwc2_enable_common_interrupts() - Initializes the common interrupts,
70  * used in both device and host modes
71  *
72  * @hsotg: Programming view of the DWC_otg controller
73  */
74 STATIC void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
75 {
76 	u32 intmsk;
77 
78 	/* Clear any pending OTG Interrupts */
79 	DWC2_WRITE_4(hsotg, GOTGINT, 0xffffffff);
80 
81 	/* Clear any pending interrupts */
82 	DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff);
83 
84 	/* Enable the interrupts in the GINTMSK */
85 	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
86 
87 	if (hsotg->core_params->dma_enable <= 0)
88 		intmsk |= GINTSTS_RXFLVL;
89 
90 	intmsk |= GINTSTS_CONIDSTSCHNG | GINTSTS_WKUPINT | GINTSTS_USBSUSP |
91 		  GINTSTS_SESSREQINT;
92 
93 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
94 }
95 
96 /*
97  * Initializes the FSLSPClkSel field of the HCFG register depending on the
98  * PHY type
99  */
100 STATIC void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
101 {
102 	u32 hcfg, val;
103 
104 	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
105 	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
106 	     hsotg->core_params->ulpi_fs_ls > 0) ||
107 	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
108 		/* Full speed PHY */
109 		val = HCFG_FSLSPCLKSEL_48_MHZ;
110 	} else {
111 		/* High speed PHY running at full speed or high speed */
112 		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
113 	}
114 
115 	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
116 	hcfg = DWC2_READ_4(hsotg, HCFG);
117 	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
118 	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
119 	DWC2_WRITE_4(hsotg, HCFG, hcfg);
120 }
121 
122 /*
123  * Do a soft reset of the core.  Be careful with this because it
124  * resets all the internal state machines of the core.
125  */
126 STATIC int dwc2_core_reset(struct dwc2_hsotg *hsotg)
127 {
128 	u32 greset;
129 	int count = 0;
130 
131 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
132 
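	/*
	 * Each polling loop below sleeps 20-40 ms per iteration and gives up
	 * after 50 iterations, so the core gets roughly one to two seconds
	 * to reach AHB idle and to complete the soft reset.
	 */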
133 	/* Wait for AHB master IDLE state */
134 	do {
135 		usleep_range(20000, 40000);
136 		greset = DWC2_READ_4(hsotg, GRSTCTL);
137 		if (++count > 50) {
138 			dev_warn(hsotg->dev,
139 				 "%s() HANG! AHB Idle GRSTCTL=%08x\n",
140 				 __func__, greset);
141 			return -EBUSY;
142 		}
143 	} while (!(greset & GRSTCTL_AHBIDLE));
144 
145 	/* Core Soft Reset */
146 	count = 0;
147 	greset |= GRSTCTL_CSFTRST;
148 	DWC2_WRITE_4(hsotg, GRSTCTL, greset);
149 	do {
150 		usleep_range(20000, 40000);
151 		greset = DWC2_READ_4(hsotg, GRSTCTL);
152 		if (++count > 50) {
153 			dev_warn(hsotg->dev,
154 				 "%s() HANG! Soft Reset GRSTCTL=%08x\n",
155 				 __func__, greset);
156 			return -EBUSY;
157 		}
158 	} while (greset & GRSTCTL_CSFTRST);
159 
160 	/*
161 	 * NOTE: This long sleep is _very_ important, otherwise the core will
162 	 * not stay in host mode after a connector ID change!
163 	 */
164 	usleep_range(150000, 200000);
165 
166 	return 0;
167 }
168 
169 STATIC int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
170 {
171 	u32 usbcfg, i2cctl;
172 	int retval = 0;
173 
174 	/*
175 	 * core_init() is now called on every switch so only call the
176 	 * following for the first time through
177 	 */
178 	if (select_phy) {
179 		dev_dbg(hsotg->dev, "FS PHY selected\n");
180 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
181 		usbcfg |= GUSBCFG_PHYSEL;
182 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
183 
184 		/* Reset after a PHY select */
185 		retval = dwc2_core_reset(hsotg);
186 		if (retval) {
187 			dev_err(hsotg->dev, "%s() Reset failed, aborting",
188 					__func__);
189 			return retval;
190 		}
191 	}
192 
193 	/*
194 	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
195 	 * do this on HNP Dev/Host mode switches (done in dev_init and
196 	 * host_init).
197 	 */
198 	if (dwc2_is_host_mode(hsotg))
199 		dwc2_init_fs_ls_pclk_sel(hsotg);
200 
201 	if (hsotg->core_params->i2c_enable > 0) {
202 		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
203 
204 		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
205 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
206 		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
207 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
208 
209 		/* Program GI2CCTL.I2CEn */
210 		i2cctl = DWC2_READ_4(hsotg, GI2CCTL);
211 		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
212 		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
213 		i2cctl &= ~GI2CCTL_I2CEN;
214 		DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl);
215 		i2cctl |= GI2CCTL_I2CEN;
216 		DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl);
217 	}
218 
219 	return retval;
220 }
221 
222 STATIC int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
223 {
224 	u32 usbcfg;
225 	int retval = 0;
226 
227 	if (!select_phy)
228 		return 0;
229 
230 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
231 
232 	/*
233 	 * HS PHY parameters. These parameters are preserved during soft reset
234 	 * so only program the first time. Do a soft reset immediately after
235 	 * setting phyif.
236 	 */
237 	switch (hsotg->core_params->phy_type) {
238 	case DWC2_PHY_TYPE_PARAM_ULPI:
239 		/* ULPI interface */
240 		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
241 		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
242 		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
243 		if (hsotg->core_params->phy_ulpi_ddr > 0)
244 			usbcfg |= GUSBCFG_DDRSEL;
245 		break;
246 	case DWC2_PHY_TYPE_PARAM_UTMI:
247 		/* UTMI+ interface */
248 		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
249 		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
250 		if (hsotg->core_params->phy_utmi_width == 16)
251 			usbcfg |= GUSBCFG_PHYIF16;
252 		break;
253 	default:
254 		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
255 		break;
256 	}
257 
258 	DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
259 
260 	/* Reset after setting the PHY parameters */
261 	retval = dwc2_core_reset(hsotg);
262 	if (retval) {
263 		dev_err(hsotg->dev, "%s() Reset failed, aborting",
264 				__func__);
265 		return retval;
266 	}
267 
268 	return retval;
269 }
270 
271 STATIC int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
272 {
273 	u32 usbcfg;
274 	int retval = 0;
275 
276 	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
277 	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
278 		/* If FS mode with FS PHY */
279 		retval = dwc2_fs_phy_init(hsotg, select_phy);
280 		if (retval)
281 			return retval;
282 	} else {
283 		/* High speed PHY */
284 		retval = dwc2_hs_phy_init(hsotg, select_phy);
285 		if (retval)
286 			return retval;
287 	}
288 
289 	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
290 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
291 	    hsotg->core_params->ulpi_fs_ls > 0) {
292 		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
293 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
294 		usbcfg |= GUSBCFG_ULPI_FS_LS;
295 		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
296 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
297 	} else {
298 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
299 		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
300 		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
301 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
302 	}
303 
304 	return retval;
305 }
306 
307 STATIC int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
308 {
309 	u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
310 
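	/*
	 * For the DMA architectures below, if a platform-specific value was
	 * configured (ahbcfg != -1), keep the bits covered by
	 * GAHBCFG_CTRL_MASK from the register as read and take the remaining
	 * bits (for example the AHB burst length) from the configured value.
	 */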
311 	switch (hsotg->hw_params.arch) {
312 	case GHWCFG2_EXT_DMA_ARCH:
313 		dev_err(hsotg->dev, "External DMA Mode\n");
314 		if (hsotg->core_params->ahbcfg != -1) {
315 			ahbcfg &= GAHBCFG_CTRL_MASK;
316 			ahbcfg |= hsotg->core_params->ahbcfg &
317 				  ~GAHBCFG_CTRL_MASK;
318 		}
319 		break;
320 
321 	case GHWCFG2_INT_DMA_ARCH:
322 		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
323 		if (hsotg->core_params->ahbcfg != -1) {
324 			ahbcfg &= GAHBCFG_CTRL_MASK;
325 			ahbcfg |= hsotg->core_params->ahbcfg &
326 				  ~GAHBCFG_CTRL_MASK;
327 		}
328 		break;
329 
330 	case GHWCFG2_SLAVE_ONLY_ARCH:
331 	default:
332 		dev_dbg(hsotg->dev, "Slave Only Mode\n");
333 		break;
334 	}
335 
336 	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
337 		hsotg->core_params->dma_enable,
338 		hsotg->core_params->dma_desc_enable);
339 
340 	if (hsotg->core_params->dma_enable > 0) {
341 		if (hsotg->core_params->dma_desc_enable > 0)
342 			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
343 		else
344 			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
345 	} else {
346 		dev_dbg(hsotg->dev, "Using Slave mode\n");
347 		hsotg->core_params->dma_desc_enable = 0;
348 	}
349 
350 	if (hsotg->core_params->dma_enable > 0)
351 		ahbcfg |= GAHBCFG_DMA_EN;
352 
353 	DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
354 
355 	return 0;
356 }
357 
358 STATIC void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
359 {
360 	u32 usbcfg;
361 
362 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
363 	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
364 
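	/*
	 * Advertise HNP/SRP capability only when both the configured otg_cap
	 * parameter and the hardware operating mode reported in GHWCFG2
	 * allow it.
	 */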
365 	switch (hsotg->hw_params.op_mode) {
366 	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
367 		if (hsotg->core_params->otg_cap ==
368 				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
369 			usbcfg |= GUSBCFG_HNPCAP;
370 		if (hsotg->core_params->otg_cap !=
371 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
372 			usbcfg |= GUSBCFG_SRPCAP;
373 		break;
374 
375 	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
376 	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
377 	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
378 		if (hsotg->core_params->otg_cap !=
379 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
380 			usbcfg |= GUSBCFG_SRPCAP;
381 		break;
382 
383 	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
384 	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
385 	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
386 	default:
387 		break;
388 	}
389 
390 	DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
391 }
392 
393 /**
394  * dwc2_core_init() - Initializes the DWC_otg controller registers and
395  * prepares the core for device mode or host mode operation
396  *
397  * @hsotg:      Programming view of the DWC_otg controller
398  * @select_phy: If true then also set the Phy type
400  */
401 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy)
402 {
403 	u32 usbcfg, otgctl;
404 	int retval;
405 
406 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
407 
408 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
409 
410 	/* Set ULPI External VBUS bit if needed */
411 	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
412 	if (hsotg->core_params->phy_ulpi_ext_vbus ==
413 				DWC2_PHY_ULPI_EXTERNAL_VBUS)
414 		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
415 
416 	/* Set external TS Dline pulsing bit if needed */
417 	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
418 	if (hsotg->core_params->ts_dline > 0)
419 		usbcfg |= GUSBCFG_TERMSELDLPULSE;
420 
421 	DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
422 
423 	/* Reset the Controller */
424 	retval = dwc2_core_reset(hsotg);
425 	if (retval) {
426 		dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
427 				__func__);
428 		return retval;
429 	}
430 
431 	/*
432 	 * This needs to happen in FS mode before any other programming occurs
433 	 */
434 	retval = dwc2_phy_init(hsotg, select_phy);
435 	if (retval)
436 		return retval;
437 
438 	/* Program the GAHBCFG Register */
439 	retval = dwc2_gahbcfg_init(hsotg);
440 	if (retval)
441 		return retval;
442 
443 	/* Program the GUSBCFG register */
444 	dwc2_gusbcfg_init(hsotg);
445 
446 	/* Program the GOTGCTL register */
447 	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
448 	otgctl &= ~GOTGCTL_OTGVER;
449 	if (hsotg->core_params->otg_ver > 0)
450 		otgctl |= GOTGCTL_OTGVER;
451 	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
452 	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
453 
454 	/* Clear the SRP success bit for FS-I2c */
455 	hsotg->srp_success = 0;
456 
457 	/* Enable common interrupts */
458 	dwc2_enable_common_interrupts(hsotg);
459 
460 	/*
461 	 * Do device or host initialization based on mode during PCD and
462 	 * HCD initialization
463 	 */
464 	if (dwc2_is_host_mode(hsotg)) {
465 		dev_dbg(hsotg->dev, "Host Mode\n");
466 		hsotg->op_state = OTG_STATE_A_HOST;
467 	} else {
468 		dev_dbg(hsotg->dev, "Device Mode\n");
469 		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
470 	}
471 
472 	return 0;
473 }
474 
475 /**
476  * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
477  *
478  * @hsotg: Programming view of DWC_otg controller
479  */
480 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
481 {
482 	u32 intmsk;
483 
484 	dev_dbg(hsotg->dev, "%s()\n", __func__);
485 
486 	/* Disable all interrupts */
487 	DWC2_WRITE_4(hsotg, GINTMSK, 0);
488 	DWC2_WRITE_4(hsotg, HAINTMSK, 0);
489 
490 	/* Enable the common interrupts */
491 	dwc2_enable_common_interrupts(hsotg);
492 
493 	/* Enable host mode interrupts without disturbing common interrupts */
494 	intmsk = DWC2_READ_4(hsotg, GINTMSK);
495 	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
496 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
497 }
498 
499 /**
500  * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
501  *
502  * @hsotg: Programming view of DWC_otg controller
503  */
504 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
505 {
506 	u32 intmsk = DWC2_READ_4(hsotg, GINTMSK);
507 
508 	/* Disable host mode interrupts without disturbing common interrupts */
509 	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
510 		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
511 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
512 }
513 
514 STATIC void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
515 {
516 	struct dwc2_core_params *params = hsotg->core_params;
517 	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
518 
519 	if (!params->enable_dynamic_fifo)
520 		return;
521 
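	/*
	 * With dynamic FIFO sizing the shared FIFO RAM is laid out in a
	 * fixed order: the Rx FIFO starts at offset 0, the non-periodic Tx
	 * FIFO follows it, and the periodic Tx FIFO comes after both, so
	 * the start addresses programmed below are running sums of the
	 * configured sizes.
	 */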
522 	/* Rx FIFO */
523 	grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
524 	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
525 	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
526 	grxfsiz |= params->host_rx_fifo_size <<
527 		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
528 	DWC2_WRITE_4(hsotg, GRXFSIZ, grxfsiz);
529 	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", DWC2_READ_4(hsotg, GRXFSIZ));
530 
531 	/* Non-periodic Tx FIFO */
532 	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
533 		DWC2_READ_4(hsotg, GNPTXFSIZ));
534 	nptxfsiz = params->host_nperio_tx_fifo_size <<
535 		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
536 	nptxfsiz |= params->host_rx_fifo_size <<
537 		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
538 	DWC2_WRITE_4(hsotg, GNPTXFSIZ, nptxfsiz);
539 	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
540 		DWC2_READ_4(hsotg, GNPTXFSIZ));
541 
542 	/* Periodic Tx FIFO */
543 	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
544 		DWC2_READ_4(hsotg, HPTXFSIZ));
545 	hptxfsiz = params->host_perio_tx_fifo_size <<
546 		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
547 	hptxfsiz |= (params->host_rx_fifo_size +
548 		     params->host_nperio_tx_fifo_size) <<
549 		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
550 	DWC2_WRITE_4(hsotg, HPTXFSIZ, hptxfsiz);
551 	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
552 		DWC2_READ_4(hsotg, HPTXFSIZ));
553 
554 	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
555 	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
556 		/*
557 		 * Global DFIFOCFG calculation for Host mode -
558 		 * include RxFIFO, NPTXFIFO and HPTXFIFO
559 		 */
560 		dfifocfg = DWC2_READ_4(hsotg, GDFIFOCFG);
561 		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
562 		dfifocfg |= (params->host_rx_fifo_size +
563 			     params->host_nperio_tx_fifo_size +
564 			     params->host_perio_tx_fifo_size) <<
565 			    GDFIFOCFG_EPINFOBASE_SHIFT &
566 			    GDFIFOCFG_EPINFOBASE_MASK;
567 		DWC2_WRITE_4(hsotg, GDFIFOCFG, dfifocfg);
568 	}
569 }
570 
571 /**
572  * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
573  * Host mode
574  *
575  * @hsotg: Programming view of DWC_otg controller
576  *
577  * This function flushes the Tx and Rx FIFOs and flushes any entries in the
578  * request queues. Host channels are reset to ensure that they are ready for
579  * performing transfers.
580  */
581 void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
582 {
583 	u32 hcfg, hfir, otgctl;
584 
585 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
586 
587 	/* Restart the Phy Clock */
588 	DWC2_WRITE_4(hsotg, PCGCTL, 0);
589 
590 	/* Initialize Host Configuration Register */
591 	dwc2_init_fs_ls_pclk_sel(hsotg);
592 	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
593 		hcfg = DWC2_READ_4(hsotg, HCFG);
594 		hcfg |= HCFG_FSLSSUPP;
595 		DWC2_WRITE_4(hsotg, HCFG, hcfg);
596 	}
597 
598 	/*
599 	 * This bit allows dynamic reloading of the HFIR register during
600 	 * runtime. This bit needs to be programmed during initial configuration
601 	 * and its value must not be changed during runtime.
602 	 */
603 	if (hsotg->core_params->reload_ctl > 0) {
604 		hfir = DWC2_READ_4(hsotg, HFIR);
605 		hfir |= HFIR_RLDCTRL;
606 		DWC2_WRITE_4(hsotg, HFIR, hfir);
607 	}
608 
609 	if (hsotg->core_params->dma_desc_enable > 0) {
610 		u32 op_mode = hsotg->hw_params.op_mode;
611 		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
612 		    !hsotg->hw_params.dma_desc_enable ||
613 		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
614 		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
615 		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
616 			dev_err(hsotg->dev,
617 				"Hardware does not support descriptor DMA mode -\n");
618 			dev_err(hsotg->dev,
619 				"falling back to buffer DMA mode.\n");
620 			hsotg->core_params->dma_desc_enable = 0;
621 		} else {
622 			hcfg = DWC2_READ_4(hsotg, HCFG);
623 			hcfg |= HCFG_DESCDMA;
624 			DWC2_WRITE_4(hsotg, HCFG, hcfg);
625 		}
626 	}
627 
628 	/* Configure data FIFO sizes */
629 	dwc2_config_fifos(hsotg);
630 
631 	/* TODO - check this */
632 	/* Clear Host Set HNP Enable in the OTG Control Register */
633 	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
634 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
635 	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
636 
637 	/* Make sure the FIFOs are flushed */
638 	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
639 	dwc2_flush_rx_fifo(hsotg);
640 
641 	/* Clear Host Set HNP Enable in the OTG Control Register */
642 	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
643 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
644 	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
645 
646 	if (hsotg->core_params->dma_desc_enable <= 0) {
647 		int num_channels, i;
648 		u32 hcchar;
649 
650 		/* Flush out any leftover queued requests */
651 		num_channels = hsotg->core_params->host_channels;
652 		for (i = 0; i < num_channels; i++) {
653 			hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
654 			hcchar &= ~HCCHAR_CHENA;
655 			hcchar |= HCCHAR_CHDIS;
656 			hcchar &= ~HCCHAR_EPDIR;
657 			DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar);
658 		}
659 
660 		/* Halt all channels to put them into a known state */
661 		for (i = 0; i < num_channels; i++) {
662 			int count = 0;
663 
664 			hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
665 			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
666 			hcchar &= ~HCCHAR_EPDIR;
667 			DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar);
668 			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
669 				__func__, i);
670 			do {
671 				hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
672 				if (++count > 1000) {
673 					dev_err(hsotg->dev,
674 						"Unable to clear enable on channel %d\n",
675 						i);
676 					break;
677 				}
678 				udelay(1);
679 			} while (hcchar & HCCHAR_CHENA);
680 		}
681 	}
682 
683 	/* Turn on the vbus power */
684 	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
685 	if (hsotg->op_state == OTG_STATE_A_HOST) {
686 		u32 hprt0 = dwc2_read_hprt0(hsotg);
687 
688 		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
689 			!!(hprt0 & HPRT0_PWR));
690 		if (!(hprt0 & HPRT0_PWR)) {
691 			hprt0 |= HPRT0_PWR;
692 			DWC2_WRITE_4(hsotg, HPRT0, hprt0);
693 		}
694 	}
695 
696 	dwc2_enable_host_interrupts(hsotg);
697 }
698 
699 STATIC void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
700 				      struct dwc2_host_chan *chan)
701 {
702 	u32 hcintmsk = HCINTMSK_CHHLTD;
703 
704 	switch (chan->ep_type) {
705 	case USB_ENDPOINT_XFER_CONTROL:
706 	case USB_ENDPOINT_XFER_BULK:
707 		dev_vdbg(hsotg->dev, "control/bulk\n");
708 		hcintmsk |= HCINTMSK_XFERCOMPL;
709 		hcintmsk |= HCINTMSK_STALL;
710 		hcintmsk |= HCINTMSK_XACTERR;
711 		hcintmsk |= HCINTMSK_DATATGLERR;
712 		if (chan->ep_is_in) {
713 			hcintmsk |= HCINTMSK_BBLERR;
714 		} else {
715 			hcintmsk |= HCINTMSK_NAK;
716 			hcintmsk |= HCINTMSK_NYET;
717 			if (chan->do_ping)
718 				hcintmsk |= HCINTMSK_ACK;
719 		}
720 
721 		if (chan->do_split) {
722 			hcintmsk |= HCINTMSK_NAK;
723 			if (chan->complete_split)
724 				hcintmsk |= HCINTMSK_NYET;
725 			else
726 				hcintmsk |= HCINTMSK_ACK;
727 		}
728 
729 		if (chan->error_state)
730 			hcintmsk |= HCINTMSK_ACK;
731 		break;
732 
733 	case USB_ENDPOINT_XFER_INT:
734 		if (dbg_perio())
735 			dev_vdbg(hsotg->dev, "intr\n");
736 		hcintmsk |= HCINTMSK_XFERCOMPL;
737 		hcintmsk |= HCINTMSK_NAK;
738 		hcintmsk |= HCINTMSK_STALL;
739 		hcintmsk |= HCINTMSK_XACTERR;
740 		hcintmsk |= HCINTMSK_DATATGLERR;
741 		hcintmsk |= HCINTMSK_FRMOVRUN;
742 
743 		if (chan->ep_is_in)
744 			hcintmsk |= HCINTMSK_BBLERR;
745 		if (chan->error_state)
746 			hcintmsk |= HCINTMSK_ACK;
747 		if (chan->do_split) {
748 			if (chan->complete_split)
749 				hcintmsk |= HCINTMSK_NYET;
750 			else
751 				hcintmsk |= HCINTMSK_ACK;
752 		}
753 		break;
754 
755 	case USB_ENDPOINT_XFER_ISOC:
756 		if (dbg_perio())
757 			dev_vdbg(hsotg->dev, "isoc\n");
758 		hcintmsk |= HCINTMSK_XFERCOMPL;
759 		hcintmsk |= HCINTMSK_FRMOVRUN;
760 		hcintmsk |= HCINTMSK_ACK;
761 
762 		if (chan->ep_is_in) {
763 			hcintmsk |= HCINTMSK_XACTERR;
764 			hcintmsk |= HCINTMSK_BBLERR;
765 		}
766 		break;
767 	default:
768 		dev_err(hsotg->dev, "## Unknown EP type ##\n");
769 		break;
770 	}
771 
772 	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
773 	if (dbg_hc(chan))
774 		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
775 }
776 
777 STATIC void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
778 				    struct dwc2_host_chan *chan)
779 {
780 	u32 hcintmsk = HCINTMSK_CHHLTD;
781 
782 	/*
783 	 * For Descriptor DMA mode core halts the channel on AHB error.
784 	 * Interrupt is not required.
785 	 */
786 	if (hsotg->core_params->dma_desc_enable <= 0) {
787 		if (dbg_hc(chan))
788 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
789 		hcintmsk |= HCINTMSK_AHBERR;
790 	} else {
791 		if (dbg_hc(chan))
792 			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
793 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
794 			hcintmsk |= HCINTMSK_XFERCOMPL;
795 	}
796 
797 	if (chan->error_state && !chan->do_split &&
798 	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
799 		if (dbg_hc(chan))
800 			dev_vdbg(hsotg->dev, "setting ACK\n");
801 		hcintmsk |= HCINTMSK_ACK;
802 		if (chan->ep_is_in) {
803 			hcintmsk |= HCINTMSK_DATATGLERR;
804 			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
805 				hcintmsk |= HCINTMSK_NAK;
806 		}
807 	}
808 
809 	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
810 	if (dbg_hc(chan))
811 		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
812 }
813 
814 STATIC void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
815 				struct dwc2_host_chan *chan)
816 {
817 	u32 intmsk;
818 
819 	if (hsotg->core_params->dma_enable > 0) {
820 		if (dbg_hc(chan))
821 			dev_vdbg(hsotg->dev, "DMA enabled\n");
822 		dwc2_hc_enable_dma_ints(hsotg, chan);
823 	} else {
824 		if (dbg_hc(chan))
825 			dev_vdbg(hsotg->dev, "DMA disabled\n");
826 		dwc2_hc_enable_slave_ints(hsotg, chan);
827 	}
828 
829 	/* Enable the top level host channel interrupt */
830 	intmsk = DWC2_READ_4(hsotg, HAINTMSK);
831 	intmsk |= 1 << chan->hc_num;
832 	DWC2_WRITE_4(hsotg, HAINTMSK, intmsk);
833 	if (dbg_hc(chan))
834 		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
835 
836 	/* Make sure host channel interrupts are enabled */
837 	intmsk = DWC2_READ_4(hsotg, GINTMSK);
838 	intmsk |= GINTSTS_HCHINT;
839 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
840 	if (dbg_hc(chan))
841 		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
842 }
843 
844 /**
845  * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
846  * a specific endpoint
847  *
848  * @hsotg: Programming view of DWC_otg controller
849  * @chan:  Information needed to initialize the host channel
850  *
851  * The HCCHARn register is set up with the characteristics specified in chan.
852  * Host channel interrupts that may need to be serviced while this transfer is
853  * in progress are enabled.
854  */
855 void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
856 {
857 	u8 hc_num = chan->hc_num;
858 	u32 hcintmsk;
859 	u32 hcchar;
860 	u32 hcsplt = 0;
861 
862 	if (dbg_hc(chan))
863 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
864 
865 	/* Clear old interrupt conditions for this host channel */
866 	hcintmsk = 0xffffffff;
867 	hcintmsk &= ~HCINTMSK_RESERVED14_31;
868 	DWC2_WRITE_4(hsotg, HCINT(hc_num), hcintmsk);
869 
870 	/* Enable channel interrupts required for this transfer */
871 	dwc2_hc_enable_ints(hsotg, chan);
872 
873 	/*
874 	 * Program the HCCHARn register with the endpoint characteristics for
875 	 * the current transfer
876 	 */
877 	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
878 	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
879 	if (chan->ep_is_in)
880 		hcchar |= HCCHAR_EPDIR;
881 	if (chan->speed == USB_SPEED_LOW)
882 		hcchar |= HCCHAR_LSPDDEV;
883 	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
884 	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
885 	DWC2_WRITE_4(hsotg, HCCHAR(hc_num), hcchar);
886 	if (dbg_hc(chan)) {
887 		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
888 			 hc_num, hcchar);
889 
890 		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
891 			 __func__, hc_num);
892 		dev_vdbg(hsotg->dev, "	 Dev Addr: %d\n",
893 			 chan->dev_addr);
894 		dev_vdbg(hsotg->dev, "	 Ep Num: %d\n",
895 			 chan->ep_num);
896 		dev_vdbg(hsotg->dev, "	 Is In: %d\n",
897 			 chan->ep_is_in);
898 		dev_vdbg(hsotg->dev, "	 Is Low Speed: %d\n",
899 			 chan->speed == USB_SPEED_LOW);
900 		dev_vdbg(hsotg->dev, "	 Ep Type: %d\n",
901 			 chan->ep_type);
902 		dev_vdbg(hsotg->dev, "	 Max Pkt: %d\n",
903 			 chan->max_packet);
904 	}
905 
906 	/* Program the HCSPLT register for SPLITs */
907 	if (chan->do_split) {
908 		if (dbg_hc(chan))
909 			dev_vdbg(hsotg->dev,
910 				 "Programming HC %d with split --> %s\n",
911 				 hc_num,
912 				 chan->complete_split ? "CSPLIT" : "SSPLIT");
913 		if (chan->complete_split)
914 			hcsplt |= HCSPLT_COMPSPLT;
915 		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
916 			  HCSPLT_XACTPOS_MASK;
917 		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
918 			  HCSPLT_HUBADDR_MASK;
919 		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
920 			  HCSPLT_PRTADDR_MASK;
921 		if (dbg_hc(chan)) {
922 			dev_vdbg(hsotg->dev, "	  comp split %d\n",
923 				 chan->complete_split);
924 			dev_vdbg(hsotg->dev, "	  xact pos %d\n",
925 				 chan->xact_pos);
926 			dev_vdbg(hsotg->dev, "	  hub addr %d\n",
927 				 chan->hub_addr);
928 			dev_vdbg(hsotg->dev, "	  hub port %d\n",
929 				 chan->hub_port);
930 			dev_vdbg(hsotg->dev, "	  is_in %d\n",
931 				 chan->ep_is_in);
932 			dev_vdbg(hsotg->dev, "	  Max Pkt %d\n",
933 				 chan->max_packet);
934 			dev_vdbg(hsotg->dev, "	  xferlen %d\n",
935 				 chan->xfer_len);
936 		}
937 	}
938 
939 	DWC2_WRITE_4(hsotg, HCSPLT(hc_num), hcsplt);
940 }
941 
942 /**
943  * dwc2_hc_halt() - Attempts to halt a host channel
944  *
945  * @hsotg:       Controller register interface
946  * @chan:        Host channel to halt
947  * @halt_status: Reason for halting the channel
948  *
949  * This function should only be called in Slave mode or to abort a transfer in
950  * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
951  * controller halts the channel when the transfer is complete or a condition
952  * occurs that requires application intervention.
953  *
954  * In slave mode, checks for a free request queue entry, then sets the Channel
955  * Enable and Channel Disable bits of the Host Channel Characteristics
956  * register of the specified channel to initiate the halt. If there is no free
957  * request queue entry, sets only the Channel Disable bit of the HCCHARn
958  * register to flush requests for this channel. In the latter case, sets a
959  * flag to indicate that the host channel needs to be halted when a request
960  * queue slot is open.
961  *
962  * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
963  * HCCHARn register. The controller ensures there is space in the request
964  * queue before submitting the halt request.
965  *
966  * Some time may elapse before the core flushes any posted requests for this
967  * host channel and halts. The Channel Halted interrupt handler completes the
968  * deactivation of the host channel.
969  */
970 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
971 		  enum dwc2_halt_status halt_status)
972 {
973 	u32 nptxsts, hptxsts, hcchar;
974 
975 	if (dbg_hc(chan))
976 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
977 	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
978 		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
979 
980 	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
981 	    halt_status == DWC2_HC_XFER_AHB_ERR) {
982 		/*
983 		 * Disable all channel interrupts except Ch Halted. The QTD
984 		 * and QH state associated with this transfer has been cleared
985 		 * (in the case of URB_DEQUEUE), so the channel needs to be
986 		 * shut down carefully to prevent crashes.
987 		 */
988 		u32 hcintmsk = HCINTMSK_CHHLTD;
989 
990 		dev_vdbg(hsotg->dev, "dequeue/error\n");
991 		DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
992 
993 		/*
994 		 * Make sure no other interrupts besides halt are currently
995 		 * pending. Handling another interrupt could cause a crash due
996 		 * to the QTD and QH state.
997 		 */
998 		DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), ~hcintmsk);
999 
1000 		/*
1001 		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1002 		 * even if the channel was already halted for some other
1003 		 * reason
1004 		 */
1005 		chan->halt_status = halt_status;
1006 
1007 		hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1008 		if (!(hcchar & HCCHAR_CHENA)) {
1009 			/*
1010 			 * The channel is either already halted or it hasn't
1011 			 * started yet. In DMA mode, the transfer may halt if
1012 			 * it finishes normally or a condition occurs that
1013 			 * requires driver intervention. Don't want to halt
1014 			 * the channel again. In either Slave or DMA mode,
1015 			 * it's possible that the transfer has been assigned
1016 			 * to a channel, but not started yet when an URB is
1017 			 * dequeued. Don't want to halt a channel that hasn't
1018 			 * started yet.
1019 			 */
1020 			return;
1021 		}
1022 	}
1023 	if (chan->halt_pending) {
1024 		/*
1025 		 * A halt has already been issued for this channel. This might
1026 		 * happen when a transfer is aborted by a higher level in
1027 		 * the stack.
1028 		 */
1029 		dev_vdbg(hsotg->dev,
1030 			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1031 			 __func__, chan->hc_num);
1032 		return;
1033 	}
1034 
1035 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1036 
1037 	/* No need to set the bit in DDMA for disabling the channel */
1038 	/* TODO check it everywhere channel is disabled */
1039 	if (hsotg->core_params->dma_desc_enable <= 0) {
1040 		if (dbg_hc(chan))
1041 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1042 		hcchar |= HCCHAR_CHENA;
1043 	} else {
1044 		if (dbg_hc(chan))
1045 			dev_dbg(hsotg->dev, "desc DMA enabled\n");
1046 	}
1047 	hcchar |= HCCHAR_CHDIS;
1048 
1049 	if (hsotg->core_params->dma_enable <= 0) {
1050 		if (dbg_hc(chan))
1051 			dev_vdbg(hsotg->dev, "DMA not enabled\n");
1052 		hcchar |= HCCHAR_CHENA;
1053 
1054 		/* Check for space in the request queue to issue the halt */
1055 		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1056 		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1057 			dev_vdbg(hsotg->dev, "control/bulk\n");
1058 			nptxsts = DWC2_READ_4(hsotg, GNPTXSTS);
1059 			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1060 				dev_vdbg(hsotg->dev, "Disabling channel\n");
1061 				hcchar &= ~HCCHAR_CHENA;
1062 			}
1063 		} else {
1064 			if (dbg_perio())
1065 				dev_vdbg(hsotg->dev, "isoc/intr\n");
1066 			hptxsts = DWC2_READ_4(hsotg, HPTXSTS);
1067 			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1068 			    hsotg->queuing_high_bandwidth) {
1069 				if (dbg_perio())
1070 					dev_vdbg(hsotg->dev, "Disabling channel\n");
1071 				hcchar &= ~HCCHAR_CHENA;
1072 			}
1073 		}
1074 	} else {
1075 		if (dbg_hc(chan))
1076 			dev_vdbg(hsotg->dev, "DMA enabled\n");
1077 	}
1078 
1079 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1080 	chan->halt_status = halt_status;
1081 
1082 	if (hcchar & HCCHAR_CHENA) {
1083 		if (dbg_hc(chan))
1084 			dev_vdbg(hsotg->dev, "Channel enabled\n");
1085 		chan->halt_pending = 1;
1086 		chan->halt_on_queue = 0;
1087 	} else {
1088 		if (dbg_hc(chan))
1089 			dev_vdbg(hsotg->dev, "Channel disabled\n");
1090 		chan->halt_on_queue = 1;
1091 	}
1092 
1093 	if (dbg_hc(chan)) {
1094 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1095 			 chan->hc_num);
1096 		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
1097 			 hcchar);
1098 		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
1099 			 chan->halt_pending);
1100 		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
1101 			 chan->halt_on_queue);
1102 		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
1103 			 chan->halt_status);
1104 	}
1105 }
1106 
1107 /**
1108  * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1109  *
1110  * @hsotg: Programming view of DWC_otg controller
1111  * @chan:  Identifies the host channel to clean up
1112  *
1113  * This function is normally called after a transfer is done and the host
1114  * channel is being released
1115  */
1116 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1117 {
1118 	u32 hcintmsk;
1119 
1120 	chan->xfer_started = 0;
1121 
1122 	/*
1123 	 * Clear channel interrupt enables and any unhandled channel interrupt
1124 	 * conditions
1125 	 */
1126 	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), 0);
1127 	hcintmsk = 0xffffffff;
1128 	hcintmsk &= ~HCINTMSK_RESERVED14_31;
1129 	DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), hcintmsk);
1130 }
1131 
1132 /**
1133  * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1134  * which frame a periodic transfer should occur
1135  *
1136  * @hsotg:  Programming view of DWC_otg controller
1137  * @chan:   Identifies the host channel to set up and its properties
1138  * @hcchar: Current value of the HCCHAR register for the specified host channel
1139  *
1140  * This function has no effect on non-periodic transfers
1141  */
1142 STATIC void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1143 				       struct dwc2_host_chan *chan, u32 *hcchar)
1144 {
1145 	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1146 	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1147 		/* Set ODDFRM if the _next_ frame (current + 1) is odd */
1148 		if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
1149 			*hcchar |= HCCHAR_ODDFRM;
1150 	}
1151 }
1152 
1153 STATIC void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1154 {
1155 	/* Set up the initial PID for the transfer */
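	/*
	 * For high-speed high-bandwidth isochronous endpoints the first PID
	 * of a microframe encodes the packet count: IN transfers start with
	 * DATA0/DATA1/DATA2 for 1/2/3 packets, OUT transfers start with
	 * MDATA whenever more than one packet is sent.  Full/low-speed
	 * isochronous transfers always use DATA0.
	 */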
1156 	if (chan->speed == USB_SPEED_HIGH) {
1157 		if (chan->ep_is_in) {
1158 			if (chan->multi_count == 1)
1159 				chan->data_pid_start = DWC2_HC_PID_DATA0;
1160 			else if (chan->multi_count == 2)
1161 				chan->data_pid_start = DWC2_HC_PID_DATA1;
1162 			else
1163 				chan->data_pid_start = DWC2_HC_PID_DATA2;
1164 		} else {
1165 			if (chan->multi_count == 1)
1166 				chan->data_pid_start = DWC2_HC_PID_DATA0;
1167 			else
1168 				chan->data_pid_start = DWC2_HC_PID_MDATA;
1169 		}
1170 	} else {
1171 		chan->data_pid_start = DWC2_HC_PID_DATA0;
1172 	}
1173 }
1174 
1175 /**
1176  * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1177  * the Host Channel
1178  *
1179  * @hsotg: Programming view of DWC_otg controller
1180  * @chan:  Information needed to initialize the host channel
1181  *
1182  * This function should only be called in Slave mode. For a channel associated
1183  * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1184  * associated with a periodic EP, the periodic Tx FIFO is written.
1185  *
1186  * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1187  * the number of bytes written to the Tx FIFO.
1188  */
1189 STATIC void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1190 				 struct dwc2_host_chan *chan)
1191 {
1192 	u32 i;
1193 	u32 remaining_count;
1194 	u32 byte_count;
1195 	u32 dword_count;
1196 	u32 *data_buf = (u32 *)chan->xfer_buf;
1197 	u32 data_fifo;
1198 
1199 	if (dbg_hc(chan))
1200 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1201 
1202 	data_fifo = HCFIFO(chan->hc_num);
1203 
1204 	remaining_count = chan->xfer_len - chan->xfer_count;
1205 	if (remaining_count > chan->max_packet)
1206 		byte_count = chan->max_packet;
1207 	else
1208 		byte_count = remaining_count;
1209 
1210 	dword_count = (byte_count + 3) / 4;
1211 
1212 	if (((unsigned long)data_buf & 0x3) == 0) {
1213 		/* xfer_buf is DWORD aligned */
1214 		for (i = 0; i < dword_count; i++, data_buf++)
1215 			DWC2_WRITE_4(hsotg, data_fifo, *data_buf);
1216 	} else {
1217 		/* xfer_buf is not DWORD aligned; build words byte by byte */
1218 		for (i = 0; i < dword_count; i++, data_buf++) {
1219 			const u8 *b = (const u8 *)data_buf;
1220 			u32 data = b[0] | b[1] << 8 | b[2] << 16 | (u32)b[3] << 24;
1221 			DWC2_WRITE_4(hsotg, data_fifo, data);
1222 		}
1223 	}
1224 
1225 	chan->xfer_count += byte_count;
1226 	chan->xfer_buf += byte_count;
1227 }
1228 
1229 /**
1230  * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1231  * channel and starts the transfer
1232  *
1233  * @hsotg: Programming view of DWC_otg controller
1234  * @chan:  Information needed to initialize the host channel. The xfer_len value
1235  *         may be reduced to accommodate the max widths of the XferSize and
1236  *         PktCnt fields in the HCTSIZn register. The multi_count value may be
1237  *         changed to reflect the final xfer_len value.
1238  *
1239  * This function may be called in either Slave mode or DMA mode. In Slave mode,
1240  * the caller must ensure that there is sufficient space in the request queue
1241  * and Tx Data FIFO.
1242  *
1243  * For an OUT transfer in Slave mode, it loads a data packet into the
1244  * appropriate FIFO. If necessary, additional data packets are loaded in the
1245  * Host ISR.
1246  *
1247  * For an IN transfer in Slave mode, a data packet is requested. The data
1248  * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1249  * additional data packets are requested in the Host ISR.
1250  *
1251  * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1252  * register along with a packet count of 1 and the channel is enabled. This
1253  * causes a single PING transaction to occur. Other fields in HCTSIZ are
1254  * simply set to 0 since no data transfer occurs in this case.
1255  *
1256  * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1257  * all the information required to perform the subsequent data transfer. In
1258  * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1259  * controller performs the entire PING protocol, then starts the data
1260  * transfer.
1261  */
1262 void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1263 			    struct dwc2_host_chan *chan)
1264 {
1265 	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1266 	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1267 	u32 hcchar;
1268 	u32 hctsiz = 0;
1269 	u16 num_packets;
1270 
1271 	if (dbg_hc(chan))
1272 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1273 
1274 	if (chan->do_ping) {
1275 		if (hsotg->core_params->dma_enable <= 0) {
1276 			if (dbg_hc(chan))
1277 				dev_vdbg(hsotg->dev, "ping, no DMA\n");
1278 			dwc2_hc_do_ping(hsotg, chan);
1279 			chan->xfer_started = 1;
1280 			return;
1281 		} else {
1282 			if (dbg_hc(chan))
1283 				dev_vdbg(hsotg->dev, "ping, DMA\n");
1284 			hctsiz |= TSIZ_DOPNG;
1285 		}
1286 	}
1287 
1288 	if (chan->do_split) {
1289 		if (dbg_hc(chan))
1290 			dev_vdbg(hsotg->dev, "split\n");
1291 		num_packets = 1;
1292 
1293 		if (chan->complete_split && !chan->ep_is_in)
1294 			/*
1295 			 * For CSPLIT OUT Transfer, set the size to 0 so the
1296 			 * core doesn't expect any data written to the FIFO
1297 			 */
1298 			chan->xfer_len = 0;
1299 		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1300 			chan->xfer_len = chan->max_packet;
1301 		else if (!chan->ep_is_in && chan->xfer_len > 188)
1302 			chan->xfer_len = 188;
1303 
1304 		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1305 			  TSIZ_XFERSIZE_MASK;
1306 	} else {
1307 		if (dbg_hc(chan))
1308 			dev_vdbg(hsotg->dev, "no split\n");
1309 		/*
1310 		 * Ensure that the transfer length and packet count will fit
1311 		 * in the widths allocated for them in the HCTSIZn register
1312 		 */
1313 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1314 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1315 			/*
1316 			 * Make sure the transfer size is no larger than one
1317 			 * (micro)frame's worth of data. (A check was done
1318 			 * when the periodic transfer was accepted to ensure
1319 			 * that a (micro)frame's worth of data can be
1320 			 * programmed into a channel.)
1321 			 */
1322 			u32 max_periodic_len =
1323 				chan->multi_count * chan->max_packet;
1324 
1325 			if (chan->xfer_len > max_periodic_len)
1326 				chan->xfer_len = max_periodic_len;
1327 		} else if (chan->xfer_len > max_hc_xfer_size) {
1328 			/*
1329 			 * Clamp xfer_len so that, once rounded up to whole
1330 			 * packets below, it still fits in max_hc_xfer_size
1331 			 */
1332 			chan->xfer_len =
1333 				max_hc_xfer_size - chan->max_packet + 1;
1334 		}
1335 
1336 		if (chan->xfer_len > 0) {
1337 			num_packets = (chan->xfer_len + chan->max_packet - 1) /
1338 					chan->max_packet;
1339 			if (num_packets > max_hc_pkt_count) {
1340 				num_packets = max_hc_pkt_count;
1341 				chan->xfer_len = num_packets * chan->max_packet;
1342 			}
1343 		} else {
1344 			/* Need 1 packet for transfer length of 0 */
1345 			num_packets = 1;
1346 		}
1347 
1348 		if (chan->ep_is_in)
1349 			/*
1350 			 * Always program an integral # of max packets for IN
1351 			 * transfers
1352 			 */
1353 			chan->xfer_len = num_packets * chan->max_packet;
1354 
1355 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1356 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1357 			/*
1358 			 * Make sure that the multi_count field matches the
1359 			 * actual transfer length
1360 			 */
1361 			chan->multi_count = num_packets;
1362 
1363 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1364 			dwc2_set_pid_isoc(chan);
1365 
1366 		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1367 			  TSIZ_XFERSIZE_MASK;
1368 	}
1369 
1370 	chan->start_pkt_count = num_packets;
1371 	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1372 	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1373 		  TSIZ_SC_MC_PID_MASK;
1374 	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
1375 	if (dbg_hc(chan)) {
1376 		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1377 			 hctsiz, chan->hc_num);
1378 
1379 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1380 			 chan->hc_num);
1381 		dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n",
1382 			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1383 			 TSIZ_XFERSIZE_SHIFT);
1384 		dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n",
1385 			 (hctsiz & TSIZ_PKTCNT_MASK) >>
1386 			 TSIZ_PKTCNT_SHIFT);
1387 		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
1388 			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1389 			 TSIZ_SC_MC_PID_SHIFT);
1390 	}
1391 
1392 	if (hsotg->core_params->dma_enable > 0) {
1393 		dma_addr_t dma_addr;
1394 
1395 		if (chan->align_buf) {
1396 			if (dbg_hc(chan))
1397 				dev_vdbg(hsotg->dev, "align_buf\n");
1398 			dma_addr = chan->align_buf;
1399 		} else {
1400 			dma_addr = chan->xfer_dma;
1401 		}
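		/*
		 * If a core_dma_config hook was registered, let its
		 * set_dma_addr callback program the transfer address;
		 * otherwise write it straight into HCDMA.
		 */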
1402 		struct dwc2_core_dma_config *dma_config =
1403 		    hsotg->core_dma_config;
1404 		if (dma_config == NULL) {
1405 			DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num),
1406 			    (u32)dma_addr);
1407 			if (dbg_hc(chan))
1408 				dev_vdbg(hsotg->dev,
1409 				    "Wrote %08lx to HCDMA(%d)\n",
1410 				     (unsigned long)dma_addr,
1411 				    chan->hc_num);
1412 		} else {
1413 			(void)(*dma_config->set_dma_addr)(
1414 			    dma_config->set_dma_addr_data, dma_addr,
1415 			    chan->hc_num);
1416 		}
1417 	}
1418 
1419 	/* Start the split */
1420 	if (chan->do_split) {
1421 		u32 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chan->hc_num));
1422 
1423 		hcsplt |= HCSPLT_SPLTENA;
1424 		DWC2_WRITE_4(hsotg, HCSPLT(chan->hc_num), hcsplt);
1425 	}
1426 
1427 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1428 	hcchar &= ~HCCHAR_MULTICNT_MASK;
1429 	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1430 		  HCCHAR_MULTICNT_MASK;
1431 	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1432 
1433 	if (hcchar & HCCHAR_CHDIS)
1434 		dev_warn(hsotg->dev,
1435 			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1436 			 __func__, chan->hc_num, hcchar);
1437 
1438 	/* Set host channel enable after all other setup is complete */
1439 	hcchar |= HCCHAR_CHENA;
1440 	hcchar &= ~HCCHAR_CHDIS;
1441 
1442 	if (dbg_hc(chan))
1443 		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
1444 			 (hcchar & HCCHAR_MULTICNT_MASK) >>
1445 			 HCCHAR_MULTICNT_SHIFT);
1446 
1447 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1448 	if (dbg_hc(chan))
1449 		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1450 			 chan->hc_num);
1451 
1452 	chan->xfer_started = 1;
1453 	chan->requests++;
1454 
1455 	if (hsotg->core_params->dma_enable <= 0 &&
1456 	    !chan->ep_is_in && chan->xfer_len > 0)
1457 		/* Load OUT packet into the appropriate Tx FIFO */
1458 		dwc2_hc_write_packet(hsotg, chan);
1459 }
1460 
1461 /**
1462  * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1463  * host channel and starts the transfer in Descriptor DMA mode
1464  *
1465  * @hsotg: Programming view of DWC_otg controller
1466  * @chan:  Information needed to initialize the host channel
1467  *
1468  * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1469  * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1470  * with micro-frame bitmap.
1471  *
1472  * Initializes HCDMA register with descriptor list address and CTD value then
1473  * starts the transfer via enabling the channel.
1474  */
1475 void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1476 				 struct dwc2_host_chan *chan)
1477 {
1478 	u32 hcchar;
1479 	u32 hc_dma;
1480 	u32 hctsiz = 0;
1481 
1482 	if (chan->do_ping)
1483 		hctsiz |= TSIZ_DOPNG;
1484 
1485 	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1486 		dwc2_set_pid_isoc(chan);
1487 
1488 	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1489 	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1490 		  TSIZ_SC_MC_PID_MASK;
1491 
1492 	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1493 	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1494 
1495 	/* Non-zero only for high-speed interrupt endpoints */
1496 	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1497 
1498 	if (dbg_hc(chan)) {
1499 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1500 			 chan->hc_num);
1501 		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
1502 			 chan->data_pid_start);
1503 		dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);
1504 	}
1505 
1506 	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
1507 
1508 	hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
1509 
1510 	/* Always start from first descriptor */
1511 	hc_dma &= ~HCDMA_CTD_MASK;
1512 	DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num), hc_dma);
1513 	if (dbg_hc(chan))
1514 		dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
1515 			 hc_dma, chan->hc_num);
1516 
1517 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1518 	hcchar &= ~HCCHAR_MULTICNT_MASK;
1519 	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1520 		  HCCHAR_MULTICNT_MASK;
1521 
1522 	if (hcchar & HCCHAR_CHDIS)
1523 		dev_warn(hsotg->dev,
1524 			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1525 			 __func__, chan->hc_num, hcchar);
1526 
1527 	/* Set host channel enable after all other setup is complete */
1528 	hcchar |= HCCHAR_CHENA;
1529 	hcchar &= ~HCCHAR_CHDIS;
1530 
1531 	if (dbg_hc(chan))
1532 		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
1533 			 (hcchar & HCCHAR_MULTICNT_MASK) >>
1534 			 HCCHAR_MULTICNT_SHIFT);
1535 
1536 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1537 	if (dbg_hc(chan))
1538 		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1539 			 chan->hc_num);
1540 
1541 	chan->xfer_started = 1;
1542 	chan->requests++;
1543 }
1544 
1545 /**
1546  * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1547  * a previous call to dwc2_hc_start_transfer()
1548  *
1549  * @hsotg: Programming view of DWC_otg controller
1550  * @chan:  Information needed to initialize the host channel
1551  *
1552  * The caller must ensure there is sufficient space in the request queue and Tx
1553  * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1554  * the controller acts autonomously to complete transfers programmed to a host
1555  * channel.
1556  *
1557  * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1558  * if there is any data remaining to be queued. For an IN transfer, another
1559  * data packet is always requested. For the SETUP phase of a control transfer,
1560  * this function does nothing.
1561  *
1562  * Return: 1 if a new request is queued, 0 if no more requests are required
1563  * for this transfer
1564  */
1565 int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1566 			      struct dwc2_host_chan *chan)
1567 {
1568 	if (dbg_hc(chan))
1569 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1570 			 chan->hc_num);
1571 
1572 	if (chan->do_split)
1573 		/* SPLITs always queue just once per channel */
1574 		return 0;
1575 
1576 	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
1577 		/* SETUPs are queued only once since they can't be NAK'd */
1578 		return 0;
1579 
1580 	if (chan->ep_is_in) {
1581 		/*
1582 		 * Always queue another request for other IN transfers. If
1583 		 * back-to-back INs are issued and NAKs are received for both,
1584 		 * the driver may still be processing the first NAK when the
1585 		 * second NAK is received. When the interrupt handler clears
1586 		 * the NAK interrupt for the first NAK, the second NAK will
1587 		 * not be seen. So we can't depend on the NAK interrupt
1588 		 * handler to requeue a NAK'd request. Instead, IN requests
1589 		 * are issued each time this function is called. When the
1590 		 * transfer completes, the extra requests for the channel will
1591 		 * be flushed.
1592 		 */
1593 		u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1594 
1595 		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1596 		hcchar |= HCCHAR_CHENA;
1597 		hcchar &= ~HCCHAR_CHDIS;
1598 		if (dbg_hc(chan))
1599 			dev_vdbg(hsotg->dev, "	 IN xfer: hcchar = 0x%08x\n",
1600 				 hcchar);
1601 		DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1602 		chan->requests++;
1603 		return 1;
1604 	}
1605 
1606 	/* OUT transfers */
1607 
1608 	if (chan->xfer_count < chan->xfer_len) {
1609 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1610 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1611 			u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1612 
1613 			dwc2_hc_set_even_odd_frame(hsotg, chan,
1614 						   &hcchar);
1615 		}
1616 
1617 		/* Load OUT packet into the appropriate Tx FIFO */
1618 		dwc2_hc_write_packet(hsotg, chan);
1619 		chan->requests++;
1620 		return 1;
1621 	}
1622 
1623 	return 0;
1624 }
1625 
1626 /**
1627  * dwc2_hc_do_ping() - Starts a PING transfer
1628  *
1629  * @hsotg: Programming view of DWC_otg controller
1630  * @chan:  Information needed to initialize the host channel
1631  *
1632  * This function should only be called in Slave mode. The Do Ping bit is set in
1633  * the HCTSIZ register, then the channel is enabled.
1634  */
1635 void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1636 {
1637 	u32 hcchar;
1638 	u32 hctsiz;
1639 
1640 	if (dbg_hc(chan))
1641 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1642 			 chan->hc_num);
1643 
1644 
1645 	hctsiz = TSIZ_DOPNG;
1646 	hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1647 	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
1648 
1649 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1650 	hcchar |= HCCHAR_CHENA;
1651 	hcchar &= ~HCCHAR_CHDIS;
1652 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1653 }
1654 
1655 /**
1656  * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
1657  * the HFIR register according to PHY type and speed
1658  *
1659  * @hsotg: Programming view of DWC_otg controller
1660  *
1661  * NOTE: The caller can modify the value of the HFIR register only after the
1662  * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
1663  * has been set
1664  */
1665 u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
1666 {
1667 	u32 usbcfg;
1668 	u32 hprt0;
1669 	int clock = 60;	/* default value */
1670 
1671 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
1672 	hprt0 = DWC2_READ_4(hsotg, HPRT0);
1673 
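	/*
	 * Determine the PHY clock in MHz from the PHY interface selection
	 * bits in GUSBCFG: 30, 48 or 60 MHz depending on the PHY type and
	 * data width.
	 */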
1674 	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
1675 	    !(usbcfg & GUSBCFG_PHYIF16))
1676 		clock = 60;
1677 	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
1678 	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
1679 		clock = 48;
1680 	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1681 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1682 		clock = 30;
1683 	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1684 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
1685 		clock = 60;
1686 	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1687 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1688 		clock = 48;
1689 	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
1690 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
1691 		clock = 48;
1692 	if ((usbcfg & GUSBCFG_PHYSEL) &&
1693 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
1694 		clock = 48;
1695 
1696 	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
1697 		/* High speed case */
1698 		return 125 * clock - 1;
1699 	else
1700 		/* FS/LS case */
1701 		return 1000 * clock - 1;
1702 }
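
/*
 * Worked example (illustrative): with a 16-bit UTMI+ PHY the clock above
 * resolves to 30 MHz, so a high-speed port is programmed with
 * 125 * 30 - 1 = 3749 PHY clocks per 125 us microframe, while a core behind
 * a dedicated 48 MHz FS PHY programs 1000 * 48 - 1 = 47999 clocks per 1 ms
 * FS/LS frame.
 */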
1703 
1704 /**
1705  * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
1706  * buffer
1707  *
1708  * @hsotg:   Programming view of DWC_otg controller
1709  * @dest:    Destination buffer for the packet
1710  * @bytes:   Number of bytes to copy to the destination
1711  */
1712 void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
1713 {
1714 	bus_size_t fifo = HCFIFO(0);
1715 	u32 *data_buf = (u32 *)dest;
1716 	int word_count = (bytes + 3) / 4;
1717 	int i;
1718 
1719 	/*
1720 	 * Todo: Account for the case where dest is not dword aligned. This
1721 	 * requires reading data from the FIFO into a u32 temp buffer, then
1722 	 * moving it into the data buffer.
1723 	 */
1724 
1725 	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
1726 
1727 	for (i = 0; i < word_count; i++, data_buf++)
1728 		*data_buf = DWC2_READ_4(hsotg, fifo);
1729 }
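
/*
 * Illustrative Slave-mode usage (a sketch, not the actual interrupt
 * handler): after an RXFLVL interrupt the byte count is popped from the Rx
 * status queue and the payload drained into the current buffer, e.g.
 *
 *	grxsts = DWC2_READ_4(hsotg, GRXSTSP);
 *	bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
 *	dwc2_read_packet(hsotg, chan->xfer_buf + chan->xfer_count, bcnt);
 *
 * GRXSTSP and the GRXSTS_BYTECNT_* accessors are assumed from the register
 * header; a real handler also advances xfer_count and must respect the
 * dword-alignment Todo noted above.
 */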
1730 
1731 /**
1732  * dwc2_dump_host_registers() - Prints the host registers
1733  *
1734  * @hsotg: Programming view of DWC_otg controller
1735  *
1736  * NOTE: This function will be removed once the peripheral controller code
1737  * is integrated and the driver is stable
1738  */
1739 void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
1740 {
1741 #ifdef DWC2_DEBUG
1742 	bus_size_t addr;
1743 	int i;
1744 
1745 	dev_dbg(hsotg->dev, "Host Global Registers\n");
1746 	addr = HCFG;
1747 	dev_dbg(hsotg->dev, "HCFG	 @0x%08lX : 0x%08X\n",
1748 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1749 	addr = HFIR;
1750 	dev_dbg(hsotg->dev, "HFIR	 @0x%08lX : 0x%08X\n",
1751 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1752 	addr = HFNUM;
1753 	dev_dbg(hsotg->dev, "HFNUM	 @0x%08lX : 0x%08X\n",
1754 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1755 	addr = HPTXSTS;
1756 	dev_dbg(hsotg->dev, "HPTXSTS	 @0x%08lX : 0x%08X\n",
1757 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1758 	addr = HAINT;
1759 	dev_dbg(hsotg->dev, "HAINT	 @0x%08lX : 0x%08X\n",
1760 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1761 	addr = HAINTMSK;
1762 	dev_dbg(hsotg->dev, "HAINTMSK	 @0x%08lX : 0x%08X\n",
1763 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1764 	if (hsotg->core_params->dma_desc_enable > 0) {
1765 		addr = HFLBADDR;
1766 		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
1767 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1768 	}
1769 
1770 	addr = HPRT0;
1771 	dev_dbg(hsotg->dev, "HPRT0	 @0x%08lX : 0x%08X\n",
1772 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1773 
1774 	for (i = 0; i < hsotg->core_params->host_channels; i++) {
1775 		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
1776 		addr = HCCHAR(i);
1777 		dev_dbg(hsotg->dev, "HCCHAR	 @0x%08lX : 0x%08X\n",
1778 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1779 		addr = HCSPLT(i);
1780 		dev_dbg(hsotg->dev, "HCSPLT	 @0x%08lX : 0x%08X\n",
1781 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1782 		addr = HCINT(i);
1783 		dev_dbg(hsotg->dev, "HCINT	 @0x%08lX : 0x%08X\n",
1784 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1785 		addr = HCINTMSK(i);
1786 		dev_dbg(hsotg->dev, "HCINTMSK	 @0x%08lX : 0x%08X\n",
1787 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1788 		addr = HCTSIZ(i);
1789 		dev_dbg(hsotg->dev, "HCTSIZ	 @0x%08lX : 0x%08X\n",
1790 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1791 		addr = HCDMA(i);
1792 		dev_dbg(hsotg->dev, "HCDMA	 @0x%08lX : 0x%08X\n",
1793 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1794 		if (hsotg->core_params->dma_desc_enable > 0) {
1795 			addr = HCDMAB(i);
1796 			dev_dbg(hsotg->dev, "HCDMAB	 @0x%08lX : 0x%08X\n",
1797 				(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1798 		}
1799 	}
1800 #endif
1801 }
1802 
1803 /**
1804  * dwc2_dump_global_registers() - Prints the core global registers
1805  *
1806  * @hsotg: Programming view of DWC_otg controller
1807  *
1808  * NOTE: This function will be removed once the peripheral controller code
1809  * is integrated and the driver is stable
1810  */
1811 void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
1812 {
1813 #ifdef DWC2_DEBUG
1814 	bus_size_t addr;
1815 
1816 	dev_dbg(hsotg->dev, "Core Global Registers\n");
1817 	addr = GOTGCTL;
1818 	dev_dbg(hsotg->dev, "GOTGCTL	 @0x%08lX : 0x%08X\n",
1819 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1820 	addr = GOTGINT;
1821 	dev_dbg(hsotg->dev, "GOTGINT	 @0x%08lX : 0x%08X\n",
1822 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1823 	addr = GAHBCFG;
1824 	dev_dbg(hsotg->dev, "GAHBCFG	 @0x%08lX : 0x%08X\n",
1825 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1826 	addr = GUSBCFG;
1827 	dev_dbg(hsotg->dev, "GUSBCFG	 @0x%08lX : 0x%08X\n",
1828 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1829 	addr = GRSTCTL;
1830 	dev_dbg(hsotg->dev, "GRSTCTL	 @0x%08lX : 0x%08X\n",
1831 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1832 	addr = GINTSTS;
1833 	dev_dbg(hsotg->dev, "GINTSTS	 @0x%08lX : 0x%08X\n",
1834 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1835 	addr = GINTMSK;
1836 	dev_dbg(hsotg->dev, "GINTMSK	 @0x%08lX : 0x%08X\n",
1837 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1838 	addr = GRXSTSR;
1839 	dev_dbg(hsotg->dev, "GRXSTSR	 @0x%08lX : 0x%08X\n",
1840 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1841 	addr = GRXFSIZ;
1842 	dev_dbg(hsotg->dev, "GRXFSIZ	 @0x%08lX : 0x%08X\n",
1843 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1844 	addr = GNPTXFSIZ;
1845 	dev_dbg(hsotg->dev, "GNPTXFSIZ	 @0x%08lX : 0x%08X\n",
1846 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1847 	addr = GNPTXSTS;
1848 	dev_dbg(hsotg->dev, "GNPTXSTS	 @0x%08lX : 0x%08X\n",
1849 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1850 	addr = GI2CCTL;
1851 	dev_dbg(hsotg->dev, "GI2CCTL	 @0x%08lX : 0x%08X\n",
1852 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1853 	addr = GPVNDCTL;
1854 	dev_dbg(hsotg->dev, "GPVNDCTL	 @0x%08lX : 0x%08X\n",
1855 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1856 	addr = GGPIO;
1857 	dev_dbg(hsotg->dev, "GGPIO	 @0x%08lX : 0x%08X\n",
1858 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1859 	addr = GUID;
1860 	dev_dbg(hsotg->dev, "GUID	 @0x%08lX : 0x%08X\n",
1861 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1862 	addr = GSNPSID;
1863 	dev_dbg(hsotg->dev, "GSNPSID	 @0x%08lX : 0x%08X\n",
1864 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1865 	addr = GHWCFG1;
1866 	dev_dbg(hsotg->dev, "GHWCFG1	 @0x%08lX : 0x%08X\n",
1867 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1868 	addr = GHWCFG2;
1869 	dev_dbg(hsotg->dev, "GHWCFG2	 @0x%08lX : 0x%08X\n",
1870 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1871 	addr = GHWCFG3;
1872 	dev_dbg(hsotg->dev, "GHWCFG3	 @0x%08lX : 0x%08X\n",
1873 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1874 	addr = GHWCFG4;
1875 	dev_dbg(hsotg->dev, "GHWCFG4	 @0x%08lX : 0x%08X\n",
1876 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1877 	addr = GLPMCFG;
1878 	dev_dbg(hsotg->dev, "GLPMCFG	 @0x%08lX : 0x%08X\n",
1879 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1880 	addr = GPWRDN;
1881 	dev_dbg(hsotg->dev, "GPWRDN	 @0x%08lX : 0x%08X\n",
1882 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1883 	addr = GDFIFOCFG;
1884 	dev_dbg(hsotg->dev, "GDFIFOCFG	 @0x%08lX : 0x%08X\n",
1885 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1886 	addr = HPTXFSIZ;
1887 	dev_dbg(hsotg->dev, "HPTXFSIZ	 @0x%08lX : 0x%08X\n",
1888 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1889 
1890 	addr = PCGCTL;
1891 	dev_dbg(hsotg->dev, "PCGCTL	 @0x%08lX : 0x%08X\n",
1892 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1893 #endif
1894 }
1895 
1896 /**
1897  * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
1898  *
1899  * @hsotg: Programming view of DWC_otg controller
1900  * @num:   Tx FIFO to flush
1901  */
1902 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
1903 {
1904 	u32 greset;
1905 	int count = 0;
1906 
1907 	dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
1908 
1909 	greset = GRSTCTL_TXFFLSH;
1910 	greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
1911 	DWC2_WRITE_4(hsotg, GRSTCTL, greset);
1912 
1913 	do {
1914 		greset = DWC2_READ_4(hsotg, GRSTCTL);
1915 		if (++count > 10000) {
1916 			dev_warn(hsotg->dev,
1917 				 "%s() HANG! GRSTCTL=0x%08x GNPTXSTS=0x%08x\n",
1918 				 __func__, greset,
1919 				 DWC2_READ_4(hsotg, GNPTXSTS));
1920 			break;
1921 		}
1922 		udelay(1);
1923 	} while (greset & GRSTCTL_TXFFLSH);
1924 
1925 	/* Wait for at least 3 PHY Clocks */
1926 	udelay(1);
1927 }
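
/*
 * Note (assumption about the Synopsys programming model): a TxFNum value of
 * 0x10 is understood to mean "flush all transmit FIFOs", so a full flush
 * during core initialization would look like
 *
 *	dwc2_flush_tx_fifo(hsotg, 0x10);
 *
 * i.e. the num argument acts as a hardware-defined selector, not only as
 * the index of a single periodic Tx FIFO.
 */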
1928 
1929 /**
1930  * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
1931  *
1932  * @hsotg: Programming view of DWC_otg controller
1933  */
1934 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
1935 {
1936 	u32 greset;
1937 	int count = 0;
1938 
1939 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
1940 
1941 	greset = GRSTCTL_RXFFLSH;
1942 	DWC2_WRITE_4(hsotg, GRSTCTL, greset);
1943 
1944 	do {
1945 		greset = DWC2_READ_4(hsotg, GRSTCTL);
1946 		if (++count > 10000) {
1947 			dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=0x%08x\n",
1948 				 __func__, greset);
1949 			break;
1950 		}
1951 		udelay(1);
1952 	} while (greset & GRSTCTL_RXFFLSH);
1953 
1954 	/* Wait for at least 3 PHY Clocks */
1955 	udelay(1);
1956 }
1957 
1958 #define DWC2_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))
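
/*
 * Example: DWC2_OUT_OF_BOUNDS(2, 0, 1) expands to (2 < 0 || 2 > 1), which
 * is true, so the boolean parameters below reject anything other than 0 or
 * 1. Negative values also fail the range check but fall through to the
 * "use the hardware default" path without logging an error (note the
 * "if (val >= 0)" guards in the setters).
 */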
1959 
1960 /* Parameter access functions */
1961 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
1962 {
1963 	int valid = 1;
1964 
1965 	switch (val) {
1966 	case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
1967 		if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
1968 			valid = 0;
1969 		break;
1970 	case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
1971 		switch (hsotg->hw_params.op_mode) {
1972 		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
1973 		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
1974 		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
1975 		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
1976 			break;
1977 		default:
1978 			valid = 0;
1979 			break;
1980 		}
1981 		break;
1982 	case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
1983 		/* always valid */
1984 		break;
1985 	default:
1986 		valid = 0;
1987 		break;
1988 	}
1989 
1990 	if (!valid) {
1991 		if (val >= 0)
1992 			dev_err(hsotg->dev,
1993 				"%d invalid for otg_cap parameter. Check HW configuration.\n",
1994 				val);
1995 		switch (hsotg->hw_params.op_mode) {
1996 		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
1997 			val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
1998 			break;
1999 		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2000 		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2001 		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2002 			val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
2003 			break;
2004 		default:
2005 			val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
2006 			break;
2007 		}
2008 		dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
2009 	}
2010 
2011 	hsotg->core_params->otg_cap = val;
2012 }
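
/*
 * Example of the fallback above (illustrative): on a core synthesized as
 * SRP-only capable, passing DWC2_CAP_PARAM_HNP_SRP_CAPABLE is rejected with
 * an error and replaced by DWC2_CAP_PARAM_SRP_ONLY_CAPABLE, while passing
 * -1 selects the same default silently.
 */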
2013 
2014 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2015 {
2016 	int valid = 1;
2017 
2018 	if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
2019 		valid = 0;
2020 	if (val < 0)
2021 		valid = 0;
2022 
2023 	if (!valid) {
2024 		if (val >= 0)
2025 			dev_err(hsotg->dev,
2026 				"%d invalid for dma_enable parameter. Check HW configuration.\n",
2027 				val);
2028 		val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
2029 		dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2030 	}
2031 
2032 	hsotg->core_params->dma_enable = val;
2033 }
2034 
2035 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2036 {
2037 	int valid = 1;
2038 
2039 	if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2040 			!hsotg->hw_params.dma_desc_enable))
2041 		valid = 0;
2042 	if (val < 0)
2043 		valid = 0;
2044 
2045 	if (!valid) {
2046 		if (val >= 0)
2047 			dev_err(hsotg->dev,
2048 				"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2049 				val);
2050 		val = (hsotg->core_params->dma_enable > 0 &&
2051 			hsotg->hw_params.dma_desc_enable);
2052 		dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2053 	}
2054 
2055 	hsotg->core_params->dma_desc_enable = val;
2056 }
2057 
2058 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2059 						 int val)
2060 {
2061 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2062 		if (val >= 0) {
2063 			dev_err(hsotg->dev,
2064 				"Wrong value for host_support_fs_ls_low_power\n");
2065 			dev_err(hsotg->dev,
2066 				"host_support_fs_ls_low_power must be 0 or 1\n");
2067 		}
2068 		val = 0;
2069 		dev_dbg(hsotg->dev,
2070 			"Setting host_support_fs_ls_low_power to %d\n", val);
2071 	}
2072 
2073 	hsotg->core_params->host_support_fs_ls_low_power = val;
2074 }
2075 
2076 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2077 {
2078 	int valid = 1;
2079 
2080 	if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
2081 		valid = 0;
2082 	if (val < 0)
2083 		valid = 0;
2084 
2085 	if (!valid) {
2086 		if (val >= 0)
2087 			dev_err(hsotg->dev,
2088 				"%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2089 				val);
2090 		val = hsotg->hw_params.enable_dynamic_fifo;
2091 		dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2092 	}
2093 
2094 	hsotg->core_params->enable_dynamic_fifo = val;
2095 }
2096 
2097 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2098 {
2099 	int valid = 1;
2100 
2101 	if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
2102 		valid = 0;
2103 
2104 	if (!valid) {
2105 		if (val >= 0)
2106 			dev_err(hsotg->dev,
2107 				"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2108 				val);
2109 		val = hsotg->hw_params.host_rx_fifo_size;
2110 		dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2111 	}
2112 
2113 	hsotg->core_params->host_rx_fifo_size = val;
2114 }
2115 
2116 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2117 {
2118 	int valid = 1;
2119 
2120 	if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
2121 		valid = 0;
2122 
2123 	if (!valid) {
2124 		if (val >= 0)
2125 			dev_err(hsotg->dev,
2126 				"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2127 				val);
2128 		val = hsotg->hw_params.host_nperio_tx_fifo_size;
2129 		dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2130 			val);
2131 	}
2132 
2133 	hsotg->core_params->host_nperio_tx_fifo_size = val;
2134 }
2135 
2136 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2137 {
2138 	int valid = 1;
2139 
2140 	if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
2141 		valid = 0;
2142 
2143 	if (!valid) {
2144 		if (val >= 0)
2145 			dev_err(hsotg->dev,
2146 				"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2147 				val);
2148 		val = hsotg->hw_params.host_perio_tx_fifo_size;
2149 		dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2150 			val);
2151 	}
2152 
2153 	hsotg->core_params->host_perio_tx_fifo_size = val;
2154 }
2155 
2156 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2157 {
2158 	int valid = 1;
2159 
2160 	if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
2161 		valid = 0;
2162 
2163 	if (!valid) {
2164 		if (val >= 0)
2165 			dev_err(hsotg->dev,
2166 				"%d invalid for max_transfer_size. Check HW configuration.\n",
2167 				val);
2168 		val = hsotg->hw_params.max_transfer_size;
2169 		dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2170 	}
2171 
2172 	hsotg->core_params->max_transfer_size = val;
2173 }
2174 
2175 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2176 {
2177 	int valid = 1;
2178 
2179 	if (val < 15 || val > hsotg->hw_params.max_packet_count)
2180 		valid = 0;
2181 
2182 	if (!valid) {
2183 		if (val >= 0)
2184 			dev_err(hsotg->dev,
2185 				"%d invalid for max_packet_count. Check HW configuration.\n",
2186 				val);
2187 		val = hsotg->hw_params.max_packet_count;
2188 		dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2189 	}
2190 
2191 	hsotg->core_params->max_packet_count = val;
2192 }
2193 
2194 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2195 {
2196 	int valid = 1;
2197 
2198 	if (val < 1 || val > hsotg->hw_params.host_channels)
2199 		valid = 0;
2200 
2201 	if (!valid) {
2202 		if (val >= 0)
2203 			dev_err(hsotg->dev,
2204 				"%d invalid for host_channels. Check HW configuration.\n",
2205 				val);
2206 		val = hsotg->hw_params.host_channels;
2207 		dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2208 	}
2209 
2210 	hsotg->core_params->host_channels = val;
2211 }
2212 
2213 void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
2214 {
2215 	int valid = 0;
2216 	u32 hs_phy_type, fs_phy_type;
2217 
2218 	if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
2219 			       DWC2_PHY_TYPE_PARAM_ULPI)) {
2220 		if (val >= 0) {
2221 			dev_err(hsotg->dev, "Wrong value for phy_type\n");
2222 			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2223 		}
2224 
2225 		valid = 0;
2226 	}
2227 
2228 	hs_phy_type = hsotg->hw_params.hs_phy_type;
2229 	fs_phy_type = hsotg->hw_params.fs_phy_type;
2230 	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2231 	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2232 	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2233 		valid = 1;
2234 	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2235 		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2236 		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2237 		valid = 1;
2238 	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2239 		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2240 		valid = 1;
2241 
2242 	if (!valid) {
2243 		if (val >= 0)
2244 			dev_err(hsotg->dev,
2245 				"%d invalid for phy_type. Check HW configuration.\n",
2246 				val);
2247 		val = DWC2_PHY_TYPE_PARAM_FS;
2248 		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2249 			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2250 			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2251 				val = DWC2_PHY_TYPE_PARAM_UTMI;
2252 			else
2253 				val = DWC2_PHY_TYPE_PARAM_ULPI;
2254 		}
2255 		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2256 	}
2257 
2258 	hsotg->core_params->phy_type = val;
2259 }
2260 
2261 STATIC int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2262 {
2263 	return hsotg->core_params->phy_type;
2264 }
2265 
2266 void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2267 {
2268 	int valid = 1;
2269 
2270 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2271 		if (val >= 0) {
2272 			dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2273 			dev_err(hsotg->dev, "speed parameter must be 0 or 1\n");
2274 		}
2275 		valid = 0;
2276 	}
2277 
2278 	if (val == DWC2_SPEED_PARAM_HIGH &&
2279 	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2280 		valid = 0;
2281 
2282 	if (!valid) {
2283 		if (val >= 0)
2284 			dev_err(hsotg->dev,
2285 				"%d invalid for speed parameter. Check HW configuration.\n",
2286 				val);
2287 		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
2288 				DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
2289 		dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2290 	}
2291 
2292 	hsotg->core_params->speed = val;
2293 }
2294 
2295 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2296 {
2297 	int valid = 1;
2298 
2299 	if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2300 			       DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
2301 		if (val >= 0) {
2302 			dev_err(hsotg->dev,
2303 				"Wrong value for host_ls_low_power_phy_clk parameter\n");
2304 			dev_err(hsotg->dev,
2305 				"host_ls_low_power_phy_clk must be 0 or 1\n");
2306 		}
2307 		valid = 0;
2308 	}
2309 
2310 	if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2311 	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2312 		valid = 0;
2313 
2314 	if (!valid) {
2315 		if (val >= 0)
2316 			dev_err(hsotg->dev,
2317 				"%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2318 				val);
2319 		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2320 			? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2321 			: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2322 		dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2323 			val);
2324 	}
2325 
2326 	hsotg->core_params->host_ls_low_power_phy_clk = val;
2327 }
2328 
2329 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2330 {
2331 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2332 		if (val >= 0) {
2333 			dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2334 			dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
2335 		}
2336 		val = 0;
2337 		dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
2338 	}
2339 
2340 	hsotg->core_params->phy_ulpi_ddr = val;
2341 }
2342 
2343 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2344 {
2345 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2346 		if (val >= 0) {
2347 			dev_err(hsotg->dev,
2348 				"Wrong value for phy_ulpi_ext_vbus\n");
2349 			dev_err(hsotg->dev,
2350 				"phy_ulpi_ext_vbus must be 0 or 1\n");
2351 		}
2352 		val = 0;
2353 		dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2354 	}
2355 
2356 	hsotg->core_params->phy_ulpi_ext_vbus = val;
2357 }
2358 
2359 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2360 {
2361 	int valid = 0;
2362 
2363 	switch (hsotg->hw_params.utmi_phy_data_width) {
2364 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
2365 		valid = (val == 8);
2366 		break;
2367 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
2368 		valid = (val == 16);
2369 		break;
2370 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
2371 		valid = (val == 8 || val == 16);
2372 		break;
2373 	}
2374 
2375 	if (!valid) {
2376 		if (val >= 0) {
2377 			dev_err(hsotg->dev,
2378 				"%d invalid for phy_utmi_width. Check HW configuration.\n",
2379 				val);
2380 		}
2381 		val = (hsotg->hw_params.utmi_phy_data_width ==
2382 		       GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
2383 		dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2384 	}
2385 
2386 	hsotg->core_params->phy_utmi_width = val;
2387 }
2388 
2389 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
2390 {
2391 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2392 		if (val >= 0) {
2393 			dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2394 			dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2395 		}
2396 		val = 0;
2397 		dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
2398 	}
2399 
2400 	hsotg->core_params->ulpi_fs_ls = val;
2401 }
2402 
2403 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
2404 {
2405 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2406 		if (val >= 0) {
2407 			dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2408 			dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2409 		}
2410 		val = 0;
2411 		dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
2412 	}
2413 
2414 	hsotg->core_params->ts_dline = val;
2415 }
2416 
2417 void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
2418 {
2419 	int valid = 1;
2420 
2421 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2422 		if (val >= 0) {
2423 			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
2424 			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
2425 		}
2426 
2427 		valid = 0;
2428 	}
2429 
2430 	if (val == 1 && !(hsotg->hw_params.i2c_enable))
2431 		valid = 0;
2432 
2433 	if (!valid) {
2434 		if (val >= 0)
2435 			dev_err(hsotg->dev,
2436 				"%d invalid for i2c_enable. Check HW configuration.\n",
2437 				val);
2438 		val = hsotg->hw_params.i2c_enable;
2439 		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2440 	}
2441 
2442 	hsotg->core_params->i2c_enable = val;
2443 }
2444 
2445 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
2446 {
2447 	int valid = 1;
2448 
2449 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2450 		if (val >= 0) {
2451 			dev_err(hsotg->dev,
2452 				"Wrong value for en_multiple_tx_fifo\n");
2453 			dev_err(hsotg->dev,
2454 				"en_multiple_tx_fifo must be 0 or 1\n");
2455 		}
2456 		valid = 0;
2457 	}
2458 
2459 	if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
2460 		valid = 0;
2461 
2462 	if (!valid) {
2463 		if (val >= 0)
2464 			dev_err(hsotg->dev,
2465 				"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2466 				val);
2467 		val = hsotg->hw_params.en_multiple_tx_fifo;
2468 		dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
2469 	}
2470 
2471 	hsotg->core_params->en_multiple_tx_fifo = val;
2472 }
2473 
2474 void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
2475 {
2476 	int valid = 1;
2477 
2478 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2479 		if (val >= 0) {
2480 			dev_err(hsotg->dev,
2481 				"'%d' invalid for parameter reload_ctl\n", val);
2482 			dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2483 		}
2484 		valid = 0;
2485 	}
2486 
2487 	if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
2488 		valid = 0;
2489 
2490 	if (!valid) {
2491 		if (val >= 0)
2492 			dev_err(hsotg->dev,
2493 				"%d invalid for parameter reload_ctl. Check HW configuration.\n",
2494 				val);
2495 		val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
2496 		dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
2497 	}
2498 
2499 	hsotg->core_params->reload_ctl = val;
2500 }
2501 
2502 void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
2503 {
2504 	if (val != -1)
2505 		hsotg->core_params->ahbcfg = val;
2506 	else
2507 		hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
2508 						GAHBCFG_HBSTLEN_SHIFT;
2509 }
2510 
2511 void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
2512 {
2513 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2514 		if (val >= 0) {
2515 			dev_err(hsotg->dev,
2516 				"'%d' invalid for parameter otg_ver\n", val);
2517 			dev_err(hsotg->dev,
2518 				"otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2519 		}
2520 		val = 0;
2521 		dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
2522 	}
2523 
2524 	hsotg->core_params->otg_ver = val;
2525 }
2526 
2527 STATIC void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
2528 {
2529 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2530 		if (val >= 0) {
2531 			dev_err(hsotg->dev,
2532 				"'%d' invalid for parameter uframe_sched\n",
2533 				val);
2534 			dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
2535 		}
2536 		val = 1;
2537 		dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
2538 	}
2539 
2540 	hsotg->core_params->uframe_sched = val;
2541 }
2542 
2543 /*
2544  * This function is called during module initialization to pass module parameters
2545  * for the DWC_otg core.
2546  */
2547 void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
2548 			 const struct dwc2_core_params *params)
2549 {
2550 	dev_dbg(hsotg->dev, "%s()\n", __func__);
2551 
2552 	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
2553 	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
2554 	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
2555 	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
2556 			params->host_support_fs_ls_low_power);
2557 	dwc2_set_param_enable_dynamic_fifo(hsotg,
2558 			params->enable_dynamic_fifo);
2559 	dwc2_set_param_host_rx_fifo_size(hsotg,
2560 			params->host_rx_fifo_size);
2561 	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
2562 			params->host_nperio_tx_fifo_size);
2563 	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
2564 			params->host_perio_tx_fifo_size);
2565 	dwc2_set_param_max_transfer_size(hsotg,
2566 			params->max_transfer_size);
2567 	dwc2_set_param_max_packet_count(hsotg,
2568 			params->max_packet_count);
2569 	dwc2_set_param_host_channels(hsotg, params->host_channels);
2570 	dwc2_set_param_phy_type(hsotg, params->phy_type);
2571 	dwc2_set_param_speed(hsotg, params->speed);
2572 	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
2573 			params->host_ls_low_power_phy_clk);
2574 	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
2575 	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
2576 			params->phy_ulpi_ext_vbus);
2577 	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
2578 	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
2579 	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
2580 	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
2581 	dwc2_set_param_en_multiple_tx_fifo(hsotg,
2582 			params->en_multiple_tx_fifo);
2583 	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
2584 	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
2585 	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
2586 	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
2587 }
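
/*
 * Illustrative usage (a sketch, not the attach code): bus glue can leave
 * any field it does not care about at -1 so that the corresponding setter
 * above falls back to the value detected from the GHWCFG registers, e.g.
 *
 *	static const struct dwc2_core_params dwc2_example_params = {
 *		.otg_cap		= DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
 *		.dma_enable		= -1,
 *		.dma_desc_enable	= 0,
 *		.host_channels		= -1,
 *		.phy_type		= -1,
 *		.speed			= -1,
 *	};
 *
 *	dwc2_set_parameters(hsotg, &dwc2_example_params);
 *
 * dwc2_example_params is a hypothetical table; a real platform fills in
 * every field of struct dwc2_core_params, with -1 meaning "use the
 * default".
 */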
2588 
2589 /**
2590  * dwc2_get_hwparams() - During device initialization, read various hardware
2591  * configuration registers and interpret the contents.
2592  */
2593 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
2594 {
2595 	struct dwc2_hw_params *hw = &hsotg->hw_params;
2596 	unsigned width;
2597 	u32 hwcfg2, hwcfg3, hwcfg4;
2598 	u32 hptxfsiz, grxfsiz, gnptxfsiz;
2599 	u32 gusbcfg;
2600 
2601 	/*
2602 	 * Attempt to ensure this device is really a DWC_otg Controller.
2603 	 * Read and verify the GSNPSID register contents. The value should be
2604 	 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
2605 	 * as in "OTG version 2.xx" or "OTG version 3.xx".
2606 	 */
2607 	hw->snpsid = DWC2_READ_4(hsotg, GSNPSID);
2608 	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
2609 	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
2610 		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
2611 			hw->snpsid);
2612 		return -ENODEV;
2613 	}
2614 
2615 	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
2616 		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
2617 		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
2618 
2619 	hwcfg2 = DWC2_READ_4(hsotg, GHWCFG2);
2620 	hwcfg3 = DWC2_READ_4(hsotg, GHWCFG3);
2621 	hwcfg4 = DWC2_READ_4(hsotg, GHWCFG4);
2622 	gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ);
2623 	grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
2624 
2625 	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", DWC2_READ_4(hsotg, GHWCFG1));
2626 	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
2627 	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
2628 	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
2629 	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
2630 	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
2631 
2632 	/* Force host mode to get HPTXFSIZ exact power on value */
2633 	gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
2634 	gusbcfg |= GUSBCFG_FORCEHOSTMODE;
2635 	DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
2636 	usleep_range(100000, 150000);
2637 
2638 	hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ);
2639 	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
2640 	gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
2641 	gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
2642 	DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
2643 	usleep_range(100000, 150000);
2644 
2645 	/* hwcfg2 */
2646 	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
2647 		      GHWCFG2_OP_MODE_SHIFT;
2648 	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
2649 		   GHWCFG2_ARCHITECTURE_SHIFT;
2650 	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
2651 	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
2652 				GHWCFG2_NUM_HOST_CHAN_SHIFT);
2653 	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
2654 			  GHWCFG2_HS_PHY_TYPE_SHIFT;
2655 	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
2656 			  GHWCFG2_FS_PHY_TYPE_SHIFT;
2657 	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
2658 			 GHWCFG2_NUM_DEV_EP_SHIFT;
2659 	hw->nperio_tx_q_depth =
2660 		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
2661 		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
2662 	hw->host_perio_tx_q_depth =
2663 		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
2664 		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
2665 	hw->dev_token_q_depth =
2666 		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
2667 		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
2668 
2669 	/* hwcfg3 */
2670 	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
2671 		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
2672 	hw->max_transfer_size = (1 << (width + 11)) - 1;
2673 	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
2674 		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
2675 	hw->max_packet_count = (1 << (width + 4)) - 1;
2676 	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
2677 	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
2678 			      GHWCFG3_DFIFO_DEPTH_SHIFT;
2679 
2680 	/* hwcfg4 */
2681 	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
2682 	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
2683 				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
2684 	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
2685 	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
2686 	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
2687 				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
2688 
2689 	/* fifo sizes */
2690 	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
2691 				GRXFSIZ_DEPTH_SHIFT;
2692 	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2693 				       FIFOSIZE_DEPTH_SHIFT;
2694 	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2695 				      FIFOSIZE_DEPTH_SHIFT;
2696 
2697 	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
2698 	dev_dbg(hsotg->dev, "  op_mode=%d\n",
2699 		hw->op_mode);
2700 	dev_dbg(hsotg->dev, "  arch=%d\n",
2701 		hw->arch);
2702 	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
2703 		hw->dma_desc_enable);
2704 	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
2705 		hw->power_optimized);
2706 	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
2707 		hw->i2c_enable);
2708 	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
2709 		hw->hs_phy_type);
2710 	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
2711 		hw->fs_phy_type);
2712 	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
2713 		hw->utmi_phy_data_width);
2714 	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
2715 		hw->num_dev_ep);
2716 	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
2717 		hw->num_dev_perio_in_ep);
2718 	dev_dbg(hsotg->dev, "  host_channels=%d\n",
2719 		hw->host_channels);
2720 	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
2721 		hw->max_transfer_size);
2722 	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
2723 		hw->max_packet_count);
2724 	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
2725 		hw->nperio_tx_q_depth);
2726 	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
2727 		hw->host_perio_tx_q_depth);
2728 	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
2729 		hw->dev_token_q_depth);
2730 	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
2731 		hw->enable_dynamic_fifo);
2732 	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
2733 		hw->en_multiple_tx_fifo);
2734 	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
2735 		hw->total_fifo_size);
2736 	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
2737 		hw->host_rx_fifo_size);
2738 	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
2739 		hw->host_nperio_tx_fifo_size);
2740 	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
2741 		hw->host_perio_tx_fifo_size);
2742 	dev_dbg(hsotg->dev, "\n");
2743 
2744 	return 0;
2745 }
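
/*
 * Worked example (illustrative): a GHWCFG3 transfer size counter width of
 * 8 yields max_transfer_size = (1 << (8 + 11)) - 1 = 524287 bytes, and a
 * packet size counter width of 6 yields max_packet_count =
 * (1 << (6 + 4)) - 1 = 1023 packets; the dwc2_set_param_*() routines above
 * validate caller-supplied values against these detected limits.
 */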
2746 
2747 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
2748 {
2749 	return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
2750 }
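
/*
 * The return value encodes the supported OTG supplement release: 0x0200
 * when otg_ver selects OTG 2.0, otherwise 0x0103 for OTG 1.3 (see
 * dwc2_set_param_otg_ver() above).
 */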
2751 
2752 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
2753 {
2754 	if (DWC2_READ_4(hsotg, GSNPSID) == 0xffffffff)
2755 		return false;
2756 	else
2757 		return true;
2758 }
2759 
2760 /**
2761  * dwc2_enable_global_interrupts() - Enables the controller's Global
2762  * Interrupt in the AHB Config register
2763  *
2764  * @hsotg: Programming view of DWC_otg controller
2765  */
2766 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
2767 {
2768 	u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
2769 
2770 	ahbcfg |= GAHBCFG_GLBL_INTR_EN;
2771 	DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
2772 }
2773 
2774 /**
2775  * dwc2_disable_global_interrupts() - Disables the controller's Global
2776  * Interrupt in the AHB Config register
2777  *
2778  * @hsotg: Programming view of DWC_otg controller
2779  */
2780 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
2781 {
2782 	u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
2783 
2784 	ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
2785 	DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
2786 }
2787