xref: /netbsd-src/sys/external/bsd/dwc2/dist/dwc2_core.c (revision b7b7574d3bf8eeb51a1fa3977b59142ec6434a55)
1 /*	$NetBSD: dwc2_core.c,v 1.6 2014/04/03 06:34:58 skrll Exp $	*/
2 
3 /*
4  * core.c - DesignWare HS OTG Controller common routines
5  *
6  * Copyright (C) 2004-2013 Synopsys, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The names of the above-listed copyright holders may not be used
18  *    to endorse or promote products derived from this software without
19  *    specific prior written permission.
20  *
21  * ALTERNATIVELY, this software may be distributed under the terms of the
22  * GNU General Public License ("GPL") as published by the Free Software
23  * Foundation; either version 2 of the License, or (at your option) any
24  * later version.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * The Core code provides basic services for accessing and managing the
41  * DWC_otg hardware. These services are used by both the Host Controller
42  * Driver and the Peripheral Controller Driver.
43  */
44 
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: dwc2_core.c,v 1.6 2014/04/03 06:34:58 skrll Exp $");
47 
48 #include <sys/types.h>
49 #include <sys/bus.h>
50 #include <sys/proc.h>
51 #include <sys/callout.h>
52 #include <sys/mutex.h>
53 #include <sys/pool.h>
54 #include <sys/workqueue.h>
55 
56 #include <dev/usb/usb.h>
57 #include <dev/usb/usbdi.h>
58 #include <dev/usb/usbdivar.h>
59 #include <dev/usb/usb_mem.h>
60 
61 #include <linux/kernel.h>
62 #include <linux/list.h>
63 
64 #include <dwc2/dwc2.h>
65 #include <dwc2/dwc2var.h>
66 
67 #include "dwc2_core.h"
68 #include "dwc2_hcd.h"
69 
70 /**
71  * dwc2_enable_common_interrupts() - Initializes the common interrupts,
72  * used in both device and host modes
73  *
74  * @hsotg: Programming view of the DWC_otg controller
75  */
76 static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
77 {
78 	u32 intmsk;
79 
80 	/* Clear any pending OTG Interrupts */
81 	DWC2_WRITE_4(hsotg, GOTGINT, 0xffffffff);
82 
83 	/* Clear any pending interrupts */
84 	DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff);
85 
86 	/* Enable the interrupts in the GINTMSK */
87 	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
88 
89 	if (hsotg->core_params->dma_enable <= 0)
90 		intmsk |= GINTSTS_RXFLVL;
91 
92 	intmsk |= GINTSTS_CONIDSTSCHNG | GINTSTS_WKUPINT | GINTSTS_USBSUSP |
93 		  GINTSTS_SESSREQINT;
94 
95 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
96 }
97 
98 /*
99  * Initializes the FSLSPClkSel field of the HCFG register depending on the
100  * PHY type
101  */
102 static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
103 {
104 	u32 hcfg, val;
105 
106 	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
107 	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
108 	     hsotg->core_params->ulpi_fs_ls > 0) ||
109 	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
110 		/* Full speed PHY */
111 		val = HCFG_FSLSPCLKSEL_48_MHZ;
112 	} else {
113 		/* High speed PHY running at full speed or high speed */
114 		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
115 	}
116 
117 	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
118 	hcfg = DWC2_READ_4(hsotg, HCFG);
119 	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
120 	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
121 	DWC2_WRITE_4(hsotg, HCFG, hcfg);
122 }
123 
124 /*
125  * Do a soft reset of the core.  Be careful with this because it
126  * resets all the internal state machines of the core.
127  */
128 static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
129 {
130 	u32 greset;
131 	int count = 0;
132 
133 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
134 
135 	/* Wait for AHB master IDLE state */
136 	do {
137 		usleep_range(20000, 40000);
138 		greset = DWC2_READ_4(hsotg, GRSTCTL);
139 		if (++count > 50) {
140 			dev_warn(hsotg->dev,
141 				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
142 				 __func__, greset);
143 			return -EBUSY;
144 		}
145 	} while (!(greset & GRSTCTL_AHBIDLE));
146 
147 	/* Core Soft Reset */
148 	count = 0;
149 	greset |= GRSTCTL_CSFTRST;
150 	DWC2_WRITE_4(hsotg, GRSTCTL, greset);
151 	do {
152 		usleep_range(20000, 40000);
153 		greset = DWC2_READ_4(hsotg, GRSTCTL);
154 		if (++count > 50) {
155 			dev_warn(hsotg->dev,
156 				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
157 				 __func__, greset);
158 			return -EBUSY;
159 		}
160 	} while (greset & GRSTCTL_CSFTRST);
161 
162 	/*
163 	 * NOTE: This long sleep is _very_ important, otherwise the core will
164 	 * not stay in host mode after a connector ID change!
165 	 */
166 	usleep_range(150000, 200000);
167 
168 	return 0;
169 }
170 
171 static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
172 {
173 	u32 usbcfg, i2cctl;
174 	int retval = 0;
175 
176 	/*
177 	 * core_init() is now called on every switch so only call the
178 	 * following for the first time through
179 	 */
180 	if (select_phy) {
181 		dev_dbg(hsotg->dev, "FS PHY selected\n");
182 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
183 		usbcfg |= GUSBCFG_PHYSEL;
184 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
185 
186 		/* Reset after a PHY select */
187 		retval = dwc2_core_reset(hsotg);
188 		if (retval) {
189 			dev_err(hsotg->dev, "%s() Reset failed, aborting",
190 					__func__);
191 			return retval;
192 		}
193 	}
194 
195 	/*
196 	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
197 	 * do this on HNP Dev/Host mode switches (done in dev_init and
198 	 * host_init).
199 	 */
200 	if (dwc2_is_host_mode(hsotg))
201 		dwc2_init_fs_ls_pclk_sel(hsotg);
202 
203 	if (hsotg->core_params->i2c_enable > 0) {
204 		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
205 
206 		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
207 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
208 		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
209 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
210 
211 		/* Program GI2CCTL.I2CEn */
212 		i2cctl = DWC2_READ_4(hsotg, GI2CCTL);
213 		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
214 		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
215 		i2cctl &= ~GI2CCTL_I2CEN;
216 		DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl);
217 		i2cctl |= GI2CCTL_I2CEN;
218 		DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl);
219 	}
220 
221 	return retval;
222 }
223 
224 static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
225 {
226 	u32 usbcfg;
227 	int retval = 0;
228 
229 	if (!select_phy)
230 		return 0;
231 
232 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
233 
234 	/*
235 	 * HS PHY parameters. These parameters are preserved during soft reset
236 	 * so only program the first time. Do a soft reset immediately after
237 	 * setting phyif.
238 	 */
239 	switch (hsotg->core_params->phy_type) {
240 	case DWC2_PHY_TYPE_PARAM_ULPI:
241 		/* ULPI interface */
242 		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
243 		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
244 		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
245 		if (hsotg->core_params->phy_ulpi_ddr > 0)
246 			usbcfg |= GUSBCFG_DDRSEL;
247 		break;
248 	case DWC2_PHY_TYPE_PARAM_UTMI:
249 		/* UTMI+ interface */
250 		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
251 		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
252 		if (hsotg->core_params->phy_utmi_width == 16)
253 			usbcfg |= GUSBCFG_PHYIF16;
254 		break;
255 	default:
256 		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
257 		break;
258 	}
259 
260 	DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
261 
262 	/* Reset after setting the PHY parameters */
263 	retval = dwc2_core_reset(hsotg);
264 	if (retval) {
265 		dev_err(hsotg->dev, "%s() Reset failed, aborting",
266 				__func__);
267 		return retval;
268 	}
269 
270 	return retval;
271 }
272 
273 static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
274 {
275 	u32 usbcfg;
276 	int retval = 0;
277 
278 	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
279 	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
280 		/* If FS mode with FS PHY */
281 		retval = dwc2_fs_phy_init(hsotg, select_phy);
282 		if (retval)
283 			return retval;
284 	} else {
285 		/* High speed PHY */
286 		retval = dwc2_hs_phy_init(hsotg, select_phy);
287 		if (retval)
288 			return retval;
289 	}
290 
291 	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
292 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
293 	    hsotg->core_params->ulpi_fs_ls > 0) {
294 		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
295 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
296 		usbcfg |= GUSBCFG_ULPI_FS_LS;
297 		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
298 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
299 	} else {
300 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
301 		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
302 		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
303 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
304 	}
305 
306 	return retval;
307 }
308 
309 static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
310 {
311 	u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
312 
313 	switch (hsotg->hw_params.arch) {
314 	case GHWCFG2_EXT_DMA_ARCH:
315 		dev_err(hsotg->dev, "External DMA Mode not supported\n");
316 		return -EINVAL;
317 
318 	case GHWCFG2_INT_DMA_ARCH:
319 		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
320 		if (hsotg->core_params->ahbcfg != -1) {
321 			ahbcfg &= GAHBCFG_CTRL_MASK;
322 			ahbcfg |= hsotg->core_params->ahbcfg &
323 				  ~GAHBCFG_CTRL_MASK;
324 		}
325 		break;
326 
327 	case GHWCFG2_SLAVE_ONLY_ARCH:
328 	default:
329 		dev_dbg(hsotg->dev, "Slave Only Mode\n");
330 		break;
331 	}
332 
333 	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
334 		hsotg->core_params->dma_enable,
335 		hsotg->core_params->dma_desc_enable);
336 
337 	if (hsotg->core_params->dma_enable > 0) {
338 		if (hsotg->core_params->dma_desc_enable > 0)
339 			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
340 		else
341 			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
342 	} else {
343 		dev_dbg(hsotg->dev, "Using Slave mode\n");
344 		hsotg->core_params->dma_desc_enable = 0;
345 	}
346 
347 	if (hsotg->core_params->dma_enable > 0)
348 		ahbcfg |= GAHBCFG_DMA_EN;
349 
350 	DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
351 
352 	return 0;
353 }
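
/*
 * Illustrative note (hypothetical platform value): a glue layer that passes
 * core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT would
 * only affect the non-control bits merged in above (here the AHB burst
 * length), while the bits covered by GAHBCFG_CTRL_MASK, such as
 * GAHBCFG_DMA_EN and the global interrupt enable, stay under driver control.
 */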
354 
355 static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
356 {
357 	u32 usbcfg;
358 
359 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
360 	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
361 
362 	switch (hsotg->hw_params.op_mode) {
363 	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
364 		if (hsotg->core_params->otg_cap ==
365 				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
366 			usbcfg |= GUSBCFG_HNPCAP;
367 		if (hsotg->core_params->otg_cap !=
368 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
369 			usbcfg |= GUSBCFG_SRPCAP;
370 		break;
371 
372 	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
373 	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
374 	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
375 		if (hsotg->core_params->otg_cap !=
376 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
377 			usbcfg |= GUSBCFG_SRPCAP;
378 		break;
379 
380 	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
381 	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
382 	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
383 	default:
384 		break;
385 	}
386 
387 	DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
388 }
389 
390 /**
391  * dwc2_core_init() - Initializes the DWC_otg controller registers and
392  * prepares the core for device mode or host mode operation
393  *
394  * @hsotg:      Programming view of the DWC_otg controller
395  * @select_phy: If true then also set the PHY type
397  */
398 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy)
399 {
400 	u32 usbcfg, otgctl;
401 	int retval;
402 
403 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
404 
405 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
406 
407 	/* Set ULPI External VBUS bit if needed */
408 	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
409 	if (hsotg->core_params->phy_ulpi_ext_vbus ==
410 				DWC2_PHY_ULPI_EXTERNAL_VBUS)
411 		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
412 
413 	/* Set external TS Dline pulsing bit if needed */
414 	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
415 	if (hsotg->core_params->ts_dline > 0)
416 		usbcfg |= GUSBCFG_TERMSELDLPULSE;
417 
418 	DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
419 
420 	/* Reset the Controller */
421 	retval = dwc2_core_reset(hsotg);
422 	if (retval) {
423 		dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
424 				__func__);
425 		return retval;
426 	}
427 
428 	/*
429 	 * This needs to happen in FS mode before any other programming occurs
430 	 */
431 	retval = dwc2_phy_init(hsotg, select_phy);
432 	if (retval)
433 		return retval;
434 
435 	/* Program the GAHBCFG Register */
436 	retval = dwc2_gahbcfg_init(hsotg);
437 	if (retval)
438 		return retval;
439 
440 	/* Program the GUSBCFG register */
441 	dwc2_gusbcfg_init(hsotg);
442 
443 	/* Program the GOTGCTL register */
444 	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
445 	otgctl &= ~GOTGCTL_OTGVER;
446 	if (hsotg->core_params->otg_ver > 0)
447 		otgctl |= GOTGCTL_OTGVER;
448 	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
449 	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
450 
451 	/* Clear the SRP success bit for FS-I2c */
452 	hsotg->srp_success = 0;
453 
454 	/* Enable common interrupts */
455 	dwc2_enable_common_interrupts(hsotg);
456 
457 	/*
458 	 * Do device or host initialization based on mode during PCD and
459 	 * HCD initialization
460 	 */
461 	if (dwc2_is_host_mode(hsotg)) {
462 		dev_dbg(hsotg->dev, "Host Mode\n");
463 		hsotg->op_state = OTG_STATE_A_HOST;
464 	} else {
465 		dev_dbg(hsotg->dev, "Device Mode\n");
466 		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
467 	}
468 
469 	return 0;
470 }
471 
472 /**
473  * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
474  *
475  * @hsotg: Programming view of DWC_otg controller
476  */
477 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
478 {
479 	u32 intmsk;
480 
481 	dev_dbg(hsotg->dev, "%s()\n", __func__);
482 
483 	/* Disable all interrupts */
484 	DWC2_WRITE_4(hsotg, GINTMSK, 0);
485 	DWC2_WRITE_4(hsotg, HAINTMSK, 0);
486 
487 	/* Enable the common interrupts */
488 	dwc2_enable_common_interrupts(hsotg);
489 
490 	/* Enable host mode interrupts without disturbing common interrupts */
491 	intmsk = DWC2_READ_4(hsotg, GINTMSK);
492 	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
493 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
494 }
495 
496 /**
497  * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
498  *
499  * @hsotg: Programming view of DWC_otg controller
500  */
501 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
502 {
503 	u32 intmsk = DWC2_READ_4(hsotg, GINTMSK);
504 
505 	/* Disable host mode interrupts without disturbing common interrupts */
506 	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
507 		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
508 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
509 }
510 
511 static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
512 {
513 	struct dwc2_core_params *params = hsotg->core_params;
514 	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
515 
516 	if (!params->enable_dynamic_fifo)
517 		return;
518 
519 	/* Rx FIFO */
520 	grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
521 	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
522 	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
523 	grxfsiz |= params->host_rx_fifo_size <<
524 		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
525 	DWC2_WRITE_4(hsotg, GRXFSIZ, grxfsiz);
526 	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", DWC2_READ_4(hsotg, GRXFSIZ));
527 
528 	/* Non-periodic Tx FIFO */
529 	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
530 		DWC2_READ_4(hsotg, GNPTXFSIZ));
531 	nptxfsiz = params->host_nperio_tx_fifo_size <<
532 		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
533 	nptxfsiz |= params->host_rx_fifo_size <<
534 		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
535 	DWC2_WRITE_4(hsotg, GNPTXFSIZ, nptxfsiz);
536 	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
537 		DWC2_READ_4(hsotg, GNPTXFSIZ));
538 
539 	/* Periodic Tx FIFO */
540 	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
541 		DWC2_READ_4(hsotg, HPTXFSIZ));
542 	hptxfsiz = params->host_perio_tx_fifo_size <<
543 		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
544 	hptxfsiz |= (params->host_rx_fifo_size +
545 		     params->host_nperio_tx_fifo_size) <<
546 		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
547 	DWC2_WRITE_4(hsotg, HPTXFSIZ, hptxfsiz);
548 	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
549 		DWC2_READ_4(hsotg, HPTXFSIZ));
550 
551 	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
552 	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
553 		/*
554 		 * Global DFIFOCFG calculation for Host mode -
555 		 * include RxFIFO, NPTXFIFO and HPTXFIFO
556 		 */
557 		dfifocfg = DWC2_READ_4(hsotg, GDFIFOCFG);
558 		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
559 		dfifocfg |= (params->host_rx_fifo_size +
560 			     params->host_nperio_tx_fifo_size +
561 			     params->host_perio_tx_fifo_size) <<
562 			    GDFIFOCFG_EPINFOBASE_SHIFT &
563 			    GDFIFOCFG_EPINFOBASE_MASK;
564 		DWC2_WRITE_4(hsotg, GDFIFOCFG, dfifocfg);
565 	}
566 }
567 
568 /**
569  * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
570  * Host mode
571  *
572  * @hsotg: Programming view of DWC_otg controller
573  *
574  * This function flushes the Tx and Rx FIFOs and flushes any entries in the
575  * request queues. Host channels are reset to ensure that they are ready for
576  * performing transfers.
577  */
578 void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
579 {
580 	u32 hcfg, hfir, otgctl;
581 
582 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
583 
584 	/* Restart the Phy Clock */
585 	DWC2_WRITE_4(hsotg, PCGCTL, 0);
586 
587 	/* Initialize Host Configuration Register */
588 	dwc2_init_fs_ls_pclk_sel(hsotg);
589 	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
590 		hcfg = DWC2_READ_4(hsotg, HCFG);
591 		hcfg |= HCFG_FSLSSUPP;
592 		DWC2_WRITE_4(hsotg, HCFG, hcfg);
593 	}
594 
595 	/*
596 	 * This bit allows dynamic reloading of the HFIR register during
597 	 * runtime. This bit needs to be programmed during initial configuration
598 	 * and its value must not be changed during runtime.
599 	 */
600 	if (hsotg->core_params->reload_ctl > 0) {
601 		hfir = DWC2_READ_4(hsotg, HFIR);
602 		hfir |= HFIR_RLDCTRL;
603 		DWC2_WRITE_4(hsotg, HFIR, hfir);
604 	}
605 
606 	if (hsotg->core_params->dma_desc_enable > 0) {
607 		u32 op_mode = hsotg->hw_params.op_mode;
608 		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
609 		    !hsotg->hw_params.dma_desc_enable ||
610 		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
611 		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
612 		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
613 			dev_err(hsotg->dev,
614 				"Hardware does not support descriptor DMA mode -\n");
615 			dev_err(hsotg->dev,
616 				"falling back to buffer DMA mode.\n");
617 			hsotg->core_params->dma_desc_enable = 0;
618 		} else {
619 			hcfg = DWC2_READ_4(hsotg, HCFG);
620 			hcfg |= HCFG_DESCDMA;
621 			DWC2_WRITE_4(hsotg, HCFG, hcfg);
622 		}
623 	}
624 
625 	/* Configure data FIFO sizes */
626 	dwc2_config_fifos(hsotg);
627 
628 	/* TODO - check this */
629 	/* Clear Host Set HNP Enable in the OTG Control Register */
630 	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
631 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
632 	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
633 
634 	/* Make sure the FIFOs are flushed */
635 	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
636 	dwc2_flush_rx_fifo(hsotg);
637 
638 	/* Clear Host Set HNP Enable in the OTG Control Register */
639 	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
640 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
641 	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
642 
643 	if (hsotg->core_params->dma_desc_enable <= 0) {
644 		int num_channels, i;
645 		u32 hcchar;
646 
647 		/* Flush out any leftover queued requests */
648 		num_channels = hsotg->core_params->host_channels;
649 		for (i = 0; i < num_channels; i++) {
650 			hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
651 			hcchar &= ~HCCHAR_CHENA;
652 			hcchar |= HCCHAR_CHDIS;
653 			hcchar &= ~HCCHAR_EPDIR;
654 			DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar);
655 		}
656 
657 		/* Halt all channels to put them into a known state */
658 		for (i = 0; i < num_channels; i++) {
659 			int count = 0;
660 
661 			hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
662 			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
663 			hcchar &= ~HCCHAR_EPDIR;
664 			DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar);
665 			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
666 				__func__, i);
667 			do {
668 				hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
669 				if (++count > 1000) {
670 					dev_err(hsotg->dev,
671 						"Unable to clear enable on channel %d\n",
672 						i);
673 					break;
674 				}
675 				udelay(1);
676 			} while (hcchar & HCCHAR_CHENA);
677 		}
678 	}
679 
680 	/* Turn on the vbus power */
681 	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
682 	if (hsotg->op_state == OTG_STATE_A_HOST) {
683 		u32 hprt0 = dwc2_read_hprt0(hsotg);
684 
685 		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
686 			!!(hprt0 & HPRT0_PWR));
687 		if (!(hprt0 & HPRT0_PWR)) {
688 			hprt0 |= HPRT0_PWR;
689 			DWC2_WRITE_4(hsotg, HPRT0, hprt0);
690 		}
691 	}
692 
693 	dwc2_enable_host_interrupts(hsotg);
694 }
695 
696 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
697 				      struct dwc2_host_chan *chan)
698 {
699 	u32 hcintmsk = HCINTMSK_CHHLTD;
700 
701 	switch (chan->ep_type) {
702 	case USB_ENDPOINT_XFER_CONTROL:
703 	case USB_ENDPOINT_XFER_BULK:
704 		dev_vdbg(hsotg->dev, "control/bulk\n");
705 		hcintmsk |= HCINTMSK_XFERCOMPL;
706 		hcintmsk |= HCINTMSK_STALL;
707 		hcintmsk |= HCINTMSK_XACTERR;
708 		hcintmsk |= HCINTMSK_DATATGLERR;
709 		if (chan->ep_is_in) {
710 			hcintmsk |= HCINTMSK_BBLERR;
711 		} else {
712 			hcintmsk |= HCINTMSK_NAK;
713 			hcintmsk |= HCINTMSK_NYET;
714 			if (chan->do_ping)
715 				hcintmsk |= HCINTMSK_ACK;
716 		}
717 
718 		if (chan->do_split) {
719 			hcintmsk |= HCINTMSK_NAK;
720 			if (chan->complete_split)
721 				hcintmsk |= HCINTMSK_NYET;
722 			else
723 				hcintmsk |= HCINTMSK_ACK;
724 		}
725 
726 		if (chan->error_state)
727 			hcintmsk |= HCINTMSK_ACK;
728 		break;
729 
730 	case USB_ENDPOINT_XFER_INT:
731 		if (dbg_perio())
732 			dev_vdbg(hsotg->dev, "intr\n");
733 		hcintmsk |= HCINTMSK_XFERCOMPL;
734 		hcintmsk |= HCINTMSK_NAK;
735 		hcintmsk |= HCINTMSK_STALL;
736 		hcintmsk |= HCINTMSK_XACTERR;
737 		hcintmsk |= HCINTMSK_DATATGLERR;
738 		hcintmsk |= HCINTMSK_FRMOVRUN;
739 
740 		if (chan->ep_is_in)
741 			hcintmsk |= HCINTMSK_BBLERR;
742 		if (chan->error_state)
743 			hcintmsk |= HCINTMSK_ACK;
744 		if (chan->do_split) {
745 			if (chan->complete_split)
746 				hcintmsk |= HCINTMSK_NYET;
747 			else
748 				hcintmsk |= HCINTMSK_ACK;
749 		}
750 		break;
751 
752 	case USB_ENDPOINT_XFER_ISOC:
753 		if (dbg_perio())
754 			dev_vdbg(hsotg->dev, "isoc\n");
755 		hcintmsk |= HCINTMSK_XFERCOMPL;
756 		hcintmsk |= HCINTMSK_FRMOVRUN;
757 		hcintmsk |= HCINTMSK_ACK;
758 
759 		if (chan->ep_is_in) {
760 			hcintmsk |= HCINTMSK_XACTERR;
761 			hcintmsk |= HCINTMSK_BBLERR;
762 		}
763 		break;
764 	default:
765 		dev_err(hsotg->dev, "## Unknown EP type ##\n");
766 		break;
767 	}
768 
769 	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
770 	if (dbg_hc(chan))
771 		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
772 }
773 
774 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
775 				    struct dwc2_host_chan *chan)
776 {
777 	u32 hcintmsk = HCINTMSK_CHHLTD;
778 
779 	/*
780 	 * For Descriptor DMA mode, the core halts the channel on AHB error.
781 	 * Interrupt is not required.
782 	 */
783 	if (hsotg->core_params->dma_desc_enable <= 0) {
784 		if (dbg_hc(chan))
785 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
786 		hcintmsk |= HCINTMSK_AHBERR;
787 	} else {
788 		if (dbg_hc(chan))
789 			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
790 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
791 			hcintmsk |= HCINTMSK_XFERCOMPL;
792 	}
793 
794 	if (chan->error_state && !chan->do_split &&
795 	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
796 		if (dbg_hc(chan))
797 			dev_vdbg(hsotg->dev, "setting ACK\n");
798 		hcintmsk |= HCINTMSK_ACK;
799 		if (chan->ep_is_in) {
800 			hcintmsk |= HCINTMSK_DATATGLERR;
801 			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
802 				hcintmsk |= HCINTMSK_NAK;
803 		}
804 	}
805 
806 	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
807 	if (dbg_hc(chan))
808 		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
809 }
810 
811 static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
812 				struct dwc2_host_chan *chan)
813 {
814 	u32 intmsk;
815 
816 	if (hsotg->core_params->dma_enable > 0) {
817 		if (dbg_hc(chan))
818 			dev_vdbg(hsotg->dev, "DMA enabled\n");
819 		dwc2_hc_enable_dma_ints(hsotg, chan);
820 	} else {
821 		if (dbg_hc(chan))
822 			dev_vdbg(hsotg->dev, "DMA disabled\n");
823 		dwc2_hc_enable_slave_ints(hsotg, chan);
824 	}
825 
826 	/* Enable the top level host channel interrupt */
827 	intmsk = DWC2_READ_4(hsotg, HAINTMSK);
828 	intmsk |= 1 << chan->hc_num;
829 	DWC2_WRITE_4(hsotg, HAINTMSK, intmsk);
830 	if (dbg_hc(chan))
831 		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
832 
833 	/* Make sure host channel interrupts are enabled */
834 	intmsk = DWC2_READ_4(hsotg, GINTMSK);
835 	intmsk |= GINTSTS_HCHINT;
836 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
837 	if (dbg_hc(chan))
838 		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
839 }
840 
841 /**
842  * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
843  * a specific endpoint
844  *
845  * @hsotg: Programming view of DWC_otg controller
846  * @chan:  Information needed to initialize the host channel
847  *
848  * The HCCHARn register is set up with the characteristics specified in chan.
849  * Host channel interrupts that may need to be serviced while this transfer is
850  * in progress are enabled.
851  */
852 void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
853 {
854 	u8 hc_num = chan->hc_num;
855 	u32 hcintmsk;
856 	u32 hcchar;
857 	u32 hcsplt = 0;
858 
859 	if (dbg_hc(chan))
860 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
861 
862 	/* Clear old interrupt conditions for this host channel */
863 	hcintmsk = 0xffffffff;
864 	hcintmsk &= ~HCINTMSK_RESERVED14_31;
865 	DWC2_WRITE_4(hsotg, HCINT(hc_num), hcintmsk);
866 
867 	/* Enable channel interrupts required for this transfer */
868 	dwc2_hc_enable_ints(hsotg, chan);
869 
870 	/*
871 	 * Program the HCCHARn register with the endpoint characteristics for
872 	 * the current transfer
873 	 */
874 	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
875 	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
876 	if (chan->ep_is_in)
877 		hcchar |= HCCHAR_EPDIR;
878 	if (chan->speed == USB_SPEED_LOW)
879 		hcchar |= HCCHAR_LSPDDEV;
880 	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
881 	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
882 	DWC2_WRITE_4(hsotg, HCCHAR(hc_num), hcchar);
883 	if (dbg_hc(chan)) {
884 		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
885 			 hc_num, hcchar);
886 
887 		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
888 			 __func__, hc_num);
889 		dev_vdbg(hsotg->dev, "	 Dev Addr: %d\n",
890 			 chan->dev_addr);
891 		dev_vdbg(hsotg->dev, "	 Ep Num: %d\n",
892 			 chan->ep_num);
893 		dev_vdbg(hsotg->dev, "	 Is In: %d\n",
894 			 chan->ep_is_in);
895 		dev_vdbg(hsotg->dev, "	 Is Low Speed: %d\n",
896 			 chan->speed == USB_SPEED_LOW);
897 		dev_vdbg(hsotg->dev, "	 Ep Type: %d\n",
898 			 chan->ep_type);
899 		dev_vdbg(hsotg->dev, "	 Max Pkt: %d\n",
900 			 chan->max_packet);
901 	}
902 
903 	/* Program the HCSPLT register for SPLITs */
904 	if (chan->do_split) {
905 		if (dbg_hc(chan))
906 			dev_vdbg(hsotg->dev,
907 				 "Programming HC %d with split --> %s\n",
908 				 hc_num,
909 				 chan->complete_split ? "CSPLIT" : "SSPLIT");
910 		if (chan->complete_split)
911 			hcsplt |= HCSPLT_COMPSPLT;
912 		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
913 			  HCSPLT_XACTPOS_MASK;
914 		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
915 			  HCSPLT_HUBADDR_MASK;
916 		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
917 			  HCSPLT_PRTADDR_MASK;
918 		if (dbg_hc(chan)) {
919 			dev_vdbg(hsotg->dev, "	  comp split %d\n",
920 				 chan->complete_split);
921 			dev_vdbg(hsotg->dev, "	  xact pos %d\n",
922 				 chan->xact_pos);
923 			dev_vdbg(hsotg->dev, "	  hub addr %d\n",
924 				 chan->hub_addr);
925 			dev_vdbg(hsotg->dev, "	  hub port %d\n",
926 				 chan->hub_port);
927 			dev_vdbg(hsotg->dev, "	  is_in %d\n",
928 				 chan->ep_is_in);
929 			dev_vdbg(hsotg->dev, "	  Max Pkt %d\n",
930 				 chan->max_packet);
931 			dev_vdbg(hsotg->dev, "	  xferlen %d\n",
932 				 chan->xfer_len);
933 		}
934 	}
935 
936 	DWC2_WRITE_4(hsotg, HCSPLT(hc_num), hcsplt);
937 }
938 
939 /**
940  * dwc2_hc_halt() - Attempts to halt a host channel
941  *
942  * @hsotg:       Controller register interface
943  * @chan:        Host channel to halt
944  * @halt_status: Reason for halting the channel
945  *
946  * This function should only be called in Slave mode or to abort a transfer in
947  * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
948  * controller halts the channel when the transfer is complete or a condition
949  * occurs that requires application intervention.
950  *
951  * In slave mode, checks for a free request queue entry, then sets the Channel
952  * Enable and Channel Disable bits of the Host Channel Characteristics
953  * register of the specified channel to initiate the halt. If there is no free
954  * request queue entry, sets only the Channel Disable bit of the HCCHARn
955  * register to flush requests for this channel. In the latter case, sets a
956  * flag to indicate that the host channel needs to be halted when a request
957  * queue slot is open.
958  *
959  * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
960  * HCCHARn register. The controller ensures there is space in the request
961  * queue before submitting the halt request.
962  *
963  * Some time may elapse before the core flushes any posted requests for this
964  * host channel and halts. The Channel Halted interrupt handler completes the
965  * deactivation of the host channel.
966  */
967 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
968 		  enum dwc2_halt_status halt_status)
969 {
970 	u32 nptxsts, hptxsts, hcchar;
971 
972 	if (dbg_hc(chan))
973 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
974 	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
975 		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
976 
977 	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
978 	    halt_status == DWC2_HC_XFER_AHB_ERR) {
979 		/*
980 		 * Disable all channel interrupts except Ch Halted. The QTD
981 		 * and QH state associated with this transfer has been cleared
982 		 * (in the case of URB_DEQUEUE), so the channel needs to be
983 		 * shut down carefully to prevent crashes.
984 		 */
985 		u32 hcintmsk = HCINTMSK_CHHLTD;
986 
987 		dev_vdbg(hsotg->dev, "dequeue/error\n");
988 		DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
989 
990 		/*
991 		 * Make sure no other interrupts besides halt are currently
992 		 * pending. Handling another interrupt could cause a crash due
993 		 * to the QTD and QH state.
994 		 */
995 		DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), ~hcintmsk);
996 
997 		/*
998 		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
999 		 * even if the channel was already halted for some other
1000 		 * reason
1001 		 */
1002 		chan->halt_status = halt_status;
1003 
1004 		hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1005 		if (!(hcchar & HCCHAR_CHENA)) {
1006 			/*
1007 			 * The channel is either already halted or it hasn't
1008 			 * started yet. In DMA mode, the transfer may halt if
1009 			 * it finishes normally or a condition occurs that
1010 			 * requires driver intervention. Don't want to halt
1011 			 * the channel again. In either Slave or DMA mode,
1012 			 * it's possible that the transfer has been assigned
1013 			 * to a channel, but not started yet when an URB is
1014 			 * dequeued. Don't want to halt a channel that hasn't
1015 			 * started yet.
1016 			 */
1017 			return;
1018 		}
1019 	}
1020 	if (chan->halt_pending) {
1021 		/*
1022 		 * A halt has already been issued for this channel. This might
1023 		 * happen when a transfer is aborted by a higher level in
1024 		 * the stack.
1025 		 */
1026 		dev_vdbg(hsotg->dev,
1027 			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1028 			 __func__, chan->hc_num);
1029 		return;
1030 	}
1031 
1032 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1033 
1034 	/* No need to set the bit in DDMA for disabling the channel */
1035 	/* TODO check it everywhere channel is disabled */
1036 	if (hsotg->core_params->dma_desc_enable <= 0) {
1037 		if (dbg_hc(chan))
1038 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1039 		hcchar |= HCCHAR_CHENA;
1040 	} else {
1041 		if (dbg_hc(chan))
1042 			dev_dbg(hsotg->dev, "desc DMA enabled\n");
1043 	}
1044 	hcchar |= HCCHAR_CHDIS;
1045 
1046 	if (hsotg->core_params->dma_enable <= 0) {
1047 		if (dbg_hc(chan))
1048 			dev_vdbg(hsotg->dev, "DMA not enabled\n");
1049 		hcchar |= HCCHAR_CHENA;
1050 
1051 		/* Check for space in the request queue to issue the halt */
1052 		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1053 		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1054 			dev_vdbg(hsotg->dev, "control/bulk\n");
1055 			nptxsts = DWC2_READ_4(hsotg, GNPTXSTS);
1056 			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1057 				dev_vdbg(hsotg->dev, "Disabling channel\n");
1058 				hcchar &= ~HCCHAR_CHENA;
1059 			}
1060 		} else {
1061 			if (dbg_perio())
1062 				dev_vdbg(hsotg->dev, "isoc/intr\n");
1063 			hptxsts = DWC2_READ_4(hsotg, HPTXSTS);
1064 			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1065 			    hsotg->queuing_high_bandwidth) {
1066 				if (dbg_perio())
1067 					dev_vdbg(hsotg->dev, "Disabling channel\n");
1068 				hcchar &= ~HCCHAR_CHENA;
1069 			}
1070 		}
1071 	} else {
1072 		if (dbg_hc(chan))
1073 			dev_vdbg(hsotg->dev, "DMA enabled\n");
1074 	}
1075 
1076 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1077 	chan->halt_status = halt_status;
1078 
1079 	if (hcchar & HCCHAR_CHENA) {
1080 		if (dbg_hc(chan))
1081 			dev_vdbg(hsotg->dev, "Channel enabled\n");
1082 		chan->halt_pending = 1;
1083 		chan->halt_on_queue = 0;
1084 	} else {
1085 		if (dbg_hc(chan))
1086 			dev_vdbg(hsotg->dev, "Channel disabled\n");
1087 		chan->halt_on_queue = 1;
1088 	}
1089 
1090 	if (dbg_hc(chan)) {
1091 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1092 			 chan->hc_num);
1093 		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
1094 			 hcchar);
1095 		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
1096 			 chan->halt_pending);
1097 		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
1098 			 chan->halt_on_queue);
1099 		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
1100 			 chan->halt_status);
1101 	}
1102 }
1103 
1104 /**
1105  * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1106  *
1107  * @hsotg: Programming view of DWC_otg controller
1108  * @chan:  Identifies the host channel to clean up
1109  *
1110  * This function is normally called after a transfer is done and the host
1111  * channel is being released
1112  */
1113 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1114 {
1115 	u32 hcintmsk;
1116 
1117 	chan->xfer_started = 0;
1118 
1119 	/*
1120 	 * Clear channel interrupt enables and any unhandled channel interrupt
1121 	 * conditions
1122 	 */
1123 	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), 0);
1124 	hcintmsk = 0xffffffff;
1125 	hcintmsk &= ~HCINTMSK_RESERVED14_31;
1126 	DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), hcintmsk);
1127 }
1128 
1129 /**
1130  * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1131  * which frame a periodic transfer should occur
1132  *
1133  * @hsotg:  Programming view of DWC_otg controller
1134  * @chan:   Identifies the host channel to set up and its properties
1135  * @hcchar: Current value of the HCCHAR register for the specified host channel
1136  *
1137  * This function has no effect on non-periodic transfers
1138  */
1139 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1140 				       struct dwc2_host_chan *chan, u32 *hcchar)
1141 {
1142 	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1143 	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1144 		/* 1 if _next_ frame is odd, 0 if it's even */
1145 		if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
1146 			*hcchar |= HCCHAR_ODDFRM;
1147 	}
1148 }
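
/*
 * Example (hypothetical frame number): if dwc2_hcd_get_frame_number()
 * returns 0x3a4 (even), the transfer will go out in the next, odd
 * (micro)frame 0x3a5, so HCCHAR_ODDFRM is set; if the current frame number
 * were odd, the bit would be left clear.
 */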
1149 
1150 static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1151 {
1152 	/* Set up the initial PID for the transfer */
1153 	if (chan->speed == USB_SPEED_HIGH) {
1154 		if (chan->ep_is_in) {
1155 			if (chan->multi_count == 1)
1156 				chan->data_pid_start = DWC2_HC_PID_DATA0;
1157 			else if (chan->multi_count == 2)
1158 				chan->data_pid_start = DWC2_HC_PID_DATA1;
1159 			else
1160 				chan->data_pid_start = DWC2_HC_PID_DATA2;
1161 		} else {
1162 			if (chan->multi_count == 1)
1163 				chan->data_pid_start = DWC2_HC_PID_DATA0;
1164 			else
1165 				chan->data_pid_start = DWC2_HC_PID_MDATA;
1166 		}
1167 	} else {
1168 		chan->data_pid_start = DWC2_HC_PID_DATA0;
1169 	}
1170 }
1171 
1172 /**
1173  * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1174  * the Host Channel
1175  *
1176  * @hsotg: Programming view of DWC_otg controller
1177  * @chan:  Information needed to initialize the host channel
1178  *
1179  * This function should only be called in Slave mode. For a channel associated
1180  * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1181  * associated with a periodic EP, the periodic Tx FIFO is written.
1182  *
1183  * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1184  * the number of bytes written to the Tx FIFO.
1185  */
1186 static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1187 				 struct dwc2_host_chan *chan)
1188 {
1189 	u32 i;
1190 	u32 remaining_count;
1191 	u32 byte_count;
1192 	u32 dword_count;
1193 	u32 *data_buf = (u32 *)chan->xfer_buf;
1194 	u32 data_fifo;
1195 
1196 	if (dbg_hc(chan))
1197 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1198 
1199 	data_fifo = HCFIFO(chan->hc_num);
1200 
1201 	remaining_count = chan->xfer_len - chan->xfer_count;
1202 	if (remaining_count > chan->max_packet)
1203 		byte_count = chan->max_packet;
1204 	else
1205 		byte_count = remaining_count;
1206 
1207 	dword_count = (byte_count + 3) / 4;
1208 
1209 	if (((unsigned long)data_buf & 0x3) == 0) {
1210 		/* xfer_buf is DWORD aligned */
1211 		for (i = 0; i < dword_count; i++, data_buf++)
1212 			DWC2_WRITE_4(hsotg, data_fifo, *data_buf);
1213 	} else {
1214 		/* xfer_buf is not DWORD aligned */
1215 		for (i = 0; i < dword_count; i++, data_buf++) {
1216 			u32 data = data_buf[0] | data_buf[1] << 8 |
1217 				   data_buf[2] << 16 | data_buf[3] << 24;
1218 			DWC2_WRITE_4(hsotg, data_fifo, data);
1219 		}
1220 	}
1221 
1222 	chan->xfer_count += byte_count;
1223 	chan->xfer_buf += byte_count;
1224 }
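
/*
 * Worked example (hypothetical transfer): for a channel with max_packet = 64,
 * xfer_len = 100 and xfer_count = 0, the first call writes one 64-byte packet
 * as 16 words and advances xfer_count/xfer_buf by 64; a later call (typically
 * from dwc2_hc_continue_transfer()) writes the remaining 36 bytes as 9 words.
 */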
1225 
1226 /**
1227  * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1228  * channel and starts the transfer
1229  *
1230  * @hsotg: Programming view of DWC_otg controller
1231  * @chan:  Information needed to initialize the host channel. The xfer_len value
1232  *         may be reduced to accommodate the max widths of the XferSize and
1233  *         PktCnt fields in the HCTSIZn register. The multi_count value may be
1234  *         changed to reflect the final xfer_len value.
1235  *
1236  * This function may be called in either Slave mode or DMA mode. In Slave mode,
1237  * the caller must ensure that there is sufficient space in the request queue
1238  * and Tx Data FIFO.
1239  *
1240  * For an OUT transfer in Slave mode, it loads a data packet into the
1241  * appropriate FIFO. If necessary, additional data packets are loaded in the
1242  * Host ISR.
1243  *
1244  * For an IN transfer in Slave mode, a data packet is requested. The data
1245  * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1246  * additional data packets are requested in the Host ISR.
1247  *
1248  * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1249  * register along with a packet count of 1 and the channel is enabled. This
1250  * causes a single PING transaction to occur. Other fields in HCTSIZ are
1251  * simply set to 0 since no data transfer occurs in this case.
1252  *
1253  * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1254  * all the information required to perform the subsequent data transfer. In
1255  * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1256  * controller performs the entire PING protocol, then starts the data
1257  * transfer.
1258  */
1259 void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1260 			    struct dwc2_host_chan *chan)
1261 {
1262 	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1263 	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1264 	u32 hcchar;
1265 	u32 hctsiz = 0;
1266 	u16 num_packets;
1267 
1268 	if (dbg_hc(chan))
1269 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1270 
1271 	if (chan->do_ping) {
1272 		if (hsotg->core_params->dma_enable <= 0) {
1273 			if (dbg_hc(chan))
1274 				dev_vdbg(hsotg->dev, "ping, no DMA\n");
1275 			dwc2_hc_do_ping(hsotg, chan);
1276 			chan->xfer_started = 1;
1277 			return;
1278 		} else {
1279 			if (dbg_hc(chan))
1280 				dev_vdbg(hsotg->dev, "ping, DMA\n");
1281 			hctsiz |= TSIZ_DOPNG;
1282 		}
1283 	}
1284 
1285 	if (chan->do_split) {
1286 		if (dbg_hc(chan))
1287 			dev_vdbg(hsotg->dev, "split\n");
1288 		num_packets = 1;
1289 
1290 		if (chan->complete_split && !chan->ep_is_in)
1291 			/*
1292 			 * For CSPLIT OUT Transfer, set the size to 0 so the
1293 			 * core doesn't expect any data written to the FIFO
1294 			 */
1295 			chan->xfer_len = 0;
1296 		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1297 			chan->xfer_len = chan->max_packet;
1298 		else if (!chan->ep_is_in && chan->xfer_len > 188)
1299 			chan->xfer_len = 188;
1300 
1301 		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1302 			  TSIZ_XFERSIZE_MASK;
1303 	} else {
1304 		if (dbg_hc(chan))
1305 			dev_vdbg(hsotg->dev, "no split\n");
1306 		/*
1307 		 * Ensure that the transfer length and packet count will fit
1308 		 * in the widths allocated for them in the HCTSIZn register
1309 		 */
1310 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1311 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1312 			/*
1313 			 * Make sure the transfer size is no larger than one
1314 			 * (micro)frame's worth of data. (A check was done
1315 			 * when the periodic transfer was accepted to ensure
1316 			 * that a (micro)frame's worth of data can be
1317 			 * programmed into a channel.)
1318 			 */
1319 			u32 max_periodic_len =
1320 				chan->multi_count * chan->max_packet;
1321 
1322 			if (chan->xfer_len > max_periodic_len)
1323 				chan->xfer_len = max_periodic_len;
1324 		} else if (chan->xfer_len > max_hc_xfer_size) {
1325 			/*
1326 			 * Make sure that xfer_len is a multiple of max packet
1327 			 * size
1328 			 */
1329 			chan->xfer_len =
1330 				max_hc_xfer_size - chan->max_packet + 1;
1331 		}
1332 
1333 		if (chan->xfer_len > 0) {
1334 			num_packets = (chan->xfer_len + chan->max_packet - 1) /
1335 					chan->max_packet;
1336 			if (num_packets > max_hc_pkt_count) {
1337 				num_packets = max_hc_pkt_count;
1338 				chan->xfer_len = num_packets * chan->max_packet;
1339 			}
1340 		} else {
1341 			/* Need 1 packet for transfer length of 0 */
1342 			num_packets = 1;
1343 		}
1344 
1345 		if (chan->ep_is_in)
1346 			/*
1347 			 * Always program an integral # of max packets for IN
1348 			 * transfers
1349 			 */
1350 			chan->xfer_len = num_packets * chan->max_packet;
1351 
1352 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1353 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1354 			/*
1355 			 * Make sure that the multi_count field matches the
1356 			 * actual transfer length
1357 			 */
1358 			chan->multi_count = num_packets;
1359 
1360 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1361 			dwc2_set_pid_isoc(chan);
1362 
1363 		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1364 			  TSIZ_XFERSIZE_MASK;
1365 	}
1366 
1367 	chan->start_pkt_count = num_packets;
1368 	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1369 	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1370 		  TSIZ_SC_MC_PID_MASK;
1371 	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
1372 	if (dbg_hc(chan)) {
1373 		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1374 			 hctsiz, chan->hc_num);
1375 
1376 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1377 			 chan->hc_num);
1378 		dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n",
1379 			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1380 			 TSIZ_XFERSIZE_SHIFT);
1381 		dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n",
1382 			 (hctsiz & TSIZ_PKTCNT_MASK) >>
1383 			 TSIZ_PKTCNT_SHIFT);
1384 		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
1385 			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1386 			 TSIZ_SC_MC_PID_SHIFT);
1387 	}
1388 
1389 	if (hsotg->core_params->dma_enable > 0) {
1390 		dma_addr_t dma_addr;
1391 
1392 		if (chan->align_buf) {
1393 			if (dbg_hc(chan))
1394 				dev_vdbg(hsotg->dev, "align_buf\n");
1395 			dma_addr = chan->align_buf;
1396 		} else {
1397 			dma_addr = chan->xfer_dma;
1398 		}
1399 		DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num), (u32)dma_addr);
1400 		if (dbg_hc(chan))
1401 			dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1402 				 (unsigned long)dma_addr, chan->hc_num);
1403 	}
1404 
1405 	/* Start the split */
1406 	if (chan->do_split) {
1407 		u32 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chan->hc_num));
1408 
1409 		hcsplt |= HCSPLT_SPLTENA;
1410 		DWC2_WRITE_4(hsotg, HCSPLT(chan->hc_num), hcsplt);
1411 	}
1412 
1413 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1414 	hcchar &= ~HCCHAR_MULTICNT_MASK;
1415 	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1416 		  HCCHAR_MULTICNT_MASK;
1417 	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1418 
1419 	if (hcchar & HCCHAR_CHDIS)
1420 		dev_warn(hsotg->dev,
1421 			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1422 			 __func__, chan->hc_num, hcchar);
1423 
1424 	/* Set host channel enable after all other setup is complete */
1425 	hcchar |= HCCHAR_CHENA;
1426 	hcchar &= ~HCCHAR_CHDIS;
1427 
1428 	if (dbg_hc(chan))
1429 		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
1430 			 (hcchar & HCCHAR_MULTICNT_MASK) >>
1431 			 HCCHAR_MULTICNT_SHIFT);
1432 
1433 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1434 	if (dbg_hc(chan))
1435 		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1436 			 chan->hc_num);
1437 
1438 	chan->xfer_started = 1;
1439 	chan->requests++;
1440 
1441 	if (hsotg->core_params->dma_enable <= 0 &&
1442 	    !chan->ep_is_in && chan->xfer_len > 0)
1443 		/* Load OUT packet into the appropriate Tx FIFO */
1444 		dwc2_hc_write_packet(hsotg, chan);
1445 }
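
/*
 * Worked example of the length/packet-count fixup above (hypothetical
 * values): for a non-split bulk IN with max_packet = 512 and an initial
 * xfer_len = 3000, num_packets = (3000 + 511) / 512 = 6 and, because the
 * endpoint is IN, xfer_len is rounded up to 6 * 512 = 3072.  HCTSIZ is then
 * programmed with XferSize = 3072 and PktCnt = 6.
 */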
1446 
1447 /**
1448  * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1449  * host channel and starts the transfer in Descriptor DMA mode
1450  *
1451  * @hsotg: Programming view of DWC_otg controller
1452  * @chan:  Information needed to initialize the host channel
1453  *
1454  * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1455  * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1456  * with micro-frame bitmap.
1457  *
1458  * Initializes HCDMA register with descriptor list address and CTD value then
1459  * starts the transfer via enabling the channel.
1460  */
1461 void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1462 				 struct dwc2_host_chan *chan)
1463 {
1464 	u32 hcchar;
1465 	u32 hc_dma;
1466 	u32 hctsiz = 0;
1467 
1468 	if (chan->do_ping)
1469 		hctsiz |= TSIZ_DOPNG;
1470 
1471 	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1472 		dwc2_set_pid_isoc(chan);
1473 
1474 	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1475 	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1476 		  TSIZ_SC_MC_PID_MASK;
1477 
1478 	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1479 	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1480 
1481 	/* Non-zero only for high-speed interrupt endpoints */
1482 	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1483 
1484 	if (dbg_hc(chan)) {
1485 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1486 			 chan->hc_num);
1487 		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
1488 			 chan->data_pid_start);
1489 		dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);
1490 	}
1491 
1492 	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
1493 
1494 	hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
1495 
1496 	/* Always start from first descriptor */
1497 	hc_dma &= ~HCDMA_CTD_MASK;
1498 	DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num), hc_dma);
1499 	if (dbg_hc(chan))
1500 		dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
1501 			 hc_dma, chan->hc_num);
1502 
1503 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1504 	hcchar &= ~HCCHAR_MULTICNT_MASK;
1505 	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1506 		  HCCHAR_MULTICNT_MASK;
1507 
1508 	if (hcchar & HCCHAR_CHDIS)
1509 		dev_warn(hsotg->dev,
1510 			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1511 			 __func__, chan->hc_num, hcchar);
1512 
1513 	/* Set host channel enable after all other setup is complete */
1514 	hcchar |= HCCHAR_CHENA;
1515 	hcchar &= ~HCCHAR_CHDIS;
1516 
1517 	if (dbg_hc(chan))
1518 		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
1519 			 (hcchar & HCCHAR_MULTICNT_MASK) >>
1520 			 HCCHAR_MULTICNT_SHIFT);
1521 
1522 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1523 	if (dbg_hc(chan))
1524 		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1525 			 chan->hc_num);
1526 
1527 	chan->xfer_started = 1;
1528 	chan->requests++;
1529 }
1530 
1531 /**
1532  * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1533  * a previous call to dwc2_hc_start_transfer()
1534  *
1535  * @hsotg: Programming view of DWC_otg controller
1536  * @chan:  Information needed to initialize the host channel
1537  *
1538  * The caller must ensure there is sufficient space in the request queue and Tx
1539  * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1540  * the controller acts autonomously to complete transfers programmed to a host
1541  * channel.
1542  *
1543  * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1544  * if there is any data remaining to be queued. For an IN transfer, another
1545  * data packet is always requested. For the SETUP phase of a control transfer,
1546  * this function does nothing.
1547  *
1548  * Return: 1 if a new request is queued, 0 if no more requests are required
1549  * for this transfer
1550  */
1551 int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1552 			      struct dwc2_host_chan *chan)
1553 {
1554 	if (dbg_hc(chan))
1555 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1556 			 chan->hc_num);
1557 
1558 	if (chan->do_split)
1559 		/* SPLITs always queue just once per channel */
1560 		return 0;
1561 
1562 	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
1563 		/* SETUPs are queued only once since they can't be NAK'd */
1564 		return 0;
1565 
1566 	if (chan->ep_is_in) {
1567 		/*
1568 		 * Always queue another request for other IN transfers. If
1569 		 * back-to-back INs are issued and NAKs are received for both,
1570 		 * the driver may still be processing the first NAK when the
1571 		 * second NAK is received. When the interrupt handler clears
1572 		 * the NAK interrupt for the first NAK, the second NAK will
1573 		 * not be seen. So we can't depend on the NAK interrupt
1574 		 * handler to requeue a NAK'd request. Instead, IN requests
1575 		 * are issued each time this function is called. When the
1576 		 * transfer completes, the extra requests for the channel will
1577 		 * be flushed.
1578 		 */
1579 		u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1580 
1581 		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1582 		hcchar |= HCCHAR_CHENA;
1583 		hcchar &= ~HCCHAR_CHDIS;
1584 		if (dbg_hc(chan))
1585 			dev_vdbg(hsotg->dev, "	 IN xfer: hcchar = 0x%08x\n",
1586 				 hcchar);
1587 		DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1588 		chan->requests++;
1589 		return 1;
1590 	}
1591 
1592 	/* OUT transfers */
1593 
1594 	if (chan->xfer_count < chan->xfer_len) {
1595 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1596 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1597 			u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1598 
1599 			dwc2_hc_set_even_odd_frame(hsotg, chan,
1600 						   &hcchar);
1601 		}
1602 
1603 		/* Load OUT packet into the appropriate Tx FIFO */
1604 		dwc2_hc_write_packet(hsotg, chan);
1605 		chan->requests++;
1606 		return 1;
1607 	}
1608 
1609 	return 0;
1610 }
1611 
1612 /**
1613  * dwc2_hc_do_ping() - Starts a PING transfer
1614  *
1615  * @hsotg: Programming view of DWC_otg controller
1616  * @chan:  Information needed to initialize the host channel
1617  *
1618  * This function should only be called in Slave mode. The Do Ping bit is set in
1619  * the HCTSIZ register, then the channel is enabled.
1620  */
1621 void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1622 {
1623 	u32 hcchar;
1624 	u32 hctsiz;
1625 
1626 	if (dbg_hc(chan))
1627 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1628 			 chan->hc_num);
1629 
1630 
1631 	hctsiz = TSIZ_DOPNG;
1632 	hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1633 	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
1634 
1635 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1636 	hcchar |= HCCHAR_CHENA;
1637 	hcchar &= ~HCCHAR_CHDIS;
1638 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1639 }
1640 
1641 /**
1642  * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
1643  * the HFIR register according to PHY type and speed
1644  *
1645  * @hsotg: Programming view of DWC_otg controller
1646  *
1647  * NOTE: The caller can modify the value of the HFIR register only after the
1648  * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
1649  * has been set
1650  */
1651 u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
1652 {
1653 	u32 usbcfg;
1654 	u32 hprt0;
1655 	int clock = 60;	/* default value */
1656 
1657 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
1658 	hprt0 = DWC2_READ_4(hsotg, HPRT0);
1659 
1660 	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
1661 	    !(usbcfg & GUSBCFG_PHYIF16))
1662 		clock = 60;
1663 	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
1664 	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
1665 		clock = 48;
1666 	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1667 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1668 		clock = 30;
1669 	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1670 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
1671 		clock = 60;
1672 	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1673 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1674 		clock = 48;
1675 	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
1676 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
1677 		clock = 48;
1678 	if ((usbcfg & GUSBCFG_PHYSEL) &&
1679 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
1680 		clock = 48;
1681 
1682 	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
1683 		/* High speed case */
1684 		return 125 * clock;
1685 	else
1686 		/* FS/LS case */
1687 		return 1000 * clock;
1688 }
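
#if 0
/*
 * Worked example for the calculation above, kept out of the build.  HFIR
 * holds the frame interval in PHY clocks and "clock" is the PHY clock in
 * MHz, so a 60 MHz UTMI+ PHY at high speed gives 125 * 60 = 7500 clocks
 * per 125 us microframe, while a 48 MHz dedicated FS PHY gives
 * 1000 * 48 = 48000 clocks per 1 ms frame.  The helper name below is
 * illustrative only and is not part of the driver.
 */
static u32 frame_interval_clocks(int clock_mhz, int high_speed)
{
	return high_speed ? 125 * clock_mhz : 1000 * clock_mhz;
}
#endif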
1689 
1690 /**
1691  * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
1692  * buffer
1693  *
1694  * @hsotg:   Programming view of DWC_otg controller
1695  * @dest:    Destination buffer for the packet
1696  * @bytes:   Number of bytes to copy to the destination
1697  */
1698 void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
1699 {
1700 	bus_size_t fifo = HCFIFO(0);
1701 	u32 *data_buf = (u32 *)dest;
1702 	int word_count = (bytes + 3) / 4;
1703 	int i;
1704 
1705 	/*
1706 	 * Todo: Account for the case where dest is not dword aligned. This
1707 	 * requires reading data from the FIFO into a u32 temp buffer, then
1708 	 * moving it into the data buffer.
1709 	 */
1710 
1711 	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
1712 
1713 	for (i = 0; i < word_count; i++, data_buf++)
1714 		*data_buf = DWC2_READ_4(hsotg, fifo);
1715 }
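
#if 0
/*
 * A minimal sketch of the alignment handling described in the todo inside
 * dwc2_read_packet() above: read each FIFO word into an aligned temporary
 * and memcpy() only the bytes still needed, so an unaligned or short
 * destination buffer is never over-written.  The function name is
 * illustrative and this is not a drop-in replacement.
 */
static void read_packet_unaligned(struct dwc2_hsotg *hsotg, u8 *dest,
				  u16 bytes)
{
	bus_size_t fifo = HCFIFO(0);

	while (bytes > 0) {
		u32 tmp = DWC2_READ_4(hsotg, fifo);
		u16 chunk = bytes < 4 ? bytes : 4;

		memcpy(dest, &tmp, chunk);
		dest += chunk;
		bytes -= chunk;
	}
}
#endif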
1716 
1717 /**
1718  * dwc2_dump_host_registers() - Prints the host registers
1719  *
1720  * @hsotg: Programming view of DWC_otg controller
1721  *
1722  * NOTE: This function will be removed once the peripheral controller code
1723  * is integrated and the driver is stable
1724  */
1725 void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
1726 {
1727 #ifdef DWC2_DEBUG
1728 	bus_size_t addr;
1729 	int i;
1730 
1731 	dev_dbg(hsotg->dev, "Host Global Registers\n");
1732 	addr = HCFG;
1733 	dev_dbg(hsotg->dev, "HCFG	 @0x%08lX : 0x%08X\n",
1734 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1735 	addr = HFIR;
1736 	dev_dbg(hsotg->dev, "HFIR	 @0x%08lX : 0x%08X\n",
1737 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1738 	addr = HFNUM;
1739 	dev_dbg(hsotg->dev, "HFNUM	 @0x%08lX : 0x%08X\n",
1740 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1741 	addr = HPTXSTS;
1742 	dev_dbg(hsotg->dev, "HPTXSTS	 @0x%08lX : 0x%08X\n",
1743 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1744 	addr = HAINT;
1745 	dev_dbg(hsotg->dev, "HAINT	 @0x%08lX : 0x%08X\n",
1746 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1747 	addr = HAINTMSK;
1748 	dev_dbg(hsotg->dev, "HAINTMSK	 @0x%08lX : 0x%08X\n",
1749 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1750 	if (hsotg->core_params->dma_desc_enable > 0) {
1751 		addr = HFLBADDR;
1752 		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
1753 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1754 	}
1755 
1756 	addr = HPRT0;
1757 	dev_dbg(hsotg->dev, "HPRT0	 @0x%08lX : 0x%08X\n",
1758 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1759 
1760 	for (i = 0; i < hsotg->core_params->host_channels; i++) {
1761 		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
1762 		addr = HCCHAR(i);
1763 		dev_dbg(hsotg->dev, "HCCHAR	 @0x%08lX : 0x%08X\n",
1764 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1765 		addr = HCSPLT(i);
1766 		dev_dbg(hsotg->dev, "HCSPLT	 @0x%08lX : 0x%08X\n",
1767 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1768 		addr = HCINT(i);
1769 		dev_dbg(hsotg->dev, "HCINT	 @0x%08lX : 0x%08X\n",
1770 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1771 		addr = HCINTMSK(i);
1772 		dev_dbg(hsotg->dev, "HCINTMSK	 @0x%08lX : 0x%08X\n",
1773 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1774 		addr = HCTSIZ(i);
1775 		dev_dbg(hsotg->dev, "HCTSIZ	 @0x%08lX : 0x%08X\n",
1776 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1777 		addr = HCDMA(i);
1778 		dev_dbg(hsotg->dev, "HCDMA	 @0x%08lX : 0x%08X\n",
1779 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1780 		if (hsotg->core_params->dma_desc_enable > 0) {
1781 			addr = HCDMAB(i);
1782 			dev_dbg(hsotg->dev, "HCDMAB	 @0x%08lX : 0x%08X\n",
1783 				(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1784 		}
1785 	}
1786 #endif
1787 }
1788 
1789 /**
1790  * dwc2_dump_global_registers() - Prints the core global registers
1791  *
1792  * @hsotg: Programming view of DWC_otg controller
1793  *
1794  * NOTE: This function will be removed once the peripheral controller code
1795  * is integrated and the driver is stable
1796  */
1797 void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
1798 {
1799 #ifdef DWC2_DEBUG
1800 	bus_size_t addr;
1801 
1802 	dev_dbg(hsotg->dev, "Core Global Registers\n");
1803 	addr = GOTGCTL;
1804 	dev_dbg(hsotg->dev, "GOTGCTL	 @0x%08lX : 0x%08X\n",
1805 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1806 	addr = GOTGINT;
1807 	dev_dbg(hsotg->dev, "GOTGINT	 @0x%08lX : 0x%08X\n",
1808 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1809 	addr = GAHBCFG;
1810 	dev_dbg(hsotg->dev, "GAHBCFG	 @0x%08lX : 0x%08X\n",
1811 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1812 	addr = GUSBCFG;
1813 	dev_dbg(hsotg->dev, "GUSBCFG	 @0x%08lX : 0x%08X\n",
1814 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1815 	addr = GRSTCTL;
1816 	dev_dbg(hsotg->dev, "GRSTCTL	 @0x%08lX : 0x%08X\n",
1817 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1818 	addr = GINTSTS;
1819 	dev_dbg(hsotg->dev, "GINTSTS	 @0x%08lX : 0x%08X\n",
1820 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1821 	addr = GINTMSK;
1822 	dev_dbg(hsotg->dev, "GINTMSK	 @0x%08lX : 0x%08X\n",
1823 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1824 	addr = GRXSTSR;
1825 	dev_dbg(hsotg->dev, "GRXSTSR	 @0x%08lX : 0x%08X\n",
1826 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1827 	addr = GRXFSIZ;
1828 	dev_dbg(hsotg->dev, "GRXFSIZ	 @0x%08lX : 0x%08X\n",
1829 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1830 	addr = GNPTXFSIZ;
1831 	dev_dbg(hsotg->dev, "GNPTXFSIZ	 @0x%08lX : 0x%08X\n",
1832 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1833 	addr = GNPTXSTS;
1834 	dev_dbg(hsotg->dev, "GNPTXSTS	 @0x%08lX : 0x%08X\n",
1835 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1836 	addr = GI2CCTL;
1837 	dev_dbg(hsotg->dev, "GI2CCTL	 @0x%08lX : 0x%08X\n",
1838 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1839 	addr = GPVNDCTL;
1840 	dev_dbg(hsotg->dev, "GPVNDCTL	 @0x%08lX : 0x%08X\n",
1841 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1842 	addr = GGPIO;
1843 	dev_dbg(hsotg->dev, "GGPIO	 @0x%08lX : 0x%08X\n",
1844 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1845 	addr = GUID;
1846 	dev_dbg(hsotg->dev, "GUID	 @0x%08lX : 0x%08X\n",
1847 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1848 	addr = GSNPSID;
1849 	dev_dbg(hsotg->dev, "GSNPSID	 @0x%08lX : 0x%08X\n",
1850 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1851 	addr = GHWCFG1;
1852 	dev_dbg(hsotg->dev, "GHWCFG1	 @0x%08lX : 0x%08X\n",
1853 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1854 	addr = GHWCFG2;
1855 	dev_dbg(hsotg->dev, "GHWCFG2	 @0x%08lX : 0x%08X\n",
1856 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1857 	addr = GHWCFG3;
1858 	dev_dbg(hsotg->dev, "GHWCFG3	 @0x%08lX : 0x%08X\n",
1859 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1860 	addr = GHWCFG4;
1861 	dev_dbg(hsotg->dev, "GHWCFG4	 @0x%08lX : 0x%08X\n",
1862 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1863 	addr = GLPMCFG;
1864 	dev_dbg(hsotg->dev, "GLPMCFG	 @0x%08lX : 0x%08X\n",
1865 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1866 	addr = GPWRDN;
1867 	dev_dbg(hsotg->dev, "GPWRDN	 @0x%08lX : 0x%08X\n",
1868 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1869 	addr = GDFIFOCFG;
1870 	dev_dbg(hsotg->dev, "GDFIFOCFG	 @0x%08lX : 0x%08X\n",
1871 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1872 	addr = HPTXFSIZ;
1873 	dev_dbg(hsotg->dev, "HPTXFSIZ	 @0x%08lX : 0x%08X\n",
1874 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1875 
1876 	addr = PCGCTL;
1877 	dev_dbg(hsotg->dev, "PCGCTL	 @0x%08lX : 0x%08X\n",
1878 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1879 #endif
1880 }
1881 
1882 /**
1883  * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
1884  *
1885  * @hsotg: Programming view of DWC_otg controller
1886  * @num:   Tx FIFO to flush
1887  */
1888 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
1889 {
1890 	u32 greset;
1891 	int count = 0;
1892 
1893 	dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
1894 
1895 	greset = GRSTCTL_TXFFLSH;
1896 	greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
1897 	DWC2_WRITE_4(hsotg, GRSTCTL, greset);
1898 
1899 	do {
1900 		greset = DWC2_READ_4(hsotg, GRSTCTL);
1901 		if (++count > 10000) {
1902 			dev_warn(hsotg->dev,
1903 				 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
1904 				 __func__, greset,
1905 				 DWC2_READ_4(hsotg, GNPTXSTS));
1906 			break;
1907 		}
1908 		udelay(1);
1909 	} while (greset & GRSTCTL_TXFFLSH);
1910 
1911 	/* Wait for at least 3 PHY Clocks */
1912 	udelay(1);
1913 }
1914 
1915 /**
1916  * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
1917  *
1918  * @hsotg: Programming view of DWC_otg controller
1919  */
1920 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
1921 {
1922 	u32 greset;
1923 	int count = 0;
1924 
1925 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
1926 
1927 	greset = GRSTCTL_RXFFLSH;
1928 	DWC2_WRITE_4(hsotg, GRSTCTL, greset);
1929 
1930 	do {
1931 		greset = DWC2_READ_4(hsotg, GRSTCTL);
1932 		if (++count > 10000) {
1933 			dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
1934 				 __func__, greset);
1935 			break;
1936 		}
1937 		udelay(1);
1938 	} while (greset & GRSTCTL_RXFFLSH);
1939 
1940 	/* Wait for at least 3 PHY Clocks */
1941 	udelay(1);
1942 }
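
#if 0
/*
 * dwc2_flush_tx_fifo() and dwc2_flush_rx_fifo() above share the same
 * shape: set a self-clearing bit in GRSTCTL, poll until the core clears
 * it, and give up after roughly 10 ms of 1 us polls.  A possible common
 * helper is sketched below; the driver does not currently factor the
 * code this way, and the name is illustrative.
 */
static int dwc2_wait_grstctl_clear(struct dwc2_hsotg *hsotg, u32 bit)
{
	u32 greset;
	int count = 0;

	do {
		greset = DWC2_READ_4(hsotg, GRSTCTL);
		if (++count > 10000)
			return -1;	/* timed out; caller logs a warning */
		udelay(1);
	} while (greset & bit);

	return 0;
}
#endif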
1943 
1944 #define DWC2_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))
1945 
1946 /* Parameter access functions */
1947 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
1948 {
1949 	int valid = 1;
1950 
1951 	switch (val) {
1952 	case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
1953 		if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
1954 			valid = 0;
1955 		break;
1956 	case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
1957 		switch (hsotg->hw_params.op_mode) {
1958 		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
1959 		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
1960 		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
1961 		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
1962 			break;
1963 		default:
1964 			valid = 0;
1965 			break;
1966 		}
1967 		break;
1968 	case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
1969 		/* always valid */
1970 		break;
1971 	default:
1972 		valid = 0;
1973 		break;
1974 	}
1975 
1976 	if (!valid) {
1977 		if (val >= 0)
1978 			dev_err(hsotg->dev,
1979 				"%d invalid for otg_cap parameter. Check HW configuration.\n",
1980 				val);
1981 		switch (hsotg->hw_params.op_mode) {
1982 		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
1983 			val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
1984 			break;
1985 		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
1986 		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
1987 		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
1988 			val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
1989 			break;
1990 		default:
1991 			val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
1992 			break;
1993 		}
1994 		dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
1995 	}
1996 
1997 	hsotg->core_params->otg_cap = val;
1998 }
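
#if 0
/*
 * Every dwc2_set_param_*() routine below follows the shape used in
 * dwc2_set_param_otg_cap() above: check the requested value against the
 * limits read from the GHWCFG registers, report an error only when the
 * caller asked for something explicit (val >= 0), and otherwise fall back
 * quietly to a value the hardware supports.  A condensed sketch of that
 * pattern, with an illustrative name:
 */
static int dwc2_clamp_param(struct dwc2_hsotg *hsotg, const char *name,
			    int val, int valid, int hw_default)
{
	if (valid)
		return val;

	if (val >= 0)
		dev_err(hsotg->dev,
			"%d invalid for %s parameter. Check HW configuration.\n",
			val, name);
	dev_dbg(hsotg->dev, "Setting %s to %d\n", name, hw_default);
	return hw_default;
}
#endif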
1999 
2000 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2001 {
2002 	int valid = 1;
2003 
2004 	if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
2005 		valid = 0;
2006 	if (val < 0)
2007 		valid = 0;
2008 
2009 	if (!valid) {
2010 		if (val >= 0)
2011 			dev_err(hsotg->dev,
2012 				"%d invalid for dma_enable parameter. Check HW configuration.\n",
2013 				val);
2014 		val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
2015 		dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2016 	}
2017 
2018 	hsotg->core_params->dma_enable = val;
2019 }
2020 
2021 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2022 {
2023 	int valid = 1;
2024 
2025 	if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2026 			!hsotg->hw_params.dma_desc_enable))
2027 		valid = 0;
2028 	if (val < 0)
2029 		valid = 0;
2030 
2031 	if (!valid) {
2032 		if (val >= 0)
2033 			dev_err(hsotg->dev,
2034 				"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2035 				val);
2036 		val = (hsotg->core_params->dma_enable > 0 &&
2037 			hsotg->hw_params.dma_desc_enable);
2038 		dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2039 	}
2040 
2041 	hsotg->core_params->dma_desc_enable = val;
2042 }
2043 
2044 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2045 						 int val)
2046 {
2047 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2048 		if (val >= 0) {
2049 			dev_err(hsotg->dev,
2050 				"Wrong value for host_support_fs_ls_low_power\n");
2051 			dev_err(hsotg->dev,
2052 				"host_support_fs_ls_low_power must be 0 or 1\n");
2053 		}
2054 		val = 0;
2055 		dev_dbg(hsotg->dev,
2056 			"Setting host_support_fs_ls_low_power to %d\n", val);
2057 	}
2058 
2059 	hsotg->core_params->host_support_fs_ls_low_power = val;
2060 }
2061 
2062 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2063 {
2064 	int valid = 1;
2065 
2066 	if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
2067 		valid = 0;
2068 	if (val < 0)
2069 		valid = 0;
2070 
2071 	if (!valid) {
2072 		if (val >= 0)
2073 			dev_err(hsotg->dev,
2074 				"%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2075 				val);
2076 		val = hsotg->hw_params.enable_dynamic_fifo;
2077 		dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2078 	}
2079 
2080 	hsotg->core_params->enable_dynamic_fifo = val;
2081 }
2082 
2083 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2084 {
2085 	int valid = 1;
2086 
2087 	if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
2088 		valid = 0;
2089 
2090 	if (!valid) {
2091 		if (val >= 0)
2092 			dev_err(hsotg->dev,
2093 				"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2094 				val);
2095 		val = hsotg->hw_params.host_rx_fifo_size;
2096 		dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2097 	}
2098 
2099 	hsotg->core_params->host_rx_fifo_size = val;
2100 }
2101 
2102 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2103 {
2104 	int valid = 1;
2105 
2106 	if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
2107 		valid = 0;
2108 
2109 	if (!valid) {
2110 		if (val >= 0)
2111 			dev_err(hsotg->dev,
2112 				"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2113 				val);
2114 		val = hsotg->hw_params.host_nperio_tx_fifo_size;
2115 		dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2116 			val);
2117 	}
2118 
2119 	hsotg->core_params->host_nperio_tx_fifo_size = val;
2120 }
2121 
2122 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2123 {
2124 	int valid = 1;
2125 
2126 	if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
2127 		valid = 0;
2128 
2129 	if (!valid) {
2130 		if (val >= 0)
2131 			dev_err(hsotg->dev,
2132 				"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2133 				val);
2134 		val = hsotg->hw_params.host_perio_tx_fifo_size;
2135 		dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2136 			val);
2137 	}
2138 
2139 	hsotg->core_params->host_perio_tx_fifo_size = val;
2140 }
2141 
2142 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2143 {
2144 	int valid = 1;
2145 
2146 	if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
2147 		valid = 0;
2148 
2149 	if (!valid) {
2150 		if (val >= 0)
2151 			dev_err(hsotg->dev,
2152 				"%d invalid for max_transfer_size. Check HW configuration.\n",
2153 				val);
2154 		val = hsotg->hw_params.max_transfer_size;
2155 		dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2156 	}
2157 
2158 	hsotg->core_params->max_transfer_size = val;
2159 }
2160 
2161 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2162 {
2163 	int valid = 1;
2164 
2165 	if (val < 15 || val > hsotg->hw_params.max_packet_count)
2166 		valid = 0;
2167 
2168 	if (!valid) {
2169 		if (val >= 0)
2170 			dev_err(hsotg->dev,
2171 				"%d invalid for max_packet_count. Check HW configuration.\n",
2172 				val);
2173 		val = hsotg->hw_params.max_packet_count;
2174 		dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2175 	}
2176 
2177 	hsotg->core_params->max_packet_count = val;
2178 }
2179 
2180 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2181 {
2182 	int valid = 1;
2183 
2184 	if (val < 1 || val > hsotg->hw_params.host_channels)
2185 		valid = 0;
2186 
2187 	if (!valid) {
2188 		if (val >= 0)
2189 			dev_err(hsotg->dev,
2190 				"%d invalid for host_channels. Check HW configuration.\n",
2191 				val);
2192 		val = hsotg->hw_params.host_channels;
2193 		dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2194 	}
2195 
2196 	hsotg->core_params->host_channels = val;
2197 }
2198 
2199 void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
2200 {
2201 	int valid = 0;
2202 	u32 hs_phy_type, fs_phy_type;
2203 
2204 	if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
2205 			       DWC2_PHY_TYPE_PARAM_ULPI)) {
2206 		if (val >= 0) {
2207 			dev_err(hsotg->dev, "Wrong value for phy_type\n");
2208 			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2209 		}
2210 
2211 		valid = 0;
2212 	}
2213 
2214 	hs_phy_type = hsotg->hw_params.hs_phy_type;
2215 	fs_phy_type = hsotg->hw_params.fs_phy_type;
2216 	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2217 	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2218 	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2219 		valid = 1;
2220 	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2221 		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2222 		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2223 		valid = 1;
2224 	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2225 		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2226 		valid = 1;
2227 
2228 	if (!valid) {
2229 		if (val >= 0)
2230 			dev_err(hsotg->dev,
2231 				"%d invalid for phy_type. Check HW configuration.\n",
2232 				val);
2233 		val = DWC2_PHY_TYPE_PARAM_FS;
2234 		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2235 			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2236 			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2237 				val = DWC2_PHY_TYPE_PARAM_UTMI;
2238 			else
2239 				val = DWC2_PHY_TYPE_PARAM_ULPI;
2240 		}
2241 		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2242 	}
2243 
2244 	hsotg->core_params->phy_type = val;
2245 }
2246 
2247 static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2248 {
2249 	return hsotg->core_params->phy_type;
2250 }
2251 
2252 void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2253 {
2254 	int valid = 1;
2255 
2256 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2257 		if (val >= 0) {
2258 			dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2259 			dev_err(hsotg->dev, "speed parameter must be 0 or 1\n");
2260 		}
2261 		valid = 0;
2262 	}
2263 
2264 	if (val == DWC2_SPEED_PARAM_HIGH &&
2265 	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2266 		valid = 0;
2267 
2268 	if (!valid) {
2269 		if (val >= 0)
2270 			dev_err(hsotg->dev,
2271 				"%d invalid for speed parameter. Check HW configuration.\n",
2272 				val);
2273 		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
2274 				DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
2275 		dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2276 	}
2277 
2278 	hsotg->core_params->speed = val;
2279 }
2280 
2281 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2282 {
2283 	int valid = 1;
2284 
2285 	if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2286 			       DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
2287 		if (val >= 0) {
2288 			dev_err(hsotg->dev,
2289 				"Wrong value for host_ls_low_power_phy_clk parameter\n");
2290 			dev_err(hsotg->dev,
2291 				"host_ls_low_power_phy_clk must be 0 or 1\n");
2292 		}
2293 		valid = 0;
2294 	}
2295 
2296 	if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2297 	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2298 		valid = 0;
2299 
2300 	if (!valid) {
2301 		if (val >= 0)
2302 			dev_err(hsotg->dev,
2303 				"%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2304 				val);
2305 		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2306 			? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2307 			: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2308 		dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2309 			val);
2310 	}
2311 
2312 	hsotg->core_params->host_ls_low_power_phy_clk = val;
2313 }
2314 
2315 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2316 {
2317 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2318 		if (val >= 0) {
2319 			dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2320 			dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
2321 		}
2322 		val = 0;
2323 		dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
2324 	}
2325 
2326 	hsotg->core_params->phy_ulpi_ddr = val;
2327 }
2328 
2329 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2330 {
2331 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2332 		if (val >= 0) {
2333 			dev_err(hsotg->dev,
2334 				"Wrong value for phy_ulpi_ext_vbus\n");
2335 			dev_err(hsotg->dev,
2336 				"phy_ulpi_ext_vbus must be 0 or 1\n");
2337 		}
2338 		val = 0;
2339 		dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2340 	}
2341 
2342 	hsotg->core_params->phy_ulpi_ext_vbus = val;
2343 }
2344 
2345 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2346 {
2347 	int valid = 0;
2348 
2349 	switch (hsotg->hw_params.utmi_phy_data_width) {
2350 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
2351 		valid = (val == 8);
2352 		break;
2353 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
2354 		valid = (val == 16);
2355 		break;
2356 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
2357 		valid = (val == 8 || val == 16);
2358 		break;
2359 	}
2360 
2361 	if (!valid) {
2362 		if (val >= 0) {
2363 			dev_err(hsotg->dev,
2364 				"%d invalid for phy_utmi_width. Check HW configuration.\n",
2365 				val);
2366 		}
2367 		val = (hsotg->hw_params.utmi_phy_data_width ==
2368 		       GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
2369 		dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2370 	}
2371 
2372 	hsotg->core_params->phy_utmi_width = val;
2373 }
2374 
2375 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
2376 {
2377 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2378 		if (val >= 0) {
2379 			dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2380 			dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2381 		}
2382 		val = 0;
2383 		dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
2384 	}
2385 
2386 	hsotg->core_params->ulpi_fs_ls = val;
2387 }
2388 
2389 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
2390 {
2391 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2392 		if (val >= 0) {
2393 			dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2394 			dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2395 		}
2396 		val = 0;
2397 		dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
2398 	}
2399 
2400 	hsotg->core_params->ts_dline = val;
2401 }
2402 
2403 void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
2404 {
2405 	int valid = 1;
2406 
2407 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2408 		if (val >= 0) {
2409 			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
2410 			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
2411 		}
2412 
2413 		valid = 0;
2414 	}
2415 
2416 	if (val == 1 && !(hsotg->hw_params.i2c_enable))
2417 		valid = 0;
2418 
2419 	if (!valid) {
2420 		if (val >= 0)
2421 			dev_err(hsotg->dev,
2422 				"%d invalid for i2c_enable. Check HW configuration.\n",
2423 				val);
2424 		val = hsotg->hw_params.i2c_enable;
2425 		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2426 	}
2427 
2428 	hsotg->core_params->i2c_enable = val;
2429 }
2430 
2431 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
2432 {
2433 	int valid = 1;
2434 
2435 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2436 		if (val >= 0) {
2437 			dev_err(hsotg->dev,
2438 				"Wrong value for en_multiple_tx_fifo\n");
2439 			dev_err(hsotg->dev,
2440 				"en_multiple_tx_fifo must be 0 or 1\n");
2441 		}
2442 		valid = 0;
2443 	}
2444 
2445 	if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
2446 		valid = 0;
2447 
2448 	if (!valid) {
2449 		if (val >= 0)
2450 			dev_err(hsotg->dev,
2451 				"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2452 				val);
2453 		val = hsotg->hw_params.en_multiple_tx_fifo;
2454 		dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
2455 	}
2456 
2457 	hsotg->core_params->en_multiple_tx_fifo = val;
2458 }
2459 
2460 void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
2461 {
2462 	int valid = 1;
2463 
2464 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2465 		if (val >= 0) {
2466 			dev_err(hsotg->dev,
2467 				"'%d' invalid for parameter reload_ctl\n", val);
2468 			dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2469 		}
2470 		valid = 0;
2471 	}
2472 
2473 	if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
2474 		valid = 0;
2475 
2476 	if (!valid) {
2477 		if (val >= 0)
2478 			dev_err(hsotg->dev,
2479 				"%d invalid for parameter reload_ctl. Check HW configuration.\n",
2480 				val);
2481 		val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
2482 		dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
2483 	}
2484 
2485 	hsotg->core_params->reload_ctl = val;
2486 }
2487 
2488 void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
2489 {
2490 	if (val != -1)
2491 		hsotg->core_params->ahbcfg = val;
2492 	else
2493 		hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
2494 						GAHBCFG_HBSTLEN_SHIFT;
2495 }
2496 
2497 void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
2498 {
2499 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2500 		if (val >= 0) {
2501 			dev_err(hsotg->dev,
2502 				"'%d' invalid for parameter otg_ver\n", val);
2503 			dev_err(hsotg->dev,
2504 				"otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2505 		}
2506 		val = 0;
2507 		dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
2508 	}
2509 
2510 	hsotg->core_params->otg_ver = val;
2511 }
2512 
2513 static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
2514 {
2515 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2516 		if (val >= 0) {
2517 			dev_err(hsotg->dev,
2518 				"'%d' invalid for parameter uframe_sched\n",
2519 				val);
2520 			dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
2521 		}
2522 		val = 1;
2523 		dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
2524 	}
2525 
2526 	hsotg->core_params->uframe_sched = val;
2527 }
2528 
2529 /*
2530  * This function is called during module initialization to pass module parameters
2531  * for the DWC_otg core.
2532  */
2533 void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
2534 			 const struct dwc2_core_params *params)
2535 {
2536 	dev_dbg(hsotg->dev, "%s()\n", __func__);
2537 
2538 	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
2539 	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
2540 	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
2541 	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
2542 			params->host_support_fs_ls_low_power);
2543 	dwc2_set_param_enable_dynamic_fifo(hsotg,
2544 			params->enable_dynamic_fifo);
2545 	dwc2_set_param_host_rx_fifo_size(hsotg,
2546 			params->host_rx_fifo_size);
2547 	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
2548 			params->host_nperio_tx_fifo_size);
2549 	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
2550 			params->host_perio_tx_fifo_size);
2551 	dwc2_set_param_max_transfer_size(hsotg,
2552 			params->max_transfer_size);
2553 	dwc2_set_param_max_packet_count(hsotg,
2554 			params->max_packet_count);
2555 	dwc2_set_param_host_channels(hsotg, params->host_channels);
2556 	dwc2_set_param_phy_type(hsotg, params->phy_type);
2557 	dwc2_set_param_speed(hsotg, params->speed);
2558 	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
2559 			params->host_ls_low_power_phy_clk);
2560 	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
2561 	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
2562 			params->phy_ulpi_ext_vbus);
2563 	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
2564 	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
2565 	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
2566 	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
2567 	dwc2_set_param_en_multiple_tx_fifo(hsotg,
2568 			params->en_multiple_tx_fifo);
2569 	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
2570 	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
2571 	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
2572 	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
2573 }
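
#if 0
/*
 * Sketch of how a bus attachment might call dwc2_set_parameters().  The
 * setters above treat a negative value as "not specified" and fall back
 * to what the GHWCFG registers allow without complaining, so a caller can
 * pass -1 for anything it does not care about.  The values and the attach
 * function below are illustrative only; a real parameter table initializes
 * every member of struct dwc2_core_params.
 */
static const struct dwc2_core_params example_params = {
	.otg_cap		= -1,	/* pick from hardware op_mode */
	.dma_enable		= -1,
	.dma_desc_enable	= 0,
	.speed			= -1,
	.host_channels		= -1,
	.phy_type		= -1,
	.phy_utmi_width		= -1,
	.ahbcfg			= -1,	/* defaults to GAHBCFG_HBSTLEN_INCR4 */
	.otg_ver		= 0,
	.uframe_sched		= 1,
};

static void example_attach(struct dwc2_hsotg *hsotg)
{
	dwc2_set_parameters(hsotg, &example_params);
}
#endif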
2574 
2575 /**
2576  * During device initialization, read various hardware configuration
2577  * registers and interpret the contents.
2578  */
2579 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
2580 {
2581 	struct dwc2_hw_params *hw = &hsotg->hw_params;
2582 	unsigned width;
2583 	u32 hwcfg2, hwcfg3, hwcfg4;
2584 	u32 hptxfsiz, grxfsiz, gnptxfsiz;
2585 	u32 gusbcfg;
2586 
2587 	/*
2588 	 * Attempt to ensure this device is really a DWC_otg Controller.
2589 	 * Read and verify the GSNPSID register contents. The value should be
2590 	 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
2591 	 * as in "OTG version 2.xx" or "OTG version 3.xx".
2592 	 */
2593 	hw->snpsid = DWC2_READ_4(hsotg, GSNPSID);
2594 	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
2595 	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
2596 		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
2597 			hw->snpsid);
2598 		return -ENODEV;
2599 	}
2600 
2601 	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
2602 		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
2603 		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
2604 
2605 	hwcfg2 = DWC2_READ_4(hsotg, GHWCFG2);
2606 	hwcfg3 = DWC2_READ_4(hsotg, GHWCFG3);
2607 	hwcfg4 = DWC2_READ_4(hsotg, GHWCFG4);
2608 	gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ);
2609 	grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
2610 
2611 	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", DWC2_READ_4(hsotg, GHWCFG1));
2612 	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
2613 	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
2614 	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
2615 	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
2616 	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
2617 
2618 	/* Force host mode to get HPTXFSIZ exact power on value */
2619 	gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
2620 	gusbcfg |= GUSBCFG_FORCEHOSTMODE;
2621 	DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
2622 	usleep_range(100000, 150000);
2623 
2624 	hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ);
2625 	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
2626 	gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
2627 	gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
2628 	DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
2629 	usleep_range(100000, 150000);
2630 
2631 	/* hwcfg2 */
2632 	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
2633 		      GHWCFG2_OP_MODE_SHIFT;
2634 	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
2635 		   GHWCFG2_ARCHITECTURE_SHIFT;
2636 	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
2637 	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
2638 				GHWCFG2_NUM_HOST_CHAN_SHIFT);
2639 	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
2640 			  GHWCFG2_HS_PHY_TYPE_SHIFT;
2641 	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
2642 			  GHWCFG2_FS_PHY_TYPE_SHIFT;
2643 	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
2644 			 GHWCFG2_NUM_DEV_EP_SHIFT;
2645 	hw->nperio_tx_q_depth =
2646 		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
2647 		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
2648 	hw->host_perio_tx_q_depth =
2649 		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
2650 		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
2651 	hw->dev_token_q_depth =
2652 		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
2653 		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
2654 
2655 	/* hwcfg3 */
2656 	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
2657 		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
2658 	hw->max_transfer_size = (1 << (width + 11)) - 1;
2659 	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
2660 		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
2661 	hw->max_packet_count = (1 << (width + 4)) - 1;
2662 	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
2663 	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
2664 			      GHWCFG3_DFIFO_DEPTH_SHIFT;
2665 
2666 	/* hwcfg4 */
2667 	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
2668 	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
2669 				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
2670 	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
2671 	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
2672 	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
2673 				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
2674 
2675 	/* fifo sizes */
2676 	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
2677 				GRXFSIZ_DEPTH_SHIFT;
2678 	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2679 				       FIFOSIZE_DEPTH_SHIFT;
2680 	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2681 				      FIFOSIZE_DEPTH_SHIFT;
2682 
2683 	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
2684 	dev_dbg(hsotg->dev, "  op_mode=%d\n",
2685 		hw->op_mode);
2686 	dev_dbg(hsotg->dev, "  arch=%d\n",
2687 		hw->arch);
2688 	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
2689 		hw->dma_desc_enable);
2690 	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
2691 		hw->power_optimized);
2692 	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
2693 		hw->i2c_enable);
2694 	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
2695 		hw->hs_phy_type);
2696 	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
2697 		hw->fs_phy_type);
2698 	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
2699 		hw->utmi_phy_data_width);
2700 	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
2701 		hw->num_dev_ep);
2702 	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
2703 		hw->num_dev_perio_in_ep);
2704 	dev_dbg(hsotg->dev, "  host_channels=%d\n",
2705 		hw->host_channels);
2706 	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
2707 		hw->max_transfer_size);
2708 	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
2709 		hw->max_packet_count);
2710 	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
2711 		hw->nperio_tx_q_depth);
2712 	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
2713 		hw->host_perio_tx_q_depth);
2714 	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
2715 		hw->dev_token_q_depth);
2716 	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
2717 		hw->enable_dynamic_fifo);
2718 	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
2719 		hw->en_multiple_tx_fifo);
2720 	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
2721 		hw->total_fifo_size);
2722 	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
2723 		hw->host_rx_fifo_size);
2724 	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
2725 		hw->host_nperio_tx_fifo_size);
2726 	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
2727 		hw->host_perio_tx_fifo_size);
2728 	dev_dbg(hsotg->dev, "\n");
2729 
2730 	return 0;
2731 }
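
#if 0
/*
 * Worked example of the GHWCFG3 decoding above, with illustrative field
 * values: a transfer size counter width of 8 yields a maximum transfer
 * size of (1 << (8 + 11)) - 1 = 524287 bytes, and a packet size counter
 * width of 6 yields a maximum packet count of (1 << (6 + 4)) - 1 = 1023
 * packets.
 */
static void example_decode_hwcfg3_widths(void)
{
	unsigned xfer_width = 8;				/* illustrative */
	unsigned pkt_width = 6;					/* illustrative */
	u32 max_transfer_size = (1 << (xfer_width + 11)) - 1;	/* 524287 */
	u32 max_packet_count = (1 << (pkt_width + 4)) - 1;	/* 1023 */

	(void)max_transfer_size;
	(void)max_packet_count;
}
#endif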
2732 
2733 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
2734 {
2735 	return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
2736 }
2737 
2738 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
2739 {
2740 	if (DWC2_READ_4(hsotg, GSNPSID) == 0xffffffff)
2741 		return false;
2742 	else
2743 		return true;
2744 }
2745 
2746 /**
2747  * dwc2_enable_global_interrupts() - Enables the controller's Global
2748  * Interrupt in the AHB Config register
2749  *
2750  * @hsotg: Programming view of DWC_otg controller
2751  */
2752 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
2753 {
2754 	u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
2755 
2756 	ahbcfg |= GAHBCFG_GLBL_INTR_EN;
2757 	DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
2758 }
2759 
2760 /**
2761  * dwc2_disable_global_interrupts() - Disables the controller's Global
2762  * Interrupt in the AHB Config register
2763  *
2764  * @hsotg: Programming view of DWC_otg controller
2765  */
2766 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
2767 {
2768 	u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
2769 
2770 	ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
2771 	DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
2772 }
2773