1 /*	$NetBSD: dwc2_core.c,v 1.7 2015/05/01 06:58:40 hikaru Exp $	*/
2 
3 /*
4  * core.c - DesignWare HS OTG Controller common routines
5  *
6  * Copyright (C) 2004-2013 Synopsys, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The names of the above-listed copyright holders may not be used
18  *    to endorse or promote products derived from this software without
19  *    specific prior written permission.
20  *
21  * ALTERNATIVELY, this software may be distributed under the terms of the
22  * GNU General Public License ("GPL") as published by the Free Software
23  * Foundation; either version 2 of the License, or (at your option) any
24  * later version.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * The Core code provides basic services for accessing and managing the
41  * DWC_otg hardware. These services are used by both the Host Controller
42  * Driver and the Peripheral Controller Driver.
43  */
44 
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: dwc2_core.c,v 1.7 2015/05/01 06:58:40 hikaru Exp $");
47 
48 #include <sys/types.h>
49 #include <sys/bus.h>
50 #include <sys/proc.h>
51 #include <sys/callout.h>
52 #include <sys/mutex.h>
53 #include <sys/pool.h>
54 #include <sys/workqueue.h>
55 
56 #include <dev/usb/usb.h>
57 #include <dev/usb/usbdi.h>
58 #include <dev/usb/usbdivar.h>
59 #include <dev/usb/usb_mem.h>
60 
61 #include <linux/kernel.h>
62 #include <linux/list.h>
63 
64 #include <dwc2/dwc2.h>
65 #include <dwc2/dwc2var.h>
66 
67 #include "dwc2_core.h"
68 #include "dwc2_hcd.h"
69 
70 /**
71  * dwc2_enable_common_interrupts() - Initializes the common interrupts,
72  * used in both device and host modes
73  *
74  * @hsotg: Programming view of the DWC_otg controller
75  */
76 static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
77 {
78 	u32 intmsk;
79 
80 	/* Clear any pending OTG Interrupts */
81 	DWC2_WRITE_4(hsotg, GOTGINT, 0xffffffff);
82 
83 	/* Clear any pending interrupts */
84 	DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff);
85 
86 	/* Enable the interrupts in the GINTMSK */
87 	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
88 
89 	if (hsotg->core_params->dma_enable <= 0)
90 		intmsk |= GINTSTS_RXFLVL;
91 
92 	intmsk |= GINTSTS_CONIDSTSCHNG | GINTSTS_WKUPINT | GINTSTS_USBSUSP |
93 		  GINTSTS_SESSREQINT;
94 
95 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
96 }
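
/*
 * Note on the mask built above: GINTSTS_RXFLVL ("Rx FIFO non-empty") is only
 * unmasked when DMA is off, because in slave mode software has to drain the
 * Rx FIFO itself; with DMA enabled the core moves the data and the interrupt
 * would only add overhead.  The remaining bits (mode mismatch, OTG, connector
 * ID change, wakeup, suspend, session request) are needed in both modes.
 */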
97 
98 /*
99  * Initializes the FSLSPClkSel field of the HCFG register depending on the
100  * PHY type
101  */
102 static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
103 {
104 	u32 hcfg, val;
105 
106 	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
107 	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
108 	     hsotg->core_params->ulpi_fs_ls > 0) ||
109 	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
110 		/* Full speed PHY */
111 		val = HCFG_FSLSPCLKSEL_48_MHZ;
112 	} else {
113 		/* High speed PHY running at full speed or high speed */
114 		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
115 	}
116 
117 	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
118 	hcfg = DWC2_READ_4(hsotg, HCFG);
119 	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
120 	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
121 	DWC2_WRITE_4(hsotg, HCFG, hcfg);
122 }
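
/*
 * Illustrative sketch (not part of the driver): multi-bit register fields in
 * this file are typically updated with the same read/modify/write steps --
 * clear the field with its _MASK, then OR in the new value shifted by its
 * _SHIFT.  A hypothetical generic helper expressing the same thing:
 *
 *	static u32 set_field(u32 reg, u32 mask, u32 shift, u32 val)
 *	{
 *		reg &= ~mask;			// drop the old field value
 *		reg |= (val << shift) & mask;	// insert the new one, clamped
 *		return reg;
 *	}
 *
 * The HCFG update above is set_field(hcfg, HCFG_FSLSPCLKSEL_MASK,
 * HCFG_FSLSPCLKSEL_SHIFT, val), written out by hand.
 */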
123 
124 /*
125  * Do a soft reset of the core.  Be careful with this because it
126  * resets all the internal state machines of the core.
127  */
128 static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
129 {
130 	u32 greset;
131 	int count = 0;
132 
133 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
134 
135 	/* Wait for AHB master IDLE state */
136 	do {
137 		usleep_range(20000, 40000);
138 		greset = DWC2_READ_4(hsotg, GRSTCTL);
139 		if (++count > 50) {
140 			dev_warn(hsotg->dev,
141 				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
142 				 __func__, greset);
143 			return -EBUSY;
144 		}
145 	} while (!(greset & GRSTCTL_AHBIDLE));
146 
147 	/* Core Soft Reset */
148 	count = 0;
149 	greset |= GRSTCTL_CSFTRST;
150 	DWC2_WRITE_4(hsotg, GRSTCTL, greset);
151 	do {
152 		usleep_range(20000, 40000);
153 		greset = DWC2_READ_4(hsotg, GRSTCTL);
154 		if (++count > 50) {
155 			dev_warn(hsotg->dev,
156 				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
157 				 __func__, greset);
158 			return -EBUSY;
159 		}
160 	} while (greset & GRSTCTL_CSFTRST);
161 
162 	/*
163 	 * NOTE: This long sleep is _very_ important, otherwise the core will
164 	 * not stay in host mode after a connector ID change!
165 	 */
166 	usleep_range(150000, 200000);
167 
168 	return 0;
169 }
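
/*
 * Illustrative sketch (not part of the driver): dwc2_core_reset() is two
 * instances of a "poll a status bit with a bounded retry count" idiom --
 * first waiting for GRSTCTL_AHBIDLE to become set, then for the self-clearing
 * GRSTCTL_CSFTRST to drop.  A minimal standalone version of that idiom, with
 * hypothetical read_reg()/delay() helpers standing in for DWC2_READ_4() and
 * usleep_range(), could look like this:
 *
 *	static int wait_for_bit(u32 (*read_reg)(void), u32 bit, bool want_set,
 *				int max_tries)
 *	{
 *		int tries;
 *
 *		for (tries = 0; tries < max_tries; tries++) {
 *			u32 val = read_reg();
 *
 *			if (want_set ? (val & bit) : !(val & bit))
 *				return 0;	// condition reached
 *			delay();		// back off before retrying
 *		}
 *		return -EBUSY;			// timed out, as above
 *	}
 */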
170 
171 static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
172 {
173 	u32 usbcfg, i2cctl;
174 	int retval = 0;
175 
176 	/*
177 	 * core_init() is now called on every switch so only call the
178 	 * following for the first time through
179 	 */
180 	if (select_phy) {
181 		dev_dbg(hsotg->dev, "FS PHY selected\n");
182 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
183 		usbcfg |= GUSBCFG_PHYSEL;
184 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
185 
186 		/* Reset after a PHY select */
187 		retval = dwc2_core_reset(hsotg);
188 		if (retval) {
189 			dev_err(hsotg->dev, "%s() Reset failed, aborting",
190 					__func__);
191 			return retval;
192 		}
193 	}
194 
195 	/*
196 	 * Program DCFG.DevSpd or HCFG.FSLSPClkSel to 48 MHz in FS. Also
197 	 * do this on HNP Dev/Host mode switches (done in dev_init and
198 	 * host_init).
199 	 */
200 	if (dwc2_is_host_mode(hsotg))
201 		dwc2_init_fs_ls_pclk_sel(hsotg);
202 
203 	if (hsotg->core_params->i2c_enable > 0) {
204 		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
205 
206 		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
207 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
208 		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
209 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
210 
211 		/* Program GI2CCTL.I2CEn */
212 		i2cctl = DWC2_READ_4(hsotg, GI2CCTL);
213 		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
214 		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
215 		i2cctl &= ~GI2CCTL_I2CEN;
216 		DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl);
217 		i2cctl |= GI2CCTL_I2CEN;
218 		DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl);
219 	}
220 
221 	return retval;
222 }
223 
224 static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
225 {
226 	u32 usbcfg;
227 	int retval = 0;
228 
229 	if (!select_phy)
230 		return 0;
231 
232 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
233 
234 	/*
235 	 * HS PHY parameters. These parameters are preserved during soft reset
236 	 * so only program the first time. Do a soft reset immediately after
237 	 * setting phyif.
238 	 */
239 	switch (hsotg->core_params->phy_type) {
240 	case DWC2_PHY_TYPE_PARAM_ULPI:
241 		/* ULPI interface */
242 		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
243 		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
244 		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
245 		if (hsotg->core_params->phy_ulpi_ddr > 0)
246 			usbcfg |= GUSBCFG_DDRSEL;
247 		break;
248 	case DWC2_PHY_TYPE_PARAM_UTMI:
249 		/* UTMI+ interface */
250 		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
251 		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
252 		if (hsotg->core_params->phy_utmi_width == 16)
253 			usbcfg |= GUSBCFG_PHYIF16;
254 		break;
255 	default:
256 		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
257 		break;
258 	}
259 
260 	DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
261 
262 	/* Reset after setting the PHY parameters */
263 	retval = dwc2_core_reset(hsotg);
264 	if (retval) {
265 		dev_err(hsotg->dev, "%s() Reset failed, aborting",
266 				__func__);
267 		return retval;
268 	}
269 
270 	return retval;
271 }
272 
273 static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
274 {
275 	u32 usbcfg;
276 	int retval = 0;
277 
278 	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
279 	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
280 		/* If FS mode with FS PHY */
281 		retval = dwc2_fs_phy_init(hsotg, select_phy);
282 		if (retval)
283 			return retval;
284 	} else {
285 		/* High speed PHY */
286 		retval = dwc2_hs_phy_init(hsotg, select_phy);
287 		if (retval)
288 			return retval;
289 	}
290 
291 	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
292 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
293 	    hsotg->core_params->ulpi_fs_ls > 0) {
294 		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
295 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
296 		usbcfg |= GUSBCFG_ULPI_FS_LS;
297 		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
298 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
299 	} else {
300 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
301 		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
302 		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
303 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
304 	}
305 
306 	return retval;
307 }
308 
309 static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
310 {
311 	u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
312 
313 	switch (hsotg->hw_params.arch) {
314 	case GHWCFG2_EXT_DMA_ARCH:
315 		dev_dbg(hsotg->dev, "External DMA Mode\n");
316 		if (hsotg->core_params->ahbcfg != -1) {
317 			ahbcfg &= GAHBCFG_CTRL_MASK;
318 			ahbcfg |= hsotg->core_params->ahbcfg &
319 				  ~GAHBCFG_CTRL_MASK;
320 		}
321 		break;
322 
323 	case GHWCFG2_INT_DMA_ARCH:
324 		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
325 		if (hsotg->core_params->ahbcfg != -1) {
326 			ahbcfg &= GAHBCFG_CTRL_MASK;
327 			ahbcfg |= hsotg->core_params->ahbcfg &
328 				  ~GAHBCFG_CTRL_MASK;
329 		}
330 		break;
331 
332 	case GHWCFG2_SLAVE_ONLY_ARCH:
333 	default:
334 		dev_dbg(hsotg->dev, "Slave Only Mode\n");
335 		break;
336 	}
337 
338 	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
339 		hsotg->core_params->dma_enable,
340 		hsotg->core_params->dma_desc_enable);
341 
342 	if (hsotg->core_params->dma_enable > 0) {
343 		if (hsotg->core_params->dma_desc_enable > 0)
344 			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
345 		else
346 			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
347 	} else {
348 		dev_dbg(hsotg->dev, "Using Slave mode\n");
349 		hsotg->core_params->dma_desc_enable = 0;
350 	}
351 
352 	if (hsotg->core_params->dma_enable > 0)
353 		ahbcfg |= GAHBCFG_DMA_EN;
354 
355 	DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
356 
357 	return 0;
358 }
359 
360 static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
361 {
362 	u32 usbcfg;
363 
364 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
365 	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
366 
367 	switch (hsotg->hw_params.op_mode) {
368 	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
369 		if (hsotg->core_params->otg_cap ==
370 				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
371 			usbcfg |= GUSBCFG_HNPCAP;
372 		if (hsotg->core_params->otg_cap !=
373 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
374 			usbcfg |= GUSBCFG_SRPCAP;
375 		break;
376 
377 	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
378 	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
379 	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
380 		if (hsotg->core_params->otg_cap !=
381 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
382 			usbcfg |= GUSBCFG_SRPCAP;
383 		break;
384 
385 	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
386 	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
387 	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
388 	default:
389 		break;
390 	}
391 
392 	DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
393 }
394 
395 /**
396  * dwc2_core_init() - Initializes the DWC_otg controller registers and
397  * prepares the core for device mode or host mode operation
398  *
399  * @hsotg:      Programming view of the DWC_otg controller
400  * @select_phy: If true then also set the Phy type
402  */
403 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy)
404 {
405 	u32 usbcfg, otgctl;
406 	int retval;
407 
408 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
409 
410 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
411 
412 	/* Set ULPI External VBUS bit if needed */
413 	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
414 	if (hsotg->core_params->phy_ulpi_ext_vbus ==
415 				DWC2_PHY_ULPI_EXTERNAL_VBUS)
416 		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
417 
418 	/* Set external TS Dline pulsing bit if needed */
419 	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
420 	if (hsotg->core_params->ts_dline > 0)
421 		usbcfg |= GUSBCFG_TERMSELDLPULSE;
422 
423 	DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
424 
425 	/* Reset the Controller */
426 	retval = dwc2_core_reset(hsotg);
427 	if (retval) {
428 		dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
429 				__func__);
430 		return retval;
431 	}
432 
433 	/*
434 	 * This needs to happen in FS mode before any other programming occurs
435 	 */
436 	retval = dwc2_phy_init(hsotg, select_phy);
437 	if (retval)
438 		return retval;
439 
440 	/* Program the GAHBCFG Register */
441 	retval = dwc2_gahbcfg_init(hsotg);
442 	if (retval)
443 		return retval;
444 
445 	/* Program the GUSBCFG register */
446 	dwc2_gusbcfg_init(hsotg);
447 
448 	/* Program the GOTGCTL register */
449 	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
450 	otgctl &= ~GOTGCTL_OTGVER;
451 	if (hsotg->core_params->otg_ver > 0)
452 		otgctl |= GOTGCTL_OTGVER;
453 	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
454 	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
455 
456 	/* Clear the SRP success bit for FS-I2c */
457 	hsotg->srp_success = 0;
458 
459 	/* Enable common interrupts */
460 	dwc2_enable_common_interrupts(hsotg);
461 
462 	/*
463 	 * Do device or host initialization based on mode during PCD and
464 	 * HCD initialization
465 	 */
466 	if (dwc2_is_host_mode(hsotg)) {
467 		dev_dbg(hsotg->dev, "Host Mode\n");
468 		hsotg->op_state = OTG_STATE_A_HOST;
469 	} else {
470 		dev_dbg(hsotg->dev, "Device Mode\n");
471 		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
472 	}
473 
474 	return 0;
475 }
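
/*
 * Illustrative sketch (not part of the driver): a hypothetical host-only
 * attachment path would call into this file roughly as follows once the
 * register mapping in hsotg is set up; everything except the dwc2_* calls is
 * invented for the example.
 *
 *	static int example_attach_host(struct dwc2_hsotg *hsotg)
 *	{
 *		int err;
 *
 *		// Select the PHY and bring the core to a known state
 *		err = dwc2_core_init(hsotg, true);
 *		if (err)
 *			return err;
 *
 *		// Host FIFO sizing, channel reset, port power, interrupts
 *		dwc2_core_host_init(hsotg);
 *		return 0;
 *	}
 *
 * In the driver proper this sequencing lives in the HCD and bus glue code,
 * which also sets up parameters, DMA resources and the interrupt handler.
 */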
476 
477 /**
478  * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
479  *
480  * @hsotg: Programming view of DWC_otg controller
481  */
482 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
483 {
484 	u32 intmsk;
485 
486 	dev_dbg(hsotg->dev, "%s()\n", __func__);
487 
488 	/* Disable all interrupts */
489 	DWC2_WRITE_4(hsotg, GINTMSK, 0);
490 	DWC2_WRITE_4(hsotg, HAINTMSK, 0);
491 
492 	/* Enable the common interrupts */
493 	dwc2_enable_common_interrupts(hsotg);
494 
495 	/* Enable host mode interrupts without disturbing common interrupts */
496 	intmsk = DWC2_READ_4(hsotg, GINTMSK);
497 	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
498 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
499 }
500 
501 /**
502  * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
503  *
504  * @hsotg: Programming view of DWC_otg controller
505  */
506 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
507 {
508 	u32 intmsk = DWC2_READ_4(hsotg, GINTMSK);
509 
510 	/* Disable host mode interrupts without disturbing common interrupts */
511 	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
512 		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
513 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
514 }
515 
516 static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
517 {
518 	struct dwc2_core_params *params = hsotg->core_params;
519 	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
520 
521 	if (!params->enable_dynamic_fifo)
522 		return;
523 
524 	/* Rx FIFO */
525 	grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
526 	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
527 	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
528 	grxfsiz |= params->host_rx_fifo_size <<
529 		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
530 	DWC2_WRITE_4(hsotg, GRXFSIZ, grxfsiz);
531 	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", DWC2_READ_4(hsotg, GRXFSIZ));
532 
533 	/* Non-periodic Tx FIFO */
534 	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
535 		DWC2_READ_4(hsotg, GNPTXFSIZ));
536 	nptxfsiz = params->host_nperio_tx_fifo_size <<
537 		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
538 	nptxfsiz |= params->host_rx_fifo_size <<
539 		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
540 	DWC2_WRITE_4(hsotg, GNPTXFSIZ, nptxfsiz);
541 	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
542 		DWC2_READ_4(hsotg, GNPTXFSIZ));
543 
544 	/* Periodic Tx FIFO */
545 	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
546 		DWC2_READ_4(hsotg, HPTXFSIZ));
547 	hptxfsiz = params->host_perio_tx_fifo_size <<
548 		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
549 	hptxfsiz |= (params->host_rx_fifo_size +
550 		     params->host_nperio_tx_fifo_size) <<
551 		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
552 	DWC2_WRITE_4(hsotg, HPTXFSIZ, hptxfsiz);
553 	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
554 		DWC2_READ_4(hsotg, HPTXFSIZ));
555 
556 	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
557 	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
558 		/*
559 		 * Global DFIFOCFG calculation for Host mode -
560 		 * include RxFIFO, NPTXFIFO and HPTXFIFO
561 		 */
562 		dfifocfg = DWC2_READ_4(hsotg, GDFIFOCFG);
563 		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
564 		dfifocfg |= (params->host_rx_fifo_size +
565 			     params->host_nperio_tx_fifo_size +
566 			     params->host_perio_tx_fifo_size) <<
567 			    GDFIFOCFG_EPINFOBASE_SHIFT &
568 			    GDFIFOCFG_EPINFOBASE_MASK;
569 		DWC2_WRITE_4(hsotg, GDFIFOCFG, dfifocfg);
570 	}
571 }
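
/*
 * Worked example (hypothetical sizes, not from any particular SoC): the start
 * addresses programmed above simply stack the three FIFOs back to back in the
 * shared FIFO RAM.  With host_rx_fifo_size = 512, host_nperio_tx_fifo_size =
 * 256 and host_perio_tx_fifo_size = 256 (all in 32-bit words):
 *
 *	Rx FIFO      depth 512   start   0
 *	NPTX FIFO    depth 256   start 512	(= rx)
 *	PTX  FIFO    depth 256   start 768	(= rx + nptx)
 *	EPInfoBase             = 1024		(= rx + nptx + ptx, when
 *						   GDFIFOCFG is programmed)
 */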
572 
573 /**
574  * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
575  * Host mode
576  *
577  * @hsotg: Programming view of DWC_otg controller
578  *
579  * This function flushes the Tx and Rx FIFOs and flushes any entries in the
580  * request queues. Host channels are reset to ensure that they are ready for
581  * performing transfers.
582  */
583 void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
584 {
585 	u32 hcfg, hfir, otgctl;
586 
587 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
588 
589 	/* Restart the Phy Clock */
590 	DWC2_WRITE_4(hsotg, PCGCTL, 0);
591 
592 	/* Initialize Host Configuration Register */
593 	dwc2_init_fs_ls_pclk_sel(hsotg);
594 	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
595 		hcfg = DWC2_READ_4(hsotg, HCFG);
596 		hcfg |= HCFG_FSLSSUPP;
597 		DWC2_WRITE_4(hsotg, HCFG, hcfg);
598 	}
599 
600 	/*
601 	 * This bit allows dynamic reloading of the HFIR register during
602 	 * runtime. This bit needs to be programmed during initial configuration
603 	 * and its value must not be changed during runtime.
604 	 */
605 	if (hsotg->core_params->reload_ctl > 0) {
606 		hfir = DWC2_READ_4(hsotg, HFIR);
607 		hfir |= HFIR_RLDCTRL;
608 		DWC2_WRITE_4(hsotg, HFIR, hfir);
609 	}
610 
611 	if (hsotg->core_params->dma_desc_enable > 0) {
612 		u32 op_mode = hsotg->hw_params.op_mode;
613 		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
614 		    !hsotg->hw_params.dma_desc_enable ||
615 		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
616 		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
617 		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
618 			dev_err(hsotg->dev,
619 				"Hardware does not support descriptor DMA mode -\n");
620 			dev_err(hsotg->dev,
621 				"falling back to buffer DMA mode.\n");
622 			hsotg->core_params->dma_desc_enable = 0;
623 		} else {
624 			hcfg = DWC2_READ_4(hsotg, HCFG);
625 			hcfg |= HCFG_DESCDMA;
626 			DWC2_WRITE_4(hsotg, HCFG, hcfg);
627 		}
628 	}
629 
630 	/* Configure data FIFO sizes */
631 	dwc2_config_fifos(hsotg);
632 
633 	/* TODO - check this */
634 	/* Clear Host Set HNP Enable in the OTG Control Register */
635 	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
636 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
637 	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
638 
639 	/* Make sure the FIFOs are flushed */
640 	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
641 	dwc2_flush_rx_fifo(hsotg);
642 
643 	/* Clear Host Set HNP Enable in the OTG Control Register */
644 	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
645 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
646 	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
647 
648 	if (hsotg->core_params->dma_desc_enable <= 0) {
649 		int num_channels, i;
650 		u32 hcchar;
651 
652 		/* Flush out any leftover queued requests */
653 		num_channels = hsotg->core_params->host_channels;
654 		for (i = 0; i < num_channels; i++) {
655 			hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
656 			hcchar &= ~HCCHAR_CHENA;
657 			hcchar |= HCCHAR_CHDIS;
658 			hcchar &= ~HCCHAR_EPDIR;
659 			DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar);
660 		}
661 
662 		/* Halt all channels to put them into a known state */
663 		for (i = 0; i < num_channels; i++) {
664 			int count = 0;
665 
666 			hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
667 			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
668 			hcchar &= ~HCCHAR_EPDIR;
669 			DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar);
670 			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
671 				__func__, i);
672 			do {
673 				hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
674 				if (++count > 1000) {
675 					dev_err(hsotg->dev,
676 						"Unable to clear enable on channel %d\n",
677 						i);
678 					break;
679 				}
680 				udelay(1);
681 			} while (hcchar & HCCHAR_CHENA);
682 		}
683 	}
684 
685 	/* Turn on the vbus power */
686 	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
687 	if (hsotg->op_state == OTG_STATE_A_HOST) {
688 		u32 hprt0 = dwc2_read_hprt0(hsotg);
689 
690 		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
691 			!!(hprt0 & HPRT0_PWR));
692 		if (!(hprt0 & HPRT0_PWR)) {
693 			hprt0 |= HPRT0_PWR;
694 			DWC2_WRITE_4(hsotg, HPRT0, hprt0);
695 		}
696 	}
697 
698 	dwc2_enable_host_interrupts(hsotg);
699 }
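
/*
 * Summary of the sequence above: restart the PHY clock, pick the FS/LS PHY
 * clock (and FS/LS-only support when configured for full speed), optionally
 * enable HFIR reloading and descriptor DMA, size and flush the FIFOs, clear
 * HstSetHNPEn, force every channel into a known halted state when descriptor
 * DMA is not in use, power the port if we are already acting as A-host, and
 * finally unmask the host-mode interrupts.
 */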
700 
701 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
702 				      struct dwc2_host_chan *chan)
703 {
704 	u32 hcintmsk = HCINTMSK_CHHLTD;
705 
706 	switch (chan->ep_type) {
707 	case USB_ENDPOINT_XFER_CONTROL:
708 	case USB_ENDPOINT_XFER_BULK:
709 		dev_vdbg(hsotg->dev, "control/bulk\n");
710 		hcintmsk |= HCINTMSK_XFERCOMPL;
711 		hcintmsk |= HCINTMSK_STALL;
712 		hcintmsk |= HCINTMSK_XACTERR;
713 		hcintmsk |= HCINTMSK_DATATGLERR;
714 		if (chan->ep_is_in) {
715 			hcintmsk |= HCINTMSK_BBLERR;
716 		} else {
717 			hcintmsk |= HCINTMSK_NAK;
718 			hcintmsk |= HCINTMSK_NYET;
719 			if (chan->do_ping)
720 				hcintmsk |= HCINTMSK_ACK;
721 		}
722 
723 		if (chan->do_split) {
724 			hcintmsk |= HCINTMSK_NAK;
725 			if (chan->complete_split)
726 				hcintmsk |= HCINTMSK_NYET;
727 			else
728 				hcintmsk |= HCINTMSK_ACK;
729 		}
730 
731 		if (chan->error_state)
732 			hcintmsk |= HCINTMSK_ACK;
733 		break;
734 
735 	case USB_ENDPOINT_XFER_INT:
736 		if (dbg_perio())
737 			dev_vdbg(hsotg->dev, "intr\n");
738 		hcintmsk |= HCINTMSK_XFERCOMPL;
739 		hcintmsk |= HCINTMSK_NAK;
740 		hcintmsk |= HCINTMSK_STALL;
741 		hcintmsk |= HCINTMSK_XACTERR;
742 		hcintmsk |= HCINTMSK_DATATGLERR;
743 		hcintmsk |= HCINTMSK_FRMOVRUN;
744 
745 		if (chan->ep_is_in)
746 			hcintmsk |= HCINTMSK_BBLERR;
747 		if (chan->error_state)
748 			hcintmsk |= HCINTMSK_ACK;
749 		if (chan->do_split) {
750 			if (chan->complete_split)
751 				hcintmsk |= HCINTMSK_NYET;
752 			else
753 				hcintmsk |= HCINTMSK_ACK;
754 		}
755 		break;
756 
757 	case USB_ENDPOINT_XFER_ISOC:
758 		if (dbg_perio())
759 			dev_vdbg(hsotg->dev, "isoc\n");
760 		hcintmsk |= HCINTMSK_XFERCOMPL;
761 		hcintmsk |= HCINTMSK_FRMOVRUN;
762 		hcintmsk |= HCINTMSK_ACK;
763 
764 		if (chan->ep_is_in) {
765 			hcintmsk |= HCINTMSK_XACTERR;
766 			hcintmsk |= HCINTMSK_BBLERR;
767 		}
768 		break;
769 	default:
770 		dev_err(hsotg->dev, "## Unknown EP type ##\n");
771 		break;
772 	}
773 
774 	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
775 	if (dbg_hc(chan))
776 		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
777 }
778 
779 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
780 				    struct dwc2_host_chan *chan)
781 {
782 	u32 hcintmsk = HCINTMSK_CHHLTD;
783 
784 	/*
785 	 * For Descriptor DMA mode core halts the channel on AHB error.
786 	 * Interrupt is not required.
787 	 */
788 	if (hsotg->core_params->dma_desc_enable <= 0) {
789 		if (dbg_hc(chan))
790 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
791 		hcintmsk |= HCINTMSK_AHBERR;
792 	} else {
793 		if (dbg_hc(chan))
794 			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
795 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
796 			hcintmsk |= HCINTMSK_XFERCOMPL;
797 	}
798 
799 	if (chan->error_state && !chan->do_split &&
800 	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
801 		if (dbg_hc(chan))
802 			dev_vdbg(hsotg->dev, "setting ACK\n");
803 		hcintmsk |= HCINTMSK_ACK;
804 		if (chan->ep_is_in) {
805 			hcintmsk |= HCINTMSK_DATATGLERR;
806 			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
807 				hcintmsk |= HCINTMSK_NAK;
808 		}
809 	}
810 
811 	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
812 	if (dbg_hc(chan))
813 		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
814 }
815 
816 static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
817 				struct dwc2_host_chan *chan)
818 {
819 	u32 intmsk;
820 
821 	if (hsotg->core_params->dma_enable > 0) {
822 		if (dbg_hc(chan))
823 			dev_vdbg(hsotg->dev, "DMA enabled\n");
824 		dwc2_hc_enable_dma_ints(hsotg, chan);
825 	} else {
826 		if (dbg_hc(chan))
827 			dev_vdbg(hsotg->dev, "DMA disabled\n");
828 		dwc2_hc_enable_slave_ints(hsotg, chan);
829 	}
830 
831 	/* Enable the top level host channel interrupt */
832 	intmsk = DWC2_READ_4(hsotg, HAINTMSK);
833 	intmsk |= 1 << chan->hc_num;
834 	DWC2_WRITE_4(hsotg, HAINTMSK, intmsk);
835 	if (dbg_hc(chan))
836 		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
837 
838 	/* Make sure host channel interrupts are enabled */
839 	intmsk = DWC2_READ_4(hsotg, GINTMSK);
840 	intmsk |= GINTSTS_HCHINT;
841 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
842 	if (dbg_hc(chan))
843 		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
844 }
845 
846 /**
847  * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
848  * a specific endpoint
849  *
850  * @hsotg: Programming view of DWC_otg controller
851  * @chan:  Information needed to initialize the host channel
852  *
853  * The HCCHARn register is set up with the characteristics specified in chan.
854  * Host channel interrupts that may need to be serviced while this transfer is
855  * in progress are enabled.
856  */
857 void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
858 {
859 	u8 hc_num = chan->hc_num;
860 	u32 hcintmsk;
861 	u32 hcchar;
862 	u32 hcsplt = 0;
863 
864 	if (dbg_hc(chan))
865 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
866 
867 	/* Clear old interrupt conditions for this host channel */
868 	hcintmsk = 0xffffffff;
869 	hcintmsk &= ~HCINTMSK_RESERVED14_31;
870 	DWC2_WRITE_4(hsotg, HCINT(hc_num), hcintmsk);
871 
872 	/* Enable channel interrupts required for this transfer */
873 	dwc2_hc_enable_ints(hsotg, chan);
874 
875 	/*
876 	 * Program the HCCHARn register with the endpoint characteristics for
877 	 * the current transfer
878 	 */
879 	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
880 	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
881 	if (chan->ep_is_in)
882 		hcchar |= HCCHAR_EPDIR;
883 	if (chan->speed == USB_SPEED_LOW)
884 		hcchar |= HCCHAR_LSPDDEV;
885 	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
886 	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
887 	DWC2_WRITE_4(hsotg, HCCHAR(hc_num), hcchar);
888 	if (dbg_hc(chan)) {
889 		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
890 			 hc_num, hcchar);
891 
892 		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
893 			 __func__, hc_num);
894 		dev_vdbg(hsotg->dev, "	 Dev Addr: %d\n",
895 			 chan->dev_addr);
896 		dev_vdbg(hsotg->dev, "	 Ep Num: %d\n",
897 			 chan->ep_num);
898 		dev_vdbg(hsotg->dev, "	 Is In: %d\n",
899 			 chan->ep_is_in);
900 		dev_vdbg(hsotg->dev, "	 Is Low Speed: %d\n",
901 			 chan->speed == USB_SPEED_LOW);
902 		dev_vdbg(hsotg->dev, "	 Ep Type: %d\n",
903 			 chan->ep_type);
904 		dev_vdbg(hsotg->dev, "	 Max Pkt: %d\n",
905 			 chan->max_packet);
906 	}
907 
908 	/* Program the HCSPLT register for SPLITs */
909 	if (chan->do_split) {
910 		if (dbg_hc(chan))
911 			dev_vdbg(hsotg->dev,
912 				 "Programming HC %d with split --> %s\n",
913 				 hc_num,
914 				 chan->complete_split ? "CSPLIT" : "SSPLIT");
915 		if (chan->complete_split)
916 			hcsplt |= HCSPLT_COMPSPLT;
917 		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
918 			  HCSPLT_XACTPOS_MASK;
919 		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
920 			  HCSPLT_HUBADDR_MASK;
921 		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
922 			  HCSPLT_PRTADDR_MASK;
923 		if (dbg_hc(chan)) {
924 			dev_vdbg(hsotg->dev, "	  comp split %d\n",
925 				 chan->complete_split);
926 			dev_vdbg(hsotg->dev, "	  xact pos %d\n",
927 				 chan->xact_pos);
928 			dev_vdbg(hsotg->dev, "	  hub addr %d\n",
929 				 chan->hub_addr);
930 			dev_vdbg(hsotg->dev, "	  hub port %d\n",
931 				 chan->hub_port);
932 			dev_vdbg(hsotg->dev, "	  is_in %d\n",
933 				 chan->ep_is_in);
934 			dev_vdbg(hsotg->dev, "	  Max Pkt %d\n",
935 				 chan->max_packet);
936 			dev_vdbg(hsotg->dev, "	  xferlen %d\n",
937 				 chan->xfer_len);
938 		}
939 	}
940 
941 	DWC2_WRITE_4(hsotg, HCSPLT(hc_num), hcsplt);
942 }
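
/*
 * Illustrative sketch (not part of the driver): the dwc2_host_chan fields
 * consumed above are filled in by the HCD when it binds a channel to a queue
 * head.  A minimal, hypothetical setup for a full-speed bulk OUT endpoint
 * might look like this before calling dwc2_hc_init(); all of the numbers are
 * examples only.
 *
 *	chan->hc_num     = 0;			// channel index
 *	chan->dev_addr   = 3;			// USB device address
 *	chan->ep_num     = 2;			// endpoint number
 *	chan->ep_is_in   = 0;			// OUT direction
 *	chan->speed      = USB_SPEED_FULL;
 *	chan->ep_type    = USB_ENDPOINT_XFER_BULK;
 *	chan->max_packet = 64;
 *	chan->do_split   = 0;			// no transaction translator
 *
 *	dwc2_hc_init(hsotg, chan);
 */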
943 
944 /**
945  * dwc2_hc_halt() - Attempts to halt a host channel
946  *
947  * @hsotg:       Controller register interface
948  * @chan:        Host channel to halt
949  * @halt_status: Reason for halting the channel
950  *
951  * This function should only be called in Slave mode or to abort a transfer in
952  * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
953  * controller halts the channel when the transfer is complete or a condition
954  * occurs that requires application intervention.
955  *
956  * In slave mode, checks for a free request queue entry, then sets the Channel
957  * Enable and Channel Disable bits of the Host Channel Characteristics
958  * register of the specified channel to initiate the halt. If there is no free
959  * request queue entry, sets only the Channel Disable bit of the HCCHARn
960  * register to flush requests for this channel. In the latter case, sets a
961  * flag to indicate that the host channel needs to be halted when a request
962  * queue slot is open.
963  *
964  * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
965  * HCCHARn register. The controller ensures there is space in the request
966  * queue before submitting the halt request.
967  *
968  * Some time may elapse before the core flushes any posted requests for this
969  * host channel and halts. The Channel Halted interrupt handler completes the
970  * deactivation of the host channel.
971  */
972 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
973 		  enum dwc2_halt_status halt_status)
974 {
975 	u32 nptxsts, hptxsts, hcchar;
976 
977 	if (dbg_hc(chan))
978 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
979 	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
980 		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
981 
982 	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
983 	    halt_status == DWC2_HC_XFER_AHB_ERR) {
984 		/*
985 		 * Disable all channel interrupts except Ch Halted. The QTD
986 		 * and QH state associated with this transfer has been cleared
987 		 * (in the case of URB_DEQUEUE), so the channel needs to be
988 		 * shut down carefully to prevent crashes.
989 		 */
990 		u32 hcintmsk = HCINTMSK_CHHLTD;
991 
992 		dev_vdbg(hsotg->dev, "dequeue/error\n");
993 		DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
994 
995 		/*
996 		 * Make sure no other interrupts besides halt are currently
997 		 * pending. Handling another interrupt could cause a crash due
998 		 * to the QTD and QH state.
999 		 */
1000 		DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), ~hcintmsk);
1001 
1002 		/*
1003 		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1004 		 * even if the channel was already halted for some other
1005 		 * reason
1006 		 */
1007 		chan->halt_status = halt_status;
1008 
1009 		hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1010 		if (!(hcchar & HCCHAR_CHENA)) {
1011 			/*
1012 			 * The channel is either already halted or it hasn't
1013 			 * started yet. In DMA mode, the transfer may halt if
1014 			 * it finishes normally or a condition occurs that
1015 			 * requires driver intervention. Don't want to halt
1016 			 * the channel again. In either Slave or DMA mode,
1017 			 * it's possible that the transfer has been assigned
1018 			 * to a channel, but not started yet when an URB is
1019 			 * dequeued. Don't want to halt a channel that hasn't
1020 			 * started yet.
1021 			 */
1022 			return;
1023 		}
1024 	}
1025 	if (chan->halt_pending) {
1026 		/*
1027 		 * A halt has already been issued for this channel. This might
1028 		 * happen when a transfer is aborted by a higher level in
1029 		 * the stack.
1030 		 */
1031 		dev_vdbg(hsotg->dev,
1032 			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1033 			 __func__, chan->hc_num);
1034 		return;
1035 	}
1036 
1037 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1038 
1039 	/* No need to set the bit in DDMA for disabling the channel */
1040 	/* TODO check it everywhere channel is disabled */
1041 	if (hsotg->core_params->dma_desc_enable <= 0) {
1042 		if (dbg_hc(chan))
1043 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1044 		hcchar |= HCCHAR_CHENA;
1045 	} else {
1046 		if (dbg_hc(chan))
1047 			dev_dbg(hsotg->dev, "desc DMA enabled\n");
1048 	}
1049 	hcchar |= HCCHAR_CHDIS;
1050 
1051 	if (hsotg->core_params->dma_enable <= 0) {
1052 		if (dbg_hc(chan))
1053 			dev_vdbg(hsotg->dev, "DMA not enabled\n");
1054 		hcchar |= HCCHAR_CHENA;
1055 
1056 		/* Check for space in the request queue to issue the halt */
1057 		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1058 		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1059 			dev_vdbg(hsotg->dev, "control/bulk\n");
1060 			nptxsts = DWC2_READ_4(hsotg, GNPTXSTS);
1061 			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1062 				dev_vdbg(hsotg->dev, "Disabling channel\n");
1063 				hcchar &= ~HCCHAR_CHENA;
1064 			}
1065 		} else {
1066 			if (dbg_perio())
1067 				dev_vdbg(hsotg->dev, "isoc/intr\n");
1068 			hptxsts = DWC2_READ_4(hsotg, HPTXSTS);
1069 			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1070 			    hsotg->queuing_high_bandwidth) {
1071 				if (dbg_perio())
1072 					dev_vdbg(hsotg->dev, "Disabling channel\n");
1073 				hcchar &= ~HCCHAR_CHENA;
1074 			}
1075 		}
1076 	} else {
1077 		if (dbg_hc(chan))
1078 			dev_vdbg(hsotg->dev, "DMA enabled\n");
1079 	}
1080 
1081 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1082 	chan->halt_status = halt_status;
1083 
1084 	if (hcchar & HCCHAR_CHENA) {
1085 		if (dbg_hc(chan))
1086 			dev_vdbg(hsotg->dev, "Channel enabled\n");
1087 		chan->halt_pending = 1;
1088 		chan->halt_on_queue = 0;
1089 	} else {
1090 		if (dbg_hc(chan))
1091 			dev_vdbg(hsotg->dev, "Channel disabled\n");
1092 		chan->halt_on_queue = 1;
1093 	}
1094 
1095 	if (dbg_hc(chan)) {
1096 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1097 			 chan->hc_num);
1098 		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
1099 			 hcchar);
1100 		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
1101 			 chan->halt_pending);
1102 		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
1103 			 chan->halt_on_queue);
1104 		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
1105 			 chan->halt_status);
1106 	}
1107 }
1108 
1109 /**
1110  * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1111  *
1112  * @hsotg: Programming view of DWC_otg controller
1113  * @chan:  Identifies the host channel to clean up
1114  *
1115  * This function is normally called after a transfer is done and the host
1116  * channel is being released
1117  */
1118 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1119 {
1120 	u32 hcintmsk;
1121 
1122 	chan->xfer_started = 0;
1123 
1124 	/*
1125 	 * Clear channel interrupt enables and any unhandled channel interrupt
1126 	 * conditions
1127 	 */
1128 	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), 0);
1129 	hcintmsk = 0xffffffff;
1130 	hcintmsk &= ~HCINTMSK_RESERVED14_31;
1131 	DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), hcintmsk);
1132 }
1133 
1134 /**
1135  * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1136  * which frame a periodic transfer should occur
1137  *
1138  * @hsotg:  Programming view of DWC_otg controller
1139  * @chan:   Identifies the host channel to set up and its properties
1140  * @hcchar: Current value of the HCCHAR register for the specified host channel
1141  *
1142  * This function has no effect on non-periodic transfers
1143  */
1144 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1145 				       struct dwc2_host_chan *chan, u32 *hcchar)
1146 {
1147 	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1148 	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1149 		/* 1 if _next_ frame is odd, 0 if it's even */
1150 		if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
1151 			*hcchar |= HCCHAR_ODDFRM;
1152 	}
1153 }
1154 
1155 static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1156 {
1157 	/* Set up the initial PID for the transfer */
1158 	if (chan->speed == USB_SPEED_HIGH) {
1159 		if (chan->ep_is_in) {
1160 			if (chan->multi_count == 1)
1161 				chan->data_pid_start = DWC2_HC_PID_DATA0;
1162 			else if (chan->multi_count == 2)
1163 				chan->data_pid_start = DWC2_HC_PID_DATA1;
1164 			else
1165 				chan->data_pid_start = DWC2_HC_PID_DATA2;
1166 		} else {
1167 			if (chan->multi_count == 1)
1168 				chan->data_pid_start = DWC2_HC_PID_DATA0;
1169 			else
1170 				chan->data_pid_start = DWC2_HC_PID_MDATA;
1171 		}
1172 	} else {
1173 		chan->data_pid_start = DWC2_HC_PID_DATA0;
1174 	}
1175 }
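
/*
 * Summary of the mapping above.  High-bandwidth high-speed isochronous
 * endpoints send one, two or three packets per microframe, and the starting
 * PID encodes how many are left:
 *
 *	multi_count	IN starts with	OUT starts with
 *	    1		    DATA0	    DATA0
 *	    2		    DATA1	    MDATA
 *	    3		    DATA2	    MDATA
 *
 * All full- and low-speed isochronous transfers start with DATA0.
 */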
1176 
1177 /**
1178  * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1179  * the Host Channel
1180  *
1181  * @hsotg: Programming view of DWC_otg controller
1182  * @chan:  Information needed to initialize the host channel
1183  *
1184  * This function should only be called in Slave mode. For a channel associated
1185  * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1186  * associated with a periodic EP, the periodic Tx FIFO is written.
1187  *
1188  * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1189  * the number of bytes written to the Tx FIFO.
1190  */
1191 static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1192 				 struct dwc2_host_chan *chan)
1193 {
1194 	u32 i;
1195 	u32 remaining_count;
1196 	u32 byte_count;
1197 	u32 dword_count;
1198 	u32 *data_buf = (u32 *)chan->xfer_buf;
1199 	u32 data_fifo;
1200 
1201 	if (dbg_hc(chan))
1202 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1203 
1204 	data_fifo = HCFIFO(chan->hc_num);
1205 
1206 	remaining_count = chan->xfer_len - chan->xfer_count;
1207 	if (remaining_count > chan->max_packet)
1208 		byte_count = chan->max_packet;
1209 	else
1210 		byte_count = remaining_count;
1211 
1212 	dword_count = (byte_count + 3) / 4;
1213 
1214 	if (((unsigned long)data_buf & 0x3) == 0) {
1215 		/* xfer_buf is DWORD aligned */
1216 		for (i = 0; i < dword_count; i++, data_buf++)
1217 			DWC2_WRITE_4(hsotg, data_fifo, *data_buf);
1218 	} else {
1219 		/* xfer_buf is not DWORD aligned */
1220 		for (i = 0; i < dword_count; i++, data_buf++) {
1221 			u32 data = data_buf[0] | data_buf[1] << 8 |
1222 				   data_buf[2] << 16 | data_buf[3] << 24;
1223 			DWC2_WRITE_4(hsotg, data_fifo, data);
1224 		}
1225 	}
1226 
1227 	chan->xfer_count += byte_count;
1228 	chan->xfer_buf += byte_count;
1229 }
1230 
1231 /**
1232  * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1233  * channel and starts the transfer
1234  *
1235  * @hsotg: Programming view of DWC_otg controller
1236  * @chan:  Information needed to initialize the host channel. The xfer_len value
1237  *         may be reduced to accommodate the max widths of the XferSize and
1238  *         PktCnt fields in the HCTSIZn register. The multi_count value may be
1239  *         changed to reflect the final xfer_len value.
1240  *
1241  * This function may be called in either Slave mode or DMA mode. In Slave mode,
1242  * the caller must ensure that there is sufficient space in the request queue
1243  * and Tx Data FIFO.
1244  *
1245  * For an OUT transfer in Slave mode, it loads a data packet into the
1246  * appropriate FIFO. If necessary, additional data packets are loaded in the
1247  * Host ISR.
1248  *
1249  * For an IN transfer in Slave mode, a data packet is requested. The data
1250  * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1251  * additional data packets are requested in the Host ISR.
1252  *
1253  * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1254  * register along with a packet count of 1 and the channel is enabled. This
1255  * causes a single PING transaction to occur. Other fields in HCTSIZ are
1256  * simply set to 0 since no data transfer occurs in this case.
1257  *
1258  * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1259  * all the information required to perform the subsequent data transfer. In
1260  * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1261  * controller performs the entire PING protocol, then starts the data
1262  * transfer.
1263  */
1264 void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1265 			    struct dwc2_host_chan *chan)
1266 {
1267 	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1268 	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1269 	u32 hcchar;
1270 	u32 hctsiz = 0;
1271 	u16 num_packets;
1272 
1273 	if (dbg_hc(chan))
1274 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1275 
1276 	if (chan->do_ping) {
1277 		if (hsotg->core_params->dma_enable <= 0) {
1278 			if (dbg_hc(chan))
1279 				dev_vdbg(hsotg->dev, "ping, no DMA\n");
1280 			dwc2_hc_do_ping(hsotg, chan);
1281 			chan->xfer_started = 1;
1282 			return;
1283 		} else {
1284 			if (dbg_hc(chan))
1285 				dev_vdbg(hsotg->dev, "ping, DMA\n");
1286 			hctsiz |= TSIZ_DOPNG;
1287 		}
1288 	}
1289 
1290 	if (chan->do_split) {
1291 		if (dbg_hc(chan))
1292 			dev_vdbg(hsotg->dev, "split\n");
1293 		num_packets = 1;
1294 
1295 		if (chan->complete_split && !chan->ep_is_in)
1296 			/*
1297 			 * For CSPLIT OUT Transfer, set the size to 0 so the
1298 			 * core doesn't expect any data written to the FIFO
1299 			 */
1300 			chan->xfer_len = 0;
1301 		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1302 			chan->xfer_len = chan->max_packet;
1303 		else if (!chan->ep_is_in && chan->xfer_len > 188)
1304 			chan->xfer_len = 188;
1305 
1306 		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1307 			  TSIZ_XFERSIZE_MASK;
1308 	} else {
1309 		if (dbg_hc(chan))
1310 			dev_vdbg(hsotg->dev, "no split\n");
1311 		/*
1312 		 * Ensure that the transfer length and packet count will fit
1313 		 * in the widths allocated for them in the HCTSIZn register
1314 		 */
1315 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1316 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1317 			/*
1318 			 * Make sure the transfer size is no larger than one
1319 			 * (micro)frame's worth of data. (A check was done
1320 			 * when the periodic transfer was accepted to ensure
1321 			 * that a (micro)frame's worth of data can be
1322 			 * programmed into a channel.)
1323 			 */
1324 			u32 max_periodic_len =
1325 				chan->multi_count * chan->max_packet;
1326 
1327 			if (chan->xfer_len > max_periodic_len)
1328 				chan->xfer_len = max_periodic_len;
1329 		} else if (chan->xfer_len > max_hc_xfer_size) {
1330 			/*
1331 			 * Make sure that xfer_len is a multiple of max packet
1332 			 * size
1333 			 */
1334 			chan->xfer_len =
1335 				max_hc_xfer_size - chan->max_packet + 1;
1336 		}
1337 
1338 		if (chan->xfer_len > 0) {
1339 			num_packets = (chan->xfer_len + chan->max_packet - 1) /
1340 					chan->max_packet;
1341 			if (num_packets > max_hc_pkt_count) {
1342 				num_packets = max_hc_pkt_count;
1343 				chan->xfer_len = num_packets * chan->max_packet;
1344 			}
1345 		} else {
1346 			/* Need 1 packet for transfer length of 0 */
1347 			num_packets = 1;
1348 		}
1349 
1350 		if (chan->ep_is_in)
1351 			/*
1352 			 * Always program an integral # of max packets for IN
1353 			 * transfers
1354 			 */
1355 			chan->xfer_len = num_packets * chan->max_packet;
1356 
1357 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1358 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1359 			/*
1360 			 * Make sure that the multi_count field matches the
1361 			 * actual transfer length
1362 			 */
1363 			chan->multi_count = num_packets;
1364 
1365 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1366 			dwc2_set_pid_isoc(chan);
1367 
1368 		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1369 			  TSIZ_XFERSIZE_MASK;
1370 	}
1371 
1372 	chan->start_pkt_count = num_packets;
1373 	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1374 	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1375 		  TSIZ_SC_MC_PID_MASK;
1376 	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
1377 	if (dbg_hc(chan)) {
1378 		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1379 			 hctsiz, chan->hc_num);
1380 
1381 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1382 			 chan->hc_num);
1383 		dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n",
1384 			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1385 			 TSIZ_XFERSIZE_SHIFT);
1386 		dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n",
1387 			 (hctsiz & TSIZ_PKTCNT_MASK) >>
1388 			 TSIZ_PKTCNT_SHIFT);
1389 		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
1390 			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1391 			 TSIZ_SC_MC_PID_SHIFT);
1392 	}
1393 
1394 	if (hsotg->core_params->dma_enable > 0) {
1395 		dma_addr_t dma_addr;
1396 
1397 		if (chan->align_buf) {
1398 			if (dbg_hc(chan))
1399 				dev_vdbg(hsotg->dev, "align_buf\n");
1400 			dma_addr = chan->align_buf;
1401 		} else {
1402 			dma_addr = chan->xfer_dma;
1403 		}
1404 		if (hsotg->hsotg_sc->sc_set_dma_addr == NULL) {
1405 			DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num),
1406 			    (u32)dma_addr);
1407 			if (dbg_hc(chan))
1408 				dev_vdbg(hsotg->dev,
1409 				    "Wrote %08lx to HCDMA(%d)\n",
1410 				     (unsigned long)dma_addr,
1411 				    chan->hc_num);
1412 		} else {
1413 			(void)(*hsotg->hsotg_sc->sc_set_dma_addr)(
1414 			    hsotg->dev, dma_addr, chan->hc_num);
1415 		}
1416 	}
1417 
1418 	/* Start the split */
1419 	if (chan->do_split) {
1420 		u32 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chan->hc_num));
1421 
1422 		hcsplt |= HCSPLT_SPLTENA;
1423 		DWC2_WRITE_4(hsotg, HCSPLT(chan->hc_num), hcsplt);
1424 	}
1425 
1426 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1427 	hcchar &= ~HCCHAR_MULTICNT_MASK;
1428 	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1429 		  HCCHAR_MULTICNT_MASK;
1430 	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1431 
1432 	if (hcchar & HCCHAR_CHDIS)
1433 		dev_warn(hsotg->dev,
1434 			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1435 			 __func__, chan->hc_num, hcchar);
1436 
1437 	/* Set host channel enable after all other setup is complete */
1438 	hcchar |= HCCHAR_CHENA;
1439 	hcchar &= ~HCCHAR_CHDIS;
1440 
1441 	if (dbg_hc(chan))
1442 		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
1443 			 (hcchar & HCCHAR_MULTICNT_MASK) >>
1444 			 HCCHAR_MULTICNT_SHIFT);
1445 
1446 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1447 	if (dbg_hc(chan))
1448 		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1449 			 chan->hc_num);
1450 
1451 	chan->xfer_started = 1;
1452 	chan->requests++;
1453 
1454 	if (hsotg->core_params->dma_enable <= 0 &&
1455 	    !chan->ep_is_in && chan->xfer_len > 0)
1456 		/* Load OUT packet into the appropriate Tx FIFO */
1457 		dwc2_hc_write_packet(hsotg, chan);
1458 }
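
/*
 * Worked example (hypothetical numbers): for a non-split bulk IN transfer
 * with xfer_len = 1000 and max_packet = 64, the code above computes
 *
 *	num_packets = (1000 + 64 - 1) / 64 = 16
 *	xfer_len    = 16 * 64             = 1024
 *
 * (IN lengths are always rounded up to a whole number of max-size packets),
 * then packs XferSize = 1024, PktCnt = 16 and the starting PID into HCTSIZ
 * before programming HCDMA/HCCHAR and enabling the channel.
 */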
1459 
1460 /**
1461  * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1462  * host channel and starts the transfer in Descriptor DMA mode
1463  *
1464  * @hsotg: Programming view of DWC_otg controller
1465  * @chan:  Information needed to initialize the host channel
1466  *
1467  * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1468  * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1469  * with micro-frame bitmap.
1470  *
1471  * Initializes HCDMA register with descriptor list address and CTD value then
1472  * starts the transfer via enabling the channel.
1473  */
1474 void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1475 				 struct dwc2_host_chan *chan)
1476 {
1477 	u32 hcchar;
1478 	u32 hc_dma;
1479 	u32 hctsiz = 0;
1480 
1481 	if (chan->do_ping)
1482 		hctsiz |= TSIZ_DOPNG;
1483 
1484 	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1485 		dwc2_set_pid_isoc(chan);
1486 
1487 	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1488 	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1489 		  TSIZ_SC_MC_PID_MASK;
1490 
1491 	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1492 	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1493 
1494 	/* Non-zero only for high-speed interrupt endpoints */
1495 	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1496 
1497 	if (dbg_hc(chan)) {
1498 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1499 			 chan->hc_num);
1500 		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
1501 			 chan->data_pid_start);
1502 		dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);
1503 	}
1504 
1505 	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
1506 
1507 	hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
1508 
1509 	/* Always start from first descriptor */
1510 	hc_dma &= ~HCDMA_CTD_MASK;
1511 	DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num), hc_dma);
1512 	if (dbg_hc(chan))
1513 		dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
1514 			 hc_dma, chan->hc_num);
1515 
1516 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1517 	hcchar &= ~HCCHAR_MULTICNT_MASK;
1518 	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1519 		  HCCHAR_MULTICNT_MASK;
1520 
1521 	if (hcchar & HCCHAR_CHDIS)
1522 		dev_warn(hsotg->dev,
1523 			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1524 			 __func__, chan->hc_num, hcchar);
1525 
1526 	/* Set host channel enable after all other setup is complete */
1527 	hcchar |= HCCHAR_CHENA;
1528 	hcchar &= ~HCCHAR_CHDIS;
1529 
1530 	if (dbg_hc(chan))
1531 		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
1532 			 (hcchar & HCCHAR_MULTICNT_MASK) >>
1533 			 HCCHAR_MULTICNT_SHIFT);
1534 
1535 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1536 	if (dbg_hc(chan))
1537 		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1538 			 chan->hc_num);
1539 
1540 	chan->xfer_started = 1;
1541 	chan->requests++;
1542 }
1543 
1544 /**
1545  * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1546  * a previous call to dwc2_hc_start_transfer()
1547  *
1548  * @hsotg: Programming view of DWC_otg controller
1549  * @chan:  Information needed to initialize the host channel
1550  *
1551  * The caller must ensure there is sufficient space in the request queue and Tx
1552  * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1553  * the controller acts autonomously to complete transfers programmed to a host
1554  * channel.
1555  *
1556  * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1557  * if there is any data remaining to be queued. For an IN transfer, another
1558  * data packet is always requested. For the SETUP phase of a control transfer,
1559  * this function does nothing.
1560  *
1561  * Return: 1 if a new request is queued, 0 if no more requests are required
1562  * for this transfer
1563  */
1564 int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1565 			      struct dwc2_host_chan *chan)
1566 {
1567 	if (dbg_hc(chan))
1568 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1569 			 chan->hc_num);
1570 
1571 	if (chan->do_split)
1572 		/* SPLITs always queue just once per channel */
1573 		return 0;
1574 
1575 	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
1576 		/* SETUPs are queued only once since they can't be NAK'd */
1577 		return 0;
1578 
1579 	if (chan->ep_is_in) {
1580 		/*
1581 		 * Always queue another request for other IN transfers. If
1582 		 * back-to-back INs are issued and NAKs are received for both,
1583 		 * the driver may still be processing the first NAK when the
1584 		 * second NAK is received. When the interrupt handler clears
1585 		 * the NAK interrupt for the first NAK, the second NAK will
1586 		 * not be seen. So we can't depend on the NAK interrupt
1587 		 * handler to requeue a NAK'd request. Instead, IN requests
1588 		 * are issued each time this function is called. When the
1589 		 * transfer completes, the extra requests for the channel will
1590 		 * be flushed.
1591 		 */
1592 		u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1593 
1594 		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1595 		hcchar |= HCCHAR_CHENA;
1596 		hcchar &= ~HCCHAR_CHDIS;
1597 		if (dbg_hc(chan))
1598 			dev_vdbg(hsotg->dev, "	 IN xfer: hcchar = 0x%08x\n",
1599 				 hcchar);
1600 		DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1601 		chan->requests++;
1602 		return 1;
1603 	}
1604 
1605 	/* OUT transfers */
1606 
1607 	if (chan->xfer_count < chan->xfer_len) {
1608 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1609 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1610 			u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1611 
1612 			dwc2_hc_set_even_odd_frame(hsotg, chan,
1613 						   &hcchar);
1614 		}
1615 
1616 		/* Load OUT packet into the appropriate Tx FIFO */
1617 		dwc2_hc_write_packet(hsotg, chan);
1618 		chan->requests++;
1619 		return 1;
1620 	}
1621 
1622 	return 0;
1623 }
1624 
1625 /**
1626  * dwc2_hc_do_ping() - Starts a PING transfer
1627  *
1628  * @hsotg: Programming view of DWC_otg controller
1629  * @chan:  Information needed to initialize the host channel
1630  *
1631  * This function should only be called in Slave mode. The Do Ping bit is set in
1632  * the HCTSIZ register, then the channel is enabled.
1633  */
1634 void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1635 {
1636 	u32 hcchar;
1637 	u32 hctsiz;
1638 
1639 	if (dbg_hc(chan))
1640 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1641 			 chan->hc_num);
1642 
1643 
1644 	hctsiz = TSIZ_DOPNG;
1645 	hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1646 	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
1647 
1648 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1649 	hcchar |= HCCHAR_CHENA;
1650 	hcchar &= ~HCCHAR_CHDIS;
1651 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1652 }
1653 
1654 /**
1655  * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
1656  * the HFIR register according to PHY type and speed
1657  *
1658  * @hsotg: Programming view of DWC_otg controller
1659  *
1660  * NOTE: The caller can modify the value of the HFIR register only after the
1661  * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
1662  * has been set
1663  */
1664 u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
1665 {
1666 	u32 usbcfg;
1667 	u32 hprt0;
1668 	int clock = 60;	/* default value */
1669 
1670 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
1671 	hprt0 = DWC2_READ_4(hsotg, HPRT0);
1672 
1673 	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
1674 	    !(usbcfg & GUSBCFG_PHYIF16))
1675 		clock = 60;
1676 	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
1677 	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
1678 		clock = 48;
1679 	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1680 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1681 		clock = 30;
1682 	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1683 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
1684 		clock = 60;
1685 	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1686 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1687 		clock = 48;
1688 	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
1689 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
1690 		clock = 48;
1691 	if ((usbcfg & GUSBCFG_PHYSEL) &&
1692 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
1693 		clock = 48;
1694 
1695 	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
1696 		/* High speed case */
1697 		return 125 * clock;
1698 	else
1699 		/* FS/LS case */
1700 		return 1000 * clock;
1701 }
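
/*
 * Usage sketch (hedged): once the port is enabled, the HCD may reload the
 * HFIR frame interval with the value computed here (typically when
 * reload_ctl is set), along the lines of:
 *
 *	u32 hfir = DWC2_READ_4(hsotg, HFIR);
 *
 *	hfir &= ~HFIR_FRINT_MASK;
 *	hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
 *		HFIR_FRINT_MASK;
 *	DWC2_WRITE_4(hsotg, HFIR, hfir);
 *
 * HFIR_FRINT_MASK and HFIR_FRINT_SHIFT are assumed to be defined next to
 * HFIR in the register header.
 */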
1702 
1703 /**
1704  * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
1705  * buffer
1706  *
1707  * @hsotg:   Programming view of DWC_otg controller
1708  * @dest:    Destination buffer for the packet
1709  * @bytes:   Number of bytes to copy to the destination
1710  */
1711 void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
1712 {
1713 	bus_size_t fifo = HCFIFO(0);
1714 	u32 *data_buf = (u32 *)dest;
1715 	int word_count = (bytes + 3) / 4;
1716 	int i;
1717 
1718 	/*
1719 	 * Todo: Account for the case where dest is not dword aligned. This
1720 	 * requires reading data from the FIFO into a u32 temp buffer, then
1721 	 * moving it into the data buffer.
1722 	 */
1723 
1724 	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
1725 
1726 	for (i = 0; i < word_count; i++, data_buf++)
1727 		*data_buf = DWC2_READ_4(hsotg, fifo);
1728 }
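
/*
 * Usage sketch (hedged): in Slave mode the Rx FIFO level interrupt handler
 * typically pops GRXSTSP, extracts the byte count, and drains the FIFO into
 * the current channel's buffer, roughly:
 *
 *	u32 grxsts = DWC2_READ_4(hsotg, GRXSTSP);
 *	u32 bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
 *
 *	if (bcnt > 0)
 *		dwc2_read_packet(hsotg, chan->xfer_buf + chan->xfer_count,
 *				 bcnt);
 *
 * GRXSTSP and the GRXSTS_* field macros are assumed to come from the
 * register header; see also the Todo above about non-dword-aligned buffers.
 */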
1729 
1730 /**
1731  * dwc2_dump_host_registers() - Prints the host registers
1732  *
1733  * @hsotg: Programming view of DWC_otg controller
1734  *
1735  * NOTE: This function will be removed once the peripheral controller code
1736  * is integrated and the driver is stable
1737  */
1738 void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
1739 {
1740 #ifdef DWC2_DEBUG
1741 	bus_size_t addr;
1742 	int i;
1743 
1744 	dev_dbg(hsotg->dev, "Host Global Registers\n");
1745 	addr = HCFG;
1746 	dev_dbg(hsotg->dev, "HCFG	 @0x%08lX : 0x%08X\n",
1747 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1748 	addr = HFIR;
1749 	dev_dbg(hsotg->dev, "HFIR	 @0x%08lX : 0x%08X\n",
1750 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1751 	addr = HFNUM;
1752 	dev_dbg(hsotg->dev, "HFNUM	 @0x%08lX : 0x%08X\n",
1753 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1754 	addr = HPTXSTS;
1755 	dev_dbg(hsotg->dev, "HPTXSTS	 @0x%08lX : 0x%08X\n",
1756 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1757 	addr = HAINT;
1758 	dev_dbg(hsotg->dev, "HAINT	 @0x%08lX : 0x%08X\n",
1759 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1760 	addr = HAINTMSK;
1761 	dev_dbg(hsotg->dev, "HAINTMSK	 @0x%08lX : 0x%08X\n",
1762 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1763 	if (hsotg->core_params->dma_desc_enable > 0) {
1764 		addr = HFLBADDR;
1765 		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
1766 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1767 	}
1768 
1769 	addr = HPRT0;
1770 	dev_dbg(hsotg->dev, "HPRT0	 @0x%08lX : 0x%08X\n",
1771 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1772 
1773 	for (i = 0; i < hsotg->core_params->host_channels; i++) {
1774 		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
1775 		addr = HCCHAR(i);
1776 		dev_dbg(hsotg->dev, "HCCHAR	 @0x%08lX : 0x%08X\n",
1777 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1778 		addr = HCSPLT(i);
1779 		dev_dbg(hsotg->dev, "HCSPLT	 @0x%08lX : 0x%08X\n",
1780 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1781 		addr = HCINT(i);
1782 		dev_dbg(hsotg->dev, "HCINT	 @0x%08lX : 0x%08X\n",
1783 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1784 		addr = HCINTMSK(i);
1785 		dev_dbg(hsotg->dev, "HCINTMSK	 @0x%08lX : 0x%08X\n",
1786 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1787 		addr = HCTSIZ(i);
1788 		dev_dbg(hsotg->dev, "HCTSIZ	 @0x%08lX : 0x%08X\n",
1789 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1790 		addr = HCDMA(i);
1791 		dev_dbg(hsotg->dev, "HCDMA	 @0x%08lX : 0x%08X\n",
1792 			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1793 		if (hsotg->core_params->dma_desc_enable > 0) {
1794 			addr = HCDMAB(i);
1795 			dev_dbg(hsotg->dev, "HCDMAB	 @0x%08lX : 0x%08X\n",
1796 				(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1797 		}
1798 	}
1799 #endif
1800 }
1801 
1802 /**
1803  * dwc2_dump_global_registers() - Prints the core global registers
1804  *
1805  * @hsotg: Programming view of DWC_otg controller
1806  *
1807  * NOTE: This function will be removed once the peripheral controller code
1808  * is integrated and the driver is stable
1809  */
1810 void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
1811 {
1812 #ifdef DWC2_DEBUG
1813 	bus_size_t addr;
1814 
1815 	dev_dbg(hsotg->dev, "Core Global Registers\n");
1816 	addr = GOTGCTL;
1817 	dev_dbg(hsotg->dev, "GOTGCTL	 @0x%08lX : 0x%08X\n",
1818 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1819 	addr = GOTGINT;
1820 	dev_dbg(hsotg->dev, "GOTGINT	 @0x%08lX : 0x%08X\n",
1821 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1822 	addr = GAHBCFG;
1823 	dev_dbg(hsotg->dev, "GAHBCFG	 @0x%08lX : 0x%08X\n",
1824 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1825 	addr = GUSBCFG;
1826 	dev_dbg(hsotg->dev, "GUSBCFG	 @0x%08lX : 0x%08X\n",
1827 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1828 	addr = GRSTCTL;
1829 	dev_dbg(hsotg->dev, "GRSTCTL	 @0x%08lX : 0x%08X\n",
1830 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1831 	addr = GINTSTS;
1832 	dev_dbg(hsotg->dev, "GINTSTS	 @0x%08lX : 0x%08X\n",
1833 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1834 	addr = GINTMSK;
1835 	dev_dbg(hsotg->dev, "GINTMSK	 @0x%08lX : 0x%08X\n",
1836 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1837 	addr = GRXSTSR;
1838 	dev_dbg(hsotg->dev, "GRXSTSR	 @0x%08lX : 0x%08X\n",
1839 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1840 	addr = GRXFSIZ;
1841 	dev_dbg(hsotg->dev, "GRXFSIZ	 @0x%08lX : 0x%08X\n",
1842 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1843 	addr = GNPTXFSIZ;
1844 	dev_dbg(hsotg->dev, "GNPTXFSIZ	 @0x%08lX : 0x%08X\n",
1845 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1846 	addr = GNPTXSTS;
1847 	dev_dbg(hsotg->dev, "GNPTXSTS	 @0x%08lX : 0x%08X\n",
1848 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1849 	addr = GI2CCTL;
1850 	dev_dbg(hsotg->dev, "GI2CCTL	 @0x%08lX : 0x%08X\n",
1851 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1852 	addr = GPVNDCTL;
1853 	dev_dbg(hsotg->dev, "GPVNDCTL	 @0x%08lX : 0x%08X\n",
1854 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1855 	addr = GGPIO;
1856 	dev_dbg(hsotg->dev, "GGPIO	 @0x%08lX : 0x%08X\n",
1857 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1858 	addr = GUID;
1859 	dev_dbg(hsotg->dev, "GUID	 @0x%08lX : 0x%08X\n",
1860 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1861 	addr = GSNPSID;
1862 	dev_dbg(hsotg->dev, "GSNPSID	 @0x%08lX : 0x%08X\n",
1863 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1864 	addr = GHWCFG1;
1865 	dev_dbg(hsotg->dev, "GHWCFG1	 @0x%08lX : 0x%08X\n",
1866 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1867 	addr = GHWCFG2;
1868 	dev_dbg(hsotg->dev, "GHWCFG2	 @0x%08lX : 0x%08X\n",
1869 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1870 	addr = GHWCFG3;
1871 	dev_dbg(hsotg->dev, "GHWCFG3	 @0x%08lX : 0x%08X\n",
1872 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1873 	addr = GHWCFG4;
1874 	dev_dbg(hsotg->dev, "GHWCFG4	 @0x%08lX : 0x%08X\n",
1875 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1876 	addr = GLPMCFG;
1877 	dev_dbg(hsotg->dev, "GLPMCFG	 @0x%08lX : 0x%08X\n",
1878 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1879 	addr = GPWRDN;
1880 	dev_dbg(hsotg->dev, "GPWRDN	 @0x%08lX : 0x%08X\n",
1881 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1882 	addr = GDFIFOCFG;
1883 	dev_dbg(hsotg->dev, "GDFIFOCFG	 @0x%08lX : 0x%08X\n",
1884 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1885 	addr = HPTXFSIZ;
1886 	dev_dbg(hsotg->dev, "HPTXFSIZ	 @0x%08lX : 0x%08X\n",
1887 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1888 
1889 	addr = PCGCTL;
1890 	dev_dbg(hsotg->dev, "PCGCTL	 @0x%08lX : 0x%08X\n",
1891 		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
1892 #endif
1893 }
1894 
1895 /**
1896  * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
1897  *
1898  * @hsotg: Programming view of DWC_otg controller
1899  * @num:   Tx FIFO to flush
1900  */
1901 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
1902 {
1903 	u32 greset;
1904 	int count = 0;
1905 
1906 	dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
1907 
1908 	greset = GRSTCTL_TXFFLSH;
1909 	greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
1910 	DWC2_WRITE_4(hsotg, GRSTCTL, greset);
1911 
1912 	do {
1913 		greset = DWC2_READ_4(hsotg, GRSTCTL);
1914 		if (++count > 10000) {
1915 			dev_warn(hsotg->dev,
1916 				 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
1917 				 __func__, greset,
1918 				 DWC2_READ_4(hsotg, GNPTXSTS));
1919 			break;
1920 		}
1921 		udelay(1);
1922 	} while (greset & GRSTCTL_TXFFLSH);
1923 
1924 	/* Wait for at least 3 PHY Clocks */
1925 	udelay(1);
1926 }
1927 
1928 /**
1929  * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
1930  *
1931  * @hsotg: Programming view of DWC_otg controller
1932  */
1933 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
1934 {
1935 	u32 greset;
1936 	int count = 0;
1937 
1938 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
1939 
1940 	greset = GRSTCTL_RXFFLSH;
1941 	DWC2_WRITE_4(hsotg, GRSTCTL, greset);
1942 
1943 	do {
1944 		greset = DWC2_READ_4(hsotg, GRSTCTL);
1945 		if (++count > 10000) {
1946 			dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
1947 				 __func__, greset);
1948 			break;
1949 		}
1950 		udelay(1);
1951 	} while (greset & GRSTCTL_RXFFLSH);
1952 
1953 	/* Wait for at least 3 PHY Clocks */
1954 	udelay(1);
1955 }
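
/*
 * Usage sketch (hedged): host initialization flushes all FIFOs after the
 * FIFO sizes have been programmed, roughly:
 *
 *	dwc2_flush_tx_fifo(hsotg, 0x10);
 *	dwc2_flush_rx_fifo(hsotg);
 *
 * where a TxFNum value of 0x10 in GRSTCTL selects all Tx FIFOs.
 */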
1956 
1957 #define DWC2_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))
1958 
1959 /* Parameter access functions */
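/*
 * Each setter below follows the same pattern: validate the requested value
 * against the hardware configuration captured in hsotg->hw_params, report an
 * error only when the caller passed an explicit (non-negative) value, fall
 * back to a hardware-derived default, and store the result in
 * hsotg->core_params. A negative value therefore means "use the default".
 */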
1960 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
1961 {
1962 	int valid = 1;
1963 
1964 	switch (val) {
1965 	case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
1966 		if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
1967 			valid = 0;
1968 		break;
1969 	case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
1970 		switch (hsotg->hw_params.op_mode) {
1971 		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
1972 		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
1973 		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
1974 		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
1975 			break;
1976 		default:
1977 			valid = 0;
1978 			break;
1979 		}
1980 		break;
1981 	case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
1982 		/* always valid */
1983 		break;
1984 	default:
1985 		valid = 0;
1986 		break;
1987 	}
1988 
1989 	if (!valid) {
1990 		if (val >= 0)
1991 			dev_err(hsotg->dev,
1992 				"%d invalid for otg_cap parameter. Check HW configuration.\n",
1993 				val);
1994 		switch (hsotg->hw_params.op_mode) {
1995 		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
1996 			val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
1997 			break;
1998 		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
1999 		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2000 		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2001 			val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
2002 			break;
2003 		default:
2004 			val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
2005 			break;
2006 		}
2007 		dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
2008 	}
2009 
2010 	hsotg->core_params->otg_cap = val;
2011 }
2012 
2013 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2014 {
2015 	int valid = 1;
2016 
2017 	if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
2018 		valid = 0;
2019 	if (val < 0)
2020 		valid = 0;
2021 
2022 	if (!valid) {
2023 		if (val >= 0)
2024 			dev_err(hsotg->dev,
2025 				"%d invalid for dma_enable parameter. Check HW configuration.\n",
2026 				val);
2027 		val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
2028 		dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2029 	}
2030 
2031 	hsotg->core_params->dma_enable = val;
2032 }
2033 
2034 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2035 {
2036 	int valid = 1;
2037 
2038 	if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2039 			!hsotg->hw_params.dma_desc_enable))
2040 		valid = 0;
2041 	if (val < 0)
2042 		valid = 0;
2043 
2044 	if (!valid) {
2045 		if (val >= 0)
2046 			dev_err(hsotg->dev,
2047 				"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2048 				val);
2049 		val = (hsotg->core_params->dma_enable > 0 &&
2050 			hsotg->hw_params.dma_desc_enable);
2051 		dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2052 	}
2053 
2054 	hsotg->core_params->dma_desc_enable = val;
2055 }
2056 
2057 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2058 						 int val)
2059 {
2060 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2061 		if (val >= 0) {
2062 			dev_err(hsotg->dev,
2063 				"Wrong value for host_support_fs_ls_low_power\n");
2064 			dev_err(hsotg->dev,
2065 				"host_support_fs_ls_low_power must be 0 or 1\n");
2066 		}
2067 		val = 0;
2068 		dev_dbg(hsotg->dev,
2069 			"Setting host_support_fs_ls_low_power to %d\n", val);
2070 	}
2071 
2072 	hsotg->core_params->host_support_fs_ls_low_power = val;
2073 }
2074 
2075 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2076 {
2077 	int valid = 1;
2078 
2079 	if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
2080 		valid = 0;
2081 	if (val < 0)
2082 		valid = 0;
2083 
2084 	if (!valid) {
2085 		if (val >= 0)
2086 			dev_err(hsotg->dev,
2087 				"%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2088 				val);
2089 		val = hsotg->hw_params.enable_dynamic_fifo;
2090 		dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2091 	}
2092 
2093 	hsotg->core_params->enable_dynamic_fifo = val;
2094 }
2095 
2096 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2097 {
2098 	int valid = 1;
2099 
2100 	if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
2101 		valid = 0;
2102 
2103 	if (!valid) {
2104 		if (val >= 0)
2105 			dev_err(hsotg->dev,
2106 				"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2107 				val);
2108 		val = hsotg->hw_params.host_rx_fifo_size;
2109 		dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2110 	}
2111 
2112 	hsotg->core_params->host_rx_fifo_size = val;
2113 }
2114 
2115 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2116 {
2117 	int valid = 1;
2118 
2119 	if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
2120 		valid = 0;
2121 
2122 	if (!valid) {
2123 		if (val >= 0)
2124 			dev_err(hsotg->dev,
2125 				"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2126 				val);
2127 		val = hsotg->hw_params.host_nperio_tx_fifo_size;
2128 		dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2129 			val);
2130 	}
2131 
2132 	hsotg->core_params->host_nperio_tx_fifo_size = val;
2133 }
2134 
2135 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2136 {
2137 	int valid = 1;
2138 
2139 	if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
2140 		valid = 0;
2141 
2142 	if (!valid) {
2143 		if (val >= 0)
2144 			dev_err(hsotg->dev,
2145 				"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2146 				val);
2147 		val = hsotg->hw_params.host_perio_tx_fifo_size;
2148 		dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2149 			val);
2150 	}
2151 
2152 	hsotg->core_params->host_perio_tx_fifo_size = val;
2153 }
2154 
2155 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2156 {
2157 	int valid = 1;
2158 
2159 	if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
2160 		valid = 0;
2161 
2162 	if (!valid) {
2163 		if (val >= 0)
2164 			dev_err(hsotg->dev,
2165 				"%d invalid for max_transfer_size. Check HW configuration.\n",
2166 				val);
2167 		val = hsotg->hw_params.max_transfer_size;
2168 		dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2169 	}
2170 
2171 	hsotg->core_params->max_transfer_size = val;
2172 }
2173 
2174 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2175 {
2176 	int valid = 1;
2177 
2178 	if (val < 15 || val > hsotg->hw_params.max_packet_count)
2179 		valid = 0;
2180 
2181 	if (!valid) {
2182 		if (val >= 0)
2183 			dev_err(hsotg->dev,
2184 				"%d invalid for max_packet_count. Check HW configuration.\n",
2185 				val);
2186 		val = hsotg->hw_params.max_packet_count;
2187 		dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2188 	}
2189 
2190 	hsotg->core_params->max_packet_count = val;
2191 }
2192 
2193 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2194 {
2195 	int valid = 1;
2196 
2197 	if (val < 1 || val > hsotg->hw_params.host_channels)
2198 		valid = 0;
2199 
2200 	if (!valid) {
2201 		if (val >= 0)
2202 			dev_err(hsotg->dev,
2203 				"%d invalid for host_channels. Check HW configuration.\n",
2204 				val);
2205 		val = hsotg->hw_params.host_channels;
2206 		dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2207 	}
2208 
2209 	hsotg->core_params->host_channels = val;
2210 }
2211 
2212 void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
2213 {
2214 	int valid = 0;
2215 	u32 hs_phy_type, fs_phy_type;
2216 
2217 	if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
2218 			       DWC2_PHY_TYPE_PARAM_ULPI)) {
2219 		if (val >= 0) {
2220 			dev_err(hsotg->dev, "Wrong value for phy_type\n");
2221 			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2222 		}
2223 
2224 		valid = 0;
2225 	}
2226 
2227 	hs_phy_type = hsotg->hw_params.hs_phy_type;
2228 	fs_phy_type = hsotg->hw_params.fs_phy_type;
2229 	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2230 	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2231 	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2232 		valid = 1;
2233 	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2234 		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2235 		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2236 		valid = 1;
2237 	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2238 		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2239 		valid = 1;
2240 
2241 	if (!valid) {
2242 		if (val >= 0)
2243 			dev_err(hsotg->dev,
2244 				"%d invalid for phy_type. Check HW configuration.\n",
2245 				val);
2246 		val = DWC2_PHY_TYPE_PARAM_FS;
2247 		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2248 			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2249 			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2250 				val = DWC2_PHY_TYPE_PARAM_UTMI;
2251 			else
2252 				val = DWC2_PHY_TYPE_PARAM_ULPI;
2253 		}
2254 		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2255 	}
2256 
2257 	hsotg->core_params->phy_type = val;
2258 }
2259 
2260 static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2261 {
2262 	return hsotg->core_params->phy_type;
2263 }
2264 
2265 void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2266 {
2267 	int valid = 1;
2268 
2269 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2270 		if (val >= 0) {
2271 			dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2272 			dev_err(hsotg->dev, "speed parameter must be 0 or 1\n");
2273 		}
2274 		valid = 0;
2275 	}
2276 
2277 	if (val == DWC2_SPEED_PARAM_HIGH &&
2278 	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2279 		valid = 0;
2280 
2281 	if (!valid) {
2282 		if (val >= 0)
2283 			dev_err(hsotg->dev,
2284 				"%d invalid for speed parameter. Check HW configuration.\n",
2285 				val);
2286 		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
2287 				DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
2288 		dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2289 	}
2290 
2291 	hsotg->core_params->speed = val;
2292 }
2293 
2294 void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2295 {
2296 	int valid = 1;
2297 
2298 	if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2299 			       DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
2300 		if (val >= 0) {
2301 			dev_err(hsotg->dev,
2302 				"Wrong value for host_ls_low_power_phy_clk parameter\n");
2303 			dev_err(hsotg->dev,
2304 				"host_ls_low_power_phy_clk must be 0 or 1\n");
2305 		}
2306 		valid = 0;
2307 	}
2308 
2309 	if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2310 	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2311 		valid = 0;
2312 
2313 	if (!valid) {
2314 		if (val >= 0)
2315 			dev_err(hsotg->dev,
2316 				"%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2317 				val);
2318 		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2319 			? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2320 			: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2321 		dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2322 			val);
2323 	}
2324 
2325 	hsotg->core_params->host_ls_low_power_phy_clk = val;
2326 }
2327 
2328 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2329 {
2330 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2331 		if (val >= 0) {
2332 			dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2333 			dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
2334 		}
2335 		val = 0;
2336 		dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
2337 	}
2338 
2339 	hsotg->core_params->phy_ulpi_ddr = val;
2340 }
2341 
2342 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2343 {
2344 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2345 		if (val >= 0) {
2346 			dev_err(hsotg->dev,
2347 				"Wrong value for phy_ulpi_ext_vbus\n");
2348 			dev_err(hsotg->dev,
2349 				"phy_ulpi_ext_vbus must be 0 or 1\n");
2350 		}
2351 		val = 0;
2352 		dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2353 	}
2354 
2355 	hsotg->core_params->phy_ulpi_ext_vbus = val;
2356 }
2357 
2358 void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2359 {
2360 	int valid = 0;
2361 
2362 	switch (hsotg->hw_params.utmi_phy_data_width) {
2363 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
2364 		valid = (val == 8);
2365 		break;
2366 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
2367 		valid = (val == 16);
2368 		break;
2369 	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
2370 		valid = (val == 8 || val == 16);
2371 		break;
2372 	}
2373 
2374 	if (!valid) {
2375 		if (val >= 0) {
2376 			dev_err(hsotg->dev,
2377 				"%d invalid for phy_utmi_width. Check HW configuration.\n",
2378 				val);
2379 		}
2380 		val = (hsotg->hw_params.utmi_phy_data_width ==
2381 		       GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
2382 		dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2383 	}
2384 
2385 	hsotg->core_params->phy_utmi_width = val;
2386 }
2387 
2388 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
2389 {
2390 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2391 		if (val >= 0) {
2392 			dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2393 			dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2394 		}
2395 		val = 0;
2396 		dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
2397 	}
2398 
2399 	hsotg->core_params->ulpi_fs_ls = val;
2400 }
2401 
2402 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
2403 {
2404 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2405 		if (val >= 0) {
2406 			dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2407 			dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2408 		}
2409 		val = 0;
2410 		dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
2411 	}
2412 
2413 	hsotg->core_params->ts_dline = val;
2414 }
2415 
2416 void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
2417 {
2418 	int valid = 1;
2419 
2420 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2421 		if (val >= 0) {
2422 			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
2423 			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
2424 		}
2425 
2426 		valid = 0;
2427 	}
2428 
2429 	if (val == 1 && !(hsotg->hw_params.i2c_enable))
2430 		valid = 0;
2431 
2432 	if (!valid) {
2433 		if (val >= 0)
2434 			dev_err(hsotg->dev,
2435 				"%d invalid for i2c_enable. Check HW configuration.\n",
2436 				val);
2437 		val = hsotg->hw_params.i2c_enable;
2438 		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2439 	}
2440 
2441 	hsotg->core_params->i2c_enable = val;
2442 }
2443 
2444 void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
2445 {
2446 	int valid = 1;
2447 
2448 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2449 		if (val >= 0) {
2450 			dev_err(hsotg->dev,
2451 				"Wrong value for en_multiple_tx_fifo\n");
2452 			dev_err(hsotg->dev,
2453 				"en_multiple_tx_fifo must be 0 or 1\n");
2454 		}
2455 		valid = 0;
2456 	}
2457 
2458 	if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
2459 		valid = 0;
2460 
2461 	if (!valid) {
2462 		if (val >= 0)
2463 			dev_err(hsotg->dev,
2464 				"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2465 				val);
2466 		val = hsotg->hw_params.en_multiple_tx_fifo;
2467 		dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
2468 	}
2469 
2470 	hsotg->core_params->en_multiple_tx_fifo = val;
2471 }
2472 
2473 void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
2474 {
2475 	int valid = 1;
2476 
2477 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2478 		if (val >= 0) {
2479 			dev_err(hsotg->dev,
2480 				"'%d' invalid for parameter reload_ctl\n", val);
2481 			dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2482 		}
2483 		valid = 0;
2484 	}
2485 
2486 	if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
2487 		valid = 0;
2488 
2489 	if (!valid) {
2490 		if (val >= 0)
2491 			dev_err(hsotg->dev,
2492 				"%d invalid for parameter reload_ctl. Check HW configuration.\n",
2493 				val);
2494 		val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
2495 		dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
2496 	}
2497 
2498 	hsotg->core_params->reload_ctl = val;
2499 }
2500 
2501 void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
2502 {
2503 	if (val != -1)
2504 		hsotg->core_params->ahbcfg = val;
2505 	else
2506 		hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
2507 						GAHBCFG_HBSTLEN_SHIFT;
2508 }
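
/*
 * Hedged example: platform glue that wants longer AHB bursts could pass an
 * explicit value instead of -1, e.g.
 *
 *	params.ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
 *
 * GAHBCFG_HBSTLEN_INCR16 is assumed to be defined alongside
 * GAHBCFG_HBSTLEN_INCR4 in the register header.
 */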
2509 
2510 void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
2511 {
2512 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2513 		if (val >= 0) {
2514 			dev_err(hsotg->dev,
2515 				"'%d' invalid for parameter otg_ver\n", val);
2516 			dev_err(hsotg->dev,
2517 				"otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2518 		}
2519 		val = 0;
2520 		dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
2521 	}
2522 
2523 	hsotg->core_params->otg_ver = val;
2524 }
2525 
2526 static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
2527 {
2528 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2529 		if (val >= 0) {
2530 			dev_err(hsotg->dev,
2531 				"'%d' invalid for parameter uframe_sched\n",
2532 				val);
2533 			dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
2534 		}
2535 		val = 1;
2536 		dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
2537 	}
2538 
2539 	hsotg->core_params->uframe_sched = val;
2540 }
2541 
2542 /*
2543  * This function is called during module initialization to pass module parameters
2544  * for the DWC_otg core.
2545  */
2546 void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
2547 			 const struct dwc2_core_params *params)
2548 {
2549 	dev_dbg(hsotg->dev, "%s()\n", __func__);
2550 
2551 	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
2552 	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
2553 	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
2554 	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
2555 			params->host_support_fs_ls_low_power);
2556 	dwc2_set_param_enable_dynamic_fifo(hsotg,
2557 			params->enable_dynamic_fifo);
2558 	dwc2_set_param_host_rx_fifo_size(hsotg,
2559 			params->host_rx_fifo_size);
2560 	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
2561 			params->host_nperio_tx_fifo_size);
2562 	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
2563 			params->host_perio_tx_fifo_size);
2564 	dwc2_set_param_max_transfer_size(hsotg,
2565 			params->max_transfer_size);
2566 	dwc2_set_param_max_packet_count(hsotg,
2567 			params->max_packet_count);
2568 	dwc2_set_param_host_channels(hsotg, params->host_channels);
2569 	dwc2_set_param_phy_type(hsotg, params->phy_type);
2570 	dwc2_set_param_speed(hsotg, params->speed);
2571 	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
2572 			params->host_ls_low_power_phy_clk);
2573 	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
2574 	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
2575 			params->phy_ulpi_ext_vbus);
2576 	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
2577 	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
2578 	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
2579 	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
2580 	dwc2_set_param_en_multiple_tx_fifo(hsotg,
2581 			params->en_multiple_tx_fifo);
2582 	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
2583 	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
2584 	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
2585 	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
2586 }
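
/*
 * Usage sketch (hedged): a bus attachment typically fills a
 * struct dwc2_core_params with -1 ("derive from hardware") for most fields
 * and hands it to dwc2_set_parameters(), e.g.:
 *
 *	static const struct dwc2_core_params params = {
 *		.otg_cap		= DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
 *		.dma_enable		= -1,
 *		.dma_desc_enable	= -1,
 *		.speed			= -1,
 *		.host_channels		= -1,
 *		...
 *	};
 *
 *	dwc2_set_parameters(hsotg, &params);
 *
 * Only fields consumed above are shown; any value a setter rejects is
 * replaced by a hardware-derived default.
 */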
2587 
2588 /**
2589  * During device initialization, read various hardware configuration
2590  * registers and interpret the contents.
2591  */
2592 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
2593 {
2594 	struct dwc2_hw_params *hw = &hsotg->hw_params;
2595 	unsigned width;
2596 	u32 hwcfg2, hwcfg3, hwcfg4;
2597 	u32 hptxfsiz, grxfsiz, gnptxfsiz;
2598 	u32 gusbcfg;
2599 
2600 	/*
2601 	 * Attempt to ensure this device is really a DWC_otg Controller.
2602 	 * Read and verify the GSNPSID register contents. The value should be
2603 	 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
2604 	 * as in "OTG version 2.xx" or "OTG version 3.xx".
2605 	 */
2606 	hw->snpsid = DWC2_READ_4(hsotg, GSNPSID);
2607 	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
2608 	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
2609 		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
2610 			hw->snpsid);
2611 		return -ENODEV;
2612 	}
2613 
2614 	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
2615 		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
2616 		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
2617 
2618 	hwcfg2 = DWC2_READ_4(hsotg, GHWCFG2);
2619 	hwcfg3 = DWC2_READ_4(hsotg, GHWCFG3);
2620 	hwcfg4 = DWC2_READ_4(hsotg, GHWCFG4);
2621 	gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ);
2622 	grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
2623 
2624 	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", DWC2_READ_4(hsotg, GHWCFG1));
2625 	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
2626 	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
2627 	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
2628 	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
2629 	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
2630 
2631 	/* Force host mode to get HPTXFSIZ exact power on value */
2632 	gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
2633 	gusbcfg |= GUSBCFG_FORCEHOSTMODE;
2634 	DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
2635 	usleep_range(100000, 150000);
2636 
2637 	hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ);
2638 	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
2639 	gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
2640 	gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
2641 	DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
2642 	usleep_range(100000, 150000);
2643 
2644 	/* hwcfg2 */
2645 	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
2646 		      GHWCFG2_OP_MODE_SHIFT;
2647 	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
2648 		   GHWCFG2_ARCHITECTURE_SHIFT;
2649 	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
2650 	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
2651 				GHWCFG2_NUM_HOST_CHAN_SHIFT);
2652 	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
2653 			  GHWCFG2_HS_PHY_TYPE_SHIFT;
2654 	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
2655 			  GHWCFG2_FS_PHY_TYPE_SHIFT;
2656 	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
2657 			 GHWCFG2_NUM_DEV_EP_SHIFT;
2658 	hw->nperio_tx_q_depth =
2659 		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
2660 		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
2661 	hw->host_perio_tx_q_depth =
2662 		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
2663 		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
2664 	hw->dev_token_q_depth =
2665 		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
2666 		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
2667 
2668 	/* hwcfg3 */
2669 	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
2670 		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
2671 	hw->max_transfer_size = (1 << (width + 11)) - 1;
2672 	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
2673 		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
2674 	hw->max_packet_count = (1 << (width + 4)) - 1;
2675 	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
2676 	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
2677 			      GHWCFG3_DFIFO_DEPTH_SHIFT;
2678 
2679 	/* hwcfg4 */
2680 	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
2681 	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
2682 				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
2683 	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
2684 	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
2685 	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
2686 				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
2687 
2688 	/* fifo sizes */
2689 	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
2690 				GRXFSIZ_DEPTH_SHIFT;
2691 	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2692 				       FIFOSIZE_DEPTH_SHIFT;
2693 	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2694 				      FIFOSIZE_DEPTH_SHIFT;
2695 
2696 	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
2697 	dev_dbg(hsotg->dev, "  op_mode=%d\n",
2698 		hw->op_mode);
2699 	dev_dbg(hsotg->dev, "  arch=%d\n",
2700 		hw->arch);
2701 	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
2702 		hw->dma_desc_enable);
2703 	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
2704 		hw->power_optimized);
2705 	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
2706 		hw->i2c_enable);
2707 	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
2708 		hw->hs_phy_type);
2709 	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
2710 		hw->fs_phy_type);
2711 	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
2712 		hw->utmi_phy_data_width);
2713 	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
2714 		hw->num_dev_ep);
2715 	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
2716 		hw->num_dev_perio_in_ep);
2717 	dev_dbg(hsotg->dev, "  host_channels=%d\n",
2718 		hw->host_channels);
2719 	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
2720 		hw->max_transfer_size);
2721 	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
2722 		hw->max_packet_count);
2723 	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
2724 		hw->nperio_tx_q_depth);
2725 	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
2726 		hw->host_perio_tx_q_depth);
2727 	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
2728 		hw->dev_token_q_depth);
2729 	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
2730 		hw->enable_dynamic_fifo);
2731 	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
2732 		hw->en_multiple_tx_fifo);
2733 	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
2734 		hw->total_fifo_size);
2735 	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
2736 		hw->host_rx_fifo_size);
2737 	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
2738 		hw->host_nperio_tx_fifo_size);
2739 	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
2740 		hw->host_perio_tx_fifo_size);
2741 	dev_dbg(hsotg->dev, "\n");
2742 
2743 	return 0;
2744 }
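
/*
 * Hedged ordering note: dwc2_get_hwparams() is expected to run before
 * dwc2_set_parameters(), since every setter validates its value against
 * hsotg->hw_params, e.g.:
 *
 *	retval = dwc2_get_hwparams(hsotg);
 *	if (retval)
 *		return retval;		(error handling is bus-specific)
 *	dwc2_set_parameters(hsotg, &params);
 */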
2745 
2746 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
2747 {
2748 	return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
2749 }
2750 
2751 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
2752 {
2753 	if (DWC2_READ_4(hsotg, GSNPSID) == 0xffffffff)
2754 		return false;
2755 	else
2756 		return true;
2757 }
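
/*
 * Usage sketch (hedged): interrupt handlers typically bail out early when
 * the controller has gone away (for example across a PHY power change),
 * roughly:
 *
 *	if (!dwc2_is_controller_alive(hsotg)) {
 *		dev_warn(hsotg->dev, "Controller is dead\n");
 *		return;
 *	}
 */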
2758 
2759 /**
2760  * dwc2_enable_global_interrupts() - Enables the controller's Global
2761  * Interrupt in the AHB Config register
2762  *
2763  * @hsotg: Programming view of DWC_otg controller
2764  */
2765 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
2766 {
2767 	u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
2768 
2769 	ahbcfg |= GAHBCFG_GLBL_INTR_EN;
2770 	DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
2771 }
2772 
2773 /**
2774  * dwc2_disable_global_interrupts() - Disables the controller's Global
2775  * Interrupt in the AHB Config register
2776  *
2777  * @hsotg: Programming view of DWC_otg controller
2778  */
2779 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
2780 {
2781 	u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
2782 
2783 	ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
2784 	DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
2785 }
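
/*
 * Hedged usage note: the expected bring-up order is to call
 * dwc2_disable_global_interrupts() before core and host initialization,
 * hook up the interrupt handler, and only then call
 * dwc2_enable_global_interrupts(), so that no interrupt fires while the
 * controller is being (re)configured.
 */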
2786