xref: /openbsd-src/sys/dev/usb/dwc2/dwc2_core.c (revision 24bb5fcea3ed904bc467217bdaadb5dfc618d5bf)
1 /*	$OpenBSD: dwc2_core.c,v 1.10 2021/07/22 18:32:33 mglocker Exp $	*/
2 /*	$NetBSD: dwc2_core.c,v 1.6 2014/04/03 06:34:58 skrll Exp $	*/
3 
4 /*
5  * core.c - DesignWare HS OTG Controller common routines
6  *
7  * Copyright (C) 2004-2013 Synopsys, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions, and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. The names of the above-listed copyright holders may not be used
19  *    to endorse or promote products derived from this software without
20  *    specific prior written permission.
21  *
22  * ALTERNATIVELY, this software may be distributed under the terms of the
23  * GNU General Public License ("GPL") as published by the Free Software
24  * Foundation; either version 2 of the License, or (at your option) any
25  * later version.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
28  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
29  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * The Core code provides basic services for accessing and managing the
42  * DWC_otg hardware. These services are used by both the Host Controller
43  * Driver and the Peripheral Controller Driver.
44  */
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/signal.h>
49 #include <sys/proc.h>
50 #include <sys/timeout.h>
51 #include <sys/mutex.h>
52 #include <sys/pool.h>
53 #include <sys/task.h>
54 
55 #include <machine/bus.h>
56 
57 #include <dev/usb/usb.h>
58 #include <dev/usb/usbdi.h>
59 #include <dev/usb/usbdivar.h>
60 #include <dev/usb/usb_mem.h>
61 
62 #include <dev/usb/dwc2/dwc2.h>
63 #include <dev/usb/dwc2/dwc2var.h>
64 
65 #include <dev/usb/dwc2/dwc2_core.h>
66 #include <dev/usb/dwc2/dwc2_hcd.h>
67 
68 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
69 /**
70  * dwc2_backup_host_registers() - Backup controller host registers.
71  * When suspending usb bus, registers needs to be backuped
72  * if controller power is disabled once suspended.
73  *
74  * @hsotg: Programming view of the DWC_otg controller
75  */
76 STATIC int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
77 {
78 	struct dwc2_hregs_backup *hr;
79 	int i;
80 
81 	dev_dbg(hsotg->dev, "%s\n", __func__);
82 
83 	/* Backup Host regs */
84 	hr = &hsotg->hr_backup;
85 	hr->hcfg = DWC2_READ_4(hsotg, HCFG);
86 	hr->haintmsk = DWC2_READ_4(hsotg, HAINTMSK);
87 	for (i = 0; i < hsotg->core_params->host_channels; ++i)
88 		hr->hcintmsk[i] = DWC2_READ_4(hsotg, HCINTMSK(i));
89 
90 	hr->hprt0 = DWC2_READ_4(hsotg, HPRT0);
91 	hr->hfir = DWC2_READ_4(hsotg, HFIR);
92 	hr->valid = true;
93 
94 	return 0;
95 }
96 
97 /**
98  * dwc2_restore_host_registers() - Restore controller host registers.
99  * When resuming usb bus, device registers needs to be restored
100  * if controller power were disabled.
101  *
102  * @hsotg: Programming view of the DWC_otg controller
103  */
104 STATIC int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
105 {
106 	struct dwc2_hregs_backup *hr;
107 	int i;
108 
109 	dev_dbg(hsotg->dev, "%s\n", __func__);
110 
111 	/* Restore host regs */
112 	hr = &hsotg->hr_backup;
113 	if (!hr->valid) {
114 		dev_err(hsotg->dev, "%s: no host registers to restore\n",
115 				__func__);
116 		return -EINVAL;
117 	}
118 	hr->valid = false;
119 
120 	DWC2_WRITE_4(hsotg, HCFG, hr->hcfg);
121 	DWC2_WRITE_4(hsotg, HAINTMSK, hr->haintmsk);
122 
123 	for (i = 0; i < hsotg->core_params->host_channels; ++i)
124 		DWC2_WRITE_4(hsotg, HCINTMSK(i), hr->hcintmsk[i]);
125 
126 	DWC2_WRITE_4(hsotg, HPRT0, hr->hprt0);
127 	DWC2_WRITE_4(hsotg, HFIR, hr->hfir);
128 	hsotg->frame_number = 0;
129 
130 	return 0;
131 }
#else
/* Host mode not configured: backup/restore of host registers are no-ops. */
static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif
139 
140 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
141 	IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
142 /**
143  * dwc2_backup_device_registers() - Backup controller device registers.
144  * When suspending usb bus, registers needs to be backuped
145  * if controller power is disabled once suspended.
146  *
147  * @hsotg: Programming view of the DWC_otg controller
148  */
149 STATIC int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
150 {
151 	struct dwc2_dregs_backup *dr;
152 	int i;
153 
154 	dev_dbg(hsotg->dev, "%s\n", __func__);
155 
156 	/* Backup dev regs */
157 	dr = &hsotg->dr_backup;
158 
159 	dr->dcfg = DWC2_READ_4(hsotg, DCFG);
160 	dr->dctl = DWC2_READ_4(hsotg, DCTL);
161 	dr->daintmsk = DWC2_READ_4(hsotg, DAINTMSK);
162 	dr->diepmsk = DWC2_READ_4(hsotg, DIEPMSK);
163 	dr->doepmsk = DWC2_READ_4(hsotg, DOEPMSK);
164 
165 	for (i = 0; i < hsotg->num_of_eps; i++) {
166 		/* Backup IN EPs */
167 		dr->diepctl[i] = DWC2_READ_4(hsotg, DIEPCTL(i));
168 
169 		/* Ensure DATA PID is correctly configured */
170 		if (dr->diepctl[i] & DXEPCTL_DPID)
171 			dr->diepctl[i] |= DXEPCTL_SETD1PID;
172 		else
173 			dr->diepctl[i] |= DXEPCTL_SETD0PID;
174 
175 		dr->dieptsiz[i] = DWC2_READ_4(hsotg, DIEPTSIZ(i));
176 		dr->diepdma[i] = DWC2_READ_4(hsotg, DIEPDMA(i));
177 
178 		/* Backup OUT EPs */
179 		dr->doepctl[i] = DWC2_READ_4(hsotg, DOEPCTL(i));
180 
181 		/* Ensure DATA PID is correctly configured */
182 		if (dr->doepctl[i] & DXEPCTL_DPID)
183 			dr->doepctl[i] |= DXEPCTL_SETD1PID;
184 		else
185 			dr->doepctl[i] |= DXEPCTL_SETD0PID;
186 
187 		dr->doeptsiz[i] = DWC2_READ_4(hsotg, DOEPTSIZ(i));
188 		dr->doepdma[i] = DWC2_READ_4(hsotg, DOEPDMA(i));
189 	}
190 	dr->valid = true;
191 	return 0;
192 }
193 
194 /**
195  * dwc2_restore_device_registers() - Restore controller device registers.
196  * When resuming usb bus, device registers needs to be restored
197  * if controller power were disabled.
198  *
199  * @hsotg: Programming view of the DWC_otg controller
200  */
201 STATIC int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
202 {
203 	struct dwc2_dregs_backup *dr;
204 	u32 dctl;
205 	int i;
206 
207 	dev_dbg(hsotg->dev, "%s\n", __func__);
208 
209 	/* Restore dev regs */
210 	dr = &hsotg->dr_backup;
211 	if (!dr->valid) {
212 		dev_err(hsotg->dev, "%s: no device registers to restore\n",
213 				__func__);
214 		return -EINVAL;
215 	}
216 	dr->valid = false;
217 
218 	DWC2_WRITE_4(hsotg, DCFG, dr->dcfg);
219 	DWC2_WRITE_4(hsotg, DCTL, dr->dctl);
220 	DWC2_WRITE_4(hsotg, DAINTMSK, dr->daintmsk);
221 	DWC2_WRITE_4(hsotg, DIEPMSK, dr->diepmsk);
222 	DWC2_WRITE_4(hsotg, DOEPMSK, dr->doepmsk);
223 
224 	for (i = 0; i < hsotg->num_of_eps; i++) {
225 		/* Restore IN EPs */
226 		DWC2_WRITE_4(hsotg, DIEPCTL(i), dr->diepctl[i]);
227 		DWC2_WRITE_4(hsotg, DIEPTSIZ(i), dr->dieptsiz[i]);
228 		DWC2_WRITE_4(hsotg, DIEPDMA(i), dr->diepdma[i]);
229 
230 		/* Restore OUT EPs */
231 		DWC2_WRITE_4(hsotg, DOEPCTL(i), dr->doepctl[i]);
232 		DWC2_WRITE_4(hsotg, DOEPTSIZ(i), dr->doeptsiz[i]);
233 		DWC2_WRITE_4(hsotg, DOEPDMA(i), dr->doepdma[i]);
234 	}
235 
236 	/* Set the Power-On Programming done bit */
237 	dctl = DWC2_READ_4(hsotg, DCTL);
238 	dctl |= DCTL_PWRONPRGDONE;
239 	DWC2_WRITE_4(hsotg, DCTL, dctl);
240 
241 	return 0;
242 }
#else
/* Device mode not configured: backup/restore of device registers are no-ops. */
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif
250 
251 /**
252  * dwc2_backup_global_registers() - Backup global controller registers.
253  * When suspending usb bus, registers needs to be backuped
254  * if controller power is disabled once suspended.
255  *
256  * @hsotg: Programming view of the DWC_otg controller
257  */
258 STATIC int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
259 {
260 	struct dwc2_gregs_backup *gr;
261 	int i;
262 
263 	/* Backup global regs */
264 	gr = &hsotg->gr_backup;
265 
266 	gr->gotgctl = DWC2_READ_4(hsotg, GOTGCTL);
267 	gr->gintmsk = DWC2_READ_4(hsotg, GINTMSK);
268 	gr->gahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
269 	gr->gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
270 	gr->grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
271 	gr->gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ);
272 	gr->hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ);
273 	gr->gdfifocfg = DWC2_READ_4(hsotg, GDFIFOCFG);
274 	for (i = 0; i < MAX_EPS_CHANNELS; i++)
275 		gr->dtxfsiz[i] = DWC2_READ_4(hsotg, DPTXFSIZN(i));
276 
277 	gr->valid = true;
278 	return 0;
279 }
280 
281 /**
282  * dwc2_restore_global_registers() - Restore controller global registers.
283  * When resuming usb bus, device registers needs to be restored
284  * if controller power were disabled.
285  *
286  * @hsotg: Programming view of the DWC_otg controller
287  */
288 STATIC int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
289 {
290 	struct dwc2_gregs_backup *gr;
291 	int i;
292 
293 	dev_dbg(hsotg->dev, "%s\n", __func__);
294 
295 	/* Restore global regs */
296 	gr = &hsotg->gr_backup;
297 	if (!gr->valid) {
298 		dev_err(hsotg->dev, "%s: no global registers to restore\n",
299 				__func__);
300 		return -EINVAL;
301 	}
302 	gr->valid = false;
303 
304 	DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff);
305 	DWC2_WRITE_4(hsotg, GOTGCTL, gr->gotgctl);
306 	DWC2_WRITE_4(hsotg, GINTMSK, gr->gintmsk);
307 	DWC2_WRITE_4(hsotg, GUSBCFG, gr->gusbcfg);
308 	DWC2_WRITE_4(hsotg, GAHBCFG, gr->gahbcfg);
309 	DWC2_WRITE_4(hsotg, GRXFSIZ, gr->grxfsiz);
310 	DWC2_WRITE_4(hsotg, GNPTXFSIZ, gr->gnptxfsiz);
311 	DWC2_WRITE_4(hsotg, HPTXFSIZ, gr->hptxfsiz);
312 	DWC2_WRITE_4(hsotg, GDFIFOCFG, gr->gdfifocfg);
313 	for (i = 0; i < MAX_EPS_CHANNELS; i++)
314 		DWC2_WRITE_4(hsotg, DPTXFSIZN(i), gr->dtxfsiz[i]);
315 
316 	return 0;
317 }
318 
319 /**
320  * dwc2_exit_hibernation() - Exit controller from Partial Power Down.
321  *
322  * @hsotg: Programming view of the DWC_otg controller
323  * @restore: Controller registers need to be restored
324  */
325 int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
326 {
327 	u32 pcgcctl;
328 	int ret = 0;
329 
330 	if (!hsotg->core_params->hibernation)
331 		return -ENOTSUP;
332 
333 	pcgcctl = DWC2_READ_4(hsotg, PCGCTL);
334 	pcgcctl &= ~PCGCTL_STOPPCLK;
335 	DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
336 
337 	pcgcctl = DWC2_READ_4(hsotg, PCGCTL);
338 	pcgcctl &= ~PCGCTL_PWRCLMP;
339 	DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
340 
341 	pcgcctl = DWC2_READ_4(hsotg, PCGCTL);
342 	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
343 	DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
344 
345 	udelay(100);
346 	if (restore) {
347 		ret = dwc2_restore_global_registers(hsotg);
348 		if (ret) {
349 			dev_err(hsotg->dev, "%s: failed to restore registers\n",
350 					__func__);
351 			return ret;
352 		}
353 		if (dwc2_is_host_mode(hsotg)) {
354 			ret = dwc2_restore_host_registers(hsotg);
355 			if (ret) {
356 				dev_err(hsotg->dev, "%s: failed to restore host registers\n",
357 						__func__);
358 				return ret;
359 			}
360 		} else {
361 			ret = dwc2_restore_device_registers(hsotg);
362 			if (ret) {
363 				dev_err(hsotg->dev, "%s: failed to restore device registers\n",
364 						__func__);
365 				return ret;
366 			}
367 		}
368 	}
369 
370 	return ret;
371 }
372 
373 /**
374  * dwc2_enter_hibernation() - Put controller in Partial Power Down.
375  *
376  * @hsotg: Programming view of the DWC_otg controller
377  */
378 int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
379 {
380 	u32 pcgcctl;
381 	int ret = 0;
382 
383 	if (!hsotg->core_params->hibernation)
384 		return -ENOTSUP;
385 
386 	/* Backup all registers */
387 	ret = dwc2_backup_global_registers(hsotg);
388 	if (ret) {
389 		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
390 				__func__);
391 		return ret;
392 	}
393 
394 	if (dwc2_is_host_mode(hsotg)) {
395 		ret = dwc2_backup_host_registers(hsotg);
396 		if (ret) {
397 			dev_err(hsotg->dev, "%s: failed to backup host registers\n",
398 					__func__);
399 			return ret;
400 		}
401 	} else {
402 		ret = dwc2_backup_device_registers(hsotg);
403 		if (ret) {
404 			dev_err(hsotg->dev, "%s: failed to backup device registers\n",
405 					__func__);
406 			return ret;
407 		}
408 	}
409 
410 	/*
411 	 * Clear any pending interrupts since dwc2 will not be able to
412 	 * clear them after entering hibernation.
413 	 */
414 	DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff);
415 
416 	/* Put the controller in low power state */
417 	pcgcctl = DWC2_READ_4(hsotg, PCGCTL);
418 
419 	pcgcctl |= PCGCTL_PWRCLMP;
420 	DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
421 	ndelay(20);
422 
423 	pcgcctl |= PCGCTL_RSTPDWNMODULE;
424 	DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
425 	ndelay(20);
426 
427 	pcgcctl |= PCGCTL_STOPPCLK;
428 	DWC2_WRITE_4(hsotg, PCGCTL, pcgcctl);
429 
430 	return ret;
431 }
432 
433 /**
434  * dwc2_enable_common_interrupts() - Initializes the commmon interrupts,
435  * used in both device and host modes
436  *
437  * @hsotg: Programming view of the DWC_otg controller
438  */
439 STATIC void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
440 {
441 	u32 intmsk;
442 
443 	/* Clear any pending OTG Interrupts */
444 	DWC2_WRITE_4(hsotg, GOTGINT, 0xffffffff);
445 
446 	/* Clear any pending interrupts */
447 	DWC2_WRITE_4(hsotg, GINTSTS, 0xffffffff);
448 
449 	/* Enable the interrupts in the GINTMSK */
450 	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
451 
452 	if (hsotg->core_params->dma_enable <= 0)
453 		intmsk |= GINTSTS_RXFLVL;
454 	if (hsotg->core_params->external_id_pin_ctl <= 0)
455 		intmsk |= GINTSTS_CONIDSTSCHNG;
456 
457 	intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
458 		  GINTSTS_SESSREQINT;
459 
460 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
461 }
462 
463 /*
464  * Initializes the FSLSPClkSel field of the HCFG register depending on the
465  * PHY type
466  */
467 STATIC void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
468 {
469 	u32 hcfg, val;
470 
471 	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
472 	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
473 	     hsotg->core_params->ulpi_fs_ls > 0) ||
474 	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
475 		/* Full speed PHY */
476 		val = HCFG_FSLSPCLKSEL_48_MHZ;
477 	} else {
478 		/* High speed PHY running at full speed or high speed */
479 		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
480 	}
481 
482 	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
483 	hcfg = DWC2_READ_4(hsotg, HCFG);
484 	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
485 	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
486 	DWC2_WRITE_4(hsotg, HCFG, hcfg);
487 }
488 
489 /*
490  * Do core a soft reset of the core.  Be careful with this because it
491  * resets all the internal state machines of the core.
492  */
493 int dwc2_core_reset(struct dwc2_hsotg *hsotg)
494 {
495 	u32 greset;
496 	int count = 0;
497 
498 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
499 
500 	/* Core Soft Reset */
501 	greset = DWC2_READ_4(hsotg, GRSTCTL);
502 	greset |= GRSTCTL_CSFTRST;
503 	DWC2_WRITE_4(hsotg, GRSTCTL, greset);
504 	do {
505 		udelay(1);
506 		greset = DWC2_READ_4(hsotg, GRSTCTL);
507 		if (++count > 50) {
508 			dev_warn(hsotg->dev,
509 				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
510 				 __func__, greset);
511 			return -EBUSY;
512 		}
513 	} while (greset & GRSTCTL_CSFTRST);
514 
515 	/* Wait for AHB master IDLE state */
516 	count = 0;
517 	do {
518 		udelay(1);
519 		greset = DWC2_READ_4(hsotg, GRSTCTL);
520 		if (++count > 50) {
521 			dev_warn(hsotg->dev,
522 				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
523 				 __func__, greset);
524 			return -EBUSY;
525 		}
526 	} while (!(greset & GRSTCTL_AHBIDLE));
527 
528 	return 0;
529 }
530 
531 /*
532  * Force the mode of the controller.
533  *
534  * Forcing the mode is needed for two cases:
535  *
536  * 1) If the dr_mode is set to either HOST or PERIPHERAL we force the
537  * controller to stay in a particular mode regardless of ID pin
538  * changes. We do this usually after a core reset.
539  *
540  * 2) During probe we want to read reset values of the hw
541  * configuration registers that are only available in either host or
542  * device mode. We may need to force the mode if the current mode does
543  * not allow us to access the register in the mode that we want.
544  *
545  * In either case it only makes sense to force the mode if the
546  * controller hardware is OTG capable.
547  *
548  * Checks are done in this function to determine whether doing a force
549  * would be valid or not.
550  *
551  * If a force is done, it requires a 25ms delay to take effect.
552  *
553  * Returns true if the mode was forced.
554  */
555 STATIC bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
556 {
557 	struct dwc2_softc *sc = hsotg->hsotg_sc;
558 	u32 gusbcfg;
559 	u32 set;
560 	u32 clear;
561 
562 	dev_dbg(hsotg->dev, "Forcing mode to %s\n", host ? "host" : "device");
563 
564 	/*
565 	 * Force mode has no effect if the hardware is not OTG.
566 	 */
567 	if (!dwc2_hw_is_otg(hsotg))
568 		return false;
569 
570 	/*
571 	 * If dr_mode is either peripheral or host only, there is no
572 	 * need to ever force the mode to the opposite mode.
573 	 */
574 	if (host && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
575 		WARN_ON(1);
576 		return false;
577 	}
578 
579 	if (!host && hsotg->dr_mode == USB_DR_MODE_HOST) {
580 		WARN_ON(1);
581 		return false;
582 	}
583 
584 	gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
585 
586 	set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE;
587 	clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE;
588 
589 	gusbcfg &= ~clear;
590 	gusbcfg |= set;
591 	DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
592 
593 	usb_delay_ms(&sc->sc_bus, 25);
594 	return true;
595 }
596 
597 /*
598  * Clears the force mode bits.
599  */
600 STATIC void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
601 {
602 	struct dwc2_softc *sc = hsotg->hsotg_sc;
603 	u32 gusbcfg;
604 
605 	gusbcfg = DWC2_READ_4(hsotg, GUSBCFG);
606 	gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
607 	gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
608 	DWC2_WRITE_4(hsotg, GUSBCFG, gusbcfg);
609 
610 	/*
611 	 * NOTE: This long sleep is _very_ important, otherwise the core will
612 	 * not stay in host mode after a connector ID change!
613 	 */
614 	usb_delay_ms(&sc->sc_bus, 25);
615 }
616 
617 /*
618  * Sets or clears force mode based on the dr_mode parameter.
619  */
620 void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
621 {
622 	switch (hsotg->dr_mode) {
623 	case USB_DR_MODE_HOST:
624 		dwc2_force_mode(hsotg, true);
625 		break;
626 	case USB_DR_MODE_PERIPHERAL:
627 		dwc2_force_mode(hsotg, false);
628 		break;
629 	case USB_DR_MODE_OTG:
630 		dwc2_clear_force_mode(hsotg);
631 		break;
632 	default:
633 		dev_warn(hsotg->dev, "%s() Invalid dr_mode=%d\n",
634 			 __func__, hsotg->dr_mode);
635 		break;
636 	}
637 }
638 
639 /*
640  * Do core a soft reset of the core.  Be careful with this because it
641  * resets all the internal state machines of the core.
642  *
643  * Additionally this will apply force mode as per the hsotg->dr_mode
644  * parameter.
645  */
646 int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg)
647 {
648 	int retval;
649 
650 	retval = dwc2_core_reset(hsotg);
651 	if (retval)
652 		return retval;
653 
654 	dwc2_force_dr_mode(hsotg);
655 	return 0;
656 }
657 
/*
 * Initialize the full-speed PHY: select the FS transceiver (first call
 * only), program the FS/LS clock, and optionally enable the I2C
 * interface for FS-I2C PHYs.
 *
 * Returns 0 on success or a negative error if the post-select core
 * reset fails.
 */
STATIC int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, i2cctl;
	int retval = 0;

	/*
	 * core_init() is now called on every switch so only call the
	 * following for the first time through
	 */
	if (select_phy) {
		dev_dbg(hsotg->dev, "FS PHY selected\n");

		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
		/* Only select (and reset) if not already selected */
		if (!(usbcfg & GUSBCFG_PHYSEL)) {
			usbcfg |= GUSBCFG_PHYSEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);

			/* Reset after a PHY select */
			retval = dwc2_core_reset_and_force_dr_mode(hsotg);

			if (retval) {
				dev_err(hsotg->dev,
					"%s: Reset failed, aborting", __func__);
				return retval;
			}
		}
	}

	/*
	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
	 * do this on HNP Dev/Host mode switches (done in dev_init and
	 * host_init).
	 */
	if (dwc2_is_host_mode(hsotg))
		dwc2_init_fs_ls_pclk_sel(hsotg);

	if (hsotg->core_params->i2c_enable > 0) {
		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");

		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);

		/*
		 * Program GI2CCTL.I2CEn: set device address 1, then
		 * toggle I2CEn off and on to (re)enable the interface.
		 */
		i2cctl = DWC2_READ_4(hsotg, GI2CCTL);
		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
		i2cctl &= ~GI2CCTL_I2CEN;
		DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl);
		i2cctl |= GI2CCTL_I2CEN;
		DWC2_WRITE_4(hsotg, GI2CCTL, i2cctl);
	}

	return retval;
}
714 
715 STATIC int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
716 {
717 	u32 usbcfg, usbcfg_old;
718 	int retval = 0;
719 
720 	if (!select_phy)
721 		return 0;
722 
723 	usbcfg = usbcfg_old = DWC2_READ_4(hsotg, GUSBCFG);
724 
725 	/*
726 	 * HS PHY parameters. These parameters are preserved during soft reset
727 	 * so only program the first time. Do a soft reset immediately after
728 	 * setting phyif.
729 	 */
730 	switch (hsotg->core_params->phy_type) {
731 	case DWC2_PHY_TYPE_PARAM_ULPI:
732 		/* ULPI interface */
733 		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
734 		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
735 		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
736 		if (hsotg->core_params->phy_ulpi_ddr > 0)
737 			usbcfg |= GUSBCFG_DDRSEL;
738 		break;
739 	case DWC2_PHY_TYPE_PARAM_UTMI:
740 		/* UTMI+ interface */
741 		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
742 		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
743 		if (hsotg->core_params->phy_utmi_width == 16)
744 			usbcfg |= GUSBCFG_PHYIF16;
745 		break;
746 	default:
747 		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
748 		break;
749 	}
750 
751 	if (usbcfg != usbcfg_old) {
752 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
753 
754 		/* Reset after setting the PHY parameters */
755 		retval = dwc2_core_reset_and_force_dr_mode(hsotg);
756 		if (retval) {
757 			dev_err(hsotg->dev,
758 				"%s: Reset failed, aborting", __func__);
759 			return retval;
760 		}
761 	}
762 
763 	return retval;
764 }
765 
766 STATIC int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
767 {
768 	u32 usbcfg;
769 	int retval = 0;
770 
771 	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
772 	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
773 		/* If FS mode with FS PHY */
774 		retval = dwc2_fs_phy_init(hsotg, select_phy);
775 		if (retval)
776 			return retval;
777 	} else {
778 		/* High speed PHY */
779 		retval = dwc2_hs_phy_init(hsotg, select_phy);
780 		if (retval)
781 			return retval;
782 	}
783 
784 	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
785 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
786 	    hsotg->core_params->ulpi_fs_ls > 0) {
787 		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
788 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
789 		usbcfg |= GUSBCFG_ULPI_FS_LS;
790 		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
791 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
792 	} else {
793 		usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
794 		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
795 		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
796 		DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
797 	}
798 
799 	return retval;
800 }
801 
802 STATIC int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
803 {
804 	struct dwc2_softc *sc = hsotg->hsotg_sc;
805 	u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
806 
807 	switch (hsotg->hw_params.arch) {
808 	case GHWCFG2_EXT_DMA_ARCH:
809 		dev_dbg(hsotg->dev, "External DMA Mode\n");
810 		if (!sc->sc_set_dma_addr) {
811 			dev_err(hsotg->dev, "External DMA Mode not supported\n");
812 			return -EINVAL;
813 		}
814 		if (hsotg->core_params->ahbcfg != -1) {
815 			ahbcfg &= GAHBCFG_CTRL_MASK;
816 			ahbcfg |= hsotg->core_params->ahbcfg &
817 				  ~GAHBCFG_CTRL_MASK;
818 		}
819 		break;
820 
821 	case GHWCFG2_INT_DMA_ARCH:
822 		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
823 		if (hsotg->core_params->ahbcfg != -1) {
824 			ahbcfg &= GAHBCFG_CTRL_MASK;
825 			ahbcfg |= hsotg->core_params->ahbcfg &
826 				  ~GAHBCFG_CTRL_MASK;
827 		}
828 		break;
829 
830 	case GHWCFG2_SLAVE_ONLY_ARCH:
831 	default:
832 		dev_dbg(hsotg->dev, "Slave Only Mode\n");
833 		break;
834 	}
835 
836 	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
837 		hsotg->core_params->dma_enable,
838 		hsotg->core_params->dma_desc_enable);
839 
840 	if (hsotg->core_params->dma_enable > 0) {
841 		if (hsotg->core_params->dma_desc_enable > 0)
842 			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
843 		else
844 			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
845 	} else {
846 		dev_dbg(hsotg->dev, "Using Slave mode\n");
847 		hsotg->core_params->dma_desc_enable = 0;
848 	}
849 
850 	if (hsotg->core_params->dma_enable > 0)
851 		ahbcfg |= GAHBCFG_DMA_EN;
852 
853 	DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
854 
855 	return 0;
856 }
857 
858 STATIC void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
859 {
860 	u32 usbcfg;
861 
862 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
863 	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
864 
865 	switch (hsotg->hw_params.op_mode) {
866 	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
867 		if (hsotg->core_params->otg_cap ==
868 				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
869 			usbcfg |= GUSBCFG_HNPCAP;
870 		if (hsotg->core_params->otg_cap !=
871 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
872 			usbcfg |= GUSBCFG_SRPCAP;
873 		break;
874 
875 	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
876 	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
877 	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
878 		if (hsotg->core_params->otg_cap !=
879 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
880 			usbcfg |= GUSBCFG_SRPCAP;
881 		break;
882 
883 	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
884 	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
885 	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
886 	default:
887 		break;
888 	}
889 
890 	DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
891 }
892 
893 /**
894  * dwc2_core_init() - Initializes the DWC_otg controller registers and
895  * prepares the core for device mode or host mode operation
896  *
897  * @hsotg:         Programming view of the DWC_otg controller
898  * @initial_setup: If true then this is the first init for this instance.
899  */
900 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
901 {
902 	u32 usbcfg, otgctl;
903 	int retval;
904 
905 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
906 
907 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
908 
909 	/* Set ULPI External VBUS bit if needed */
910 	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
911 	if (hsotg->core_params->phy_ulpi_ext_vbus ==
912 				DWC2_PHY_ULPI_EXTERNAL_VBUS)
913 		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
914 
915 	/* Set external TS Dline pulsing bit if needed */
916 	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
917 	if (hsotg->core_params->ts_dline > 0)
918 		usbcfg |= GUSBCFG_TERMSELDLPULSE;
919 
920 	DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
921 
922 	/*
923 	 * Reset the Controller
924 	 *
925 	 * We only need to reset the controller if this is a re-init.
926 	 * For the first init we know for sure that earlier code reset us (it
927 	 * needed to in order to properly detect various parameters).
928 	 */
929 	if (!initial_setup) {
930 		retval = dwc2_core_reset_and_force_dr_mode(hsotg);
931 		if (retval) {
932 			dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
933 					__func__);
934 			return retval;
935 		}
936 	}
937 
938 	/*
939 	 * This needs to happen in FS mode before any other programming occurs
940 	 */
941 	retval = dwc2_phy_init(hsotg, initial_setup);
942 	if (retval)
943 		return retval;
944 
945 	/* Program the GAHBCFG Register */
946 	retval = dwc2_gahbcfg_init(hsotg);
947 	if (retval)
948 		return retval;
949 
950 	/* Program the GUSBCFG register */
951 	dwc2_gusbcfg_init(hsotg);
952 
953 	/* Program the GOTGCTL register */
954 	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
955 	otgctl &= ~GOTGCTL_OTGVER;
956 	if (hsotg->core_params->otg_ver > 0)
957 		otgctl |= GOTGCTL_OTGVER;
958 	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
959 	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
960 
961 	/* Clear the SRP success bit for FS-I2c */
962 	hsotg->srp_success = 0;
963 
964 	/* Enable common interrupts */
965 	dwc2_enable_common_interrupts(hsotg);
966 
967 	/*
968 	 * Do device or host initialization based on mode during PCD and
969 	 * HCD initialization
970 	 */
971 	if (dwc2_is_host_mode(hsotg)) {
972 		dev_dbg(hsotg->dev, "Host Mode\n");
973 		hsotg->op_state = OTG_STATE_A_HOST;
974 	} else {
975 		dev_dbg(hsotg->dev, "Device Mode\n");
976 		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
977 	}
978 
979 	return 0;
980 }
981 
982 /**
983  * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
984  *
985  * @hsotg: Programming view of DWC_otg controller
986  */
987 void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
988 {
989 	u32 intmsk;
990 
991 	dev_dbg(hsotg->dev, "%s()\n", __func__);
992 
993 	/* Disable all interrupts */
994 	DWC2_WRITE_4(hsotg, GINTMSK, 0);
995 	DWC2_WRITE_4(hsotg, HAINTMSK, 0);
996 
997 	/* Enable the common interrupts */
998 	dwc2_enable_common_interrupts(hsotg);
999 
1000 	/* Enable host mode interrupts without disturbing common interrupts */
1001 	intmsk = DWC2_READ_4(hsotg, GINTMSK);
1002 	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
1003 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
1004 }
1005 
1006 /**
1007  * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
1008  *
1009  * @hsotg: Programming view of DWC_otg controller
1010  */
1011 void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
1012 {
1013 	u32 intmsk = DWC2_READ_4(hsotg, GINTMSK);
1014 
1015 	/* Disable host mode interrupts without disturbing common interrupts */
1016 	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
1017 		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
1018 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
1019 }
1020 
1021 /*
1022  * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
1023  * For system that have a total fifo depth that is smaller than the default
1024  * RX + TX fifo size.
1025  *
1026  * @hsotg: Programming view of DWC_otg controller
1027  */
1028 STATIC void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
1029 {
1030 	struct dwc2_core_params *params = hsotg->core_params;
1031 	struct dwc2_hw_params *hw = &hsotg->hw_params;
1032 	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
1033 
1034 	total_fifo_size = hw->total_fifo_size;
1035 	rxfsiz = params->host_rx_fifo_size;
1036 	nptxfsiz = params->host_nperio_tx_fifo_size;
1037 	ptxfsiz = params->host_perio_tx_fifo_size;
1038 
1039 	/*
1040 	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
1041 	 * allocation with support for high bandwidth endpoints. Synopsys
1042 	 * defines MPS(Max Packet size) for a periodic EP=1024, and for
1043 	 * non-periodic as 512.
1044 	 */
1045 	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
1046 		/*
1047 		 * For Buffer DMA mode/Scatter Gather DMA mode
1048 		 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
1049 		 * with n = number of host channel.
1050 		 * 2 * ((1024/4) + 2) = 516
1051 		 */
1052 		rxfsiz = 516 + hw->host_channels;
1053 
1054 		/*
1055 		 * min non-periodic tx fifo depth
1056 		 * 2 * (largest non-periodic USB packet used / 4)
1057 		 * 2 * (512/4) = 256
1058 		 */
1059 		nptxfsiz = 256;
1060 
1061 		/*
1062 		 * min periodic tx fifo depth
1063 		 * (largest packet size*MC)/4
1064 		 * (1024 * 3)/4 = 768
1065 		 */
1066 		ptxfsiz = 768;
1067 
1068 		params->host_rx_fifo_size = rxfsiz;
1069 		params->host_nperio_tx_fifo_size = nptxfsiz;
1070 		params->host_perio_tx_fifo_size = ptxfsiz;
1071 	}
1072 
1073 	/*
1074 	 * If the summation of RX, NPTX and PTX fifo sizes is still
1075 	 * bigger than the total_fifo_size, then we have a problem.
1076 	 *
1077 	 * We won't be able to allocate as many endpoints. Right now,
1078 	 * we're just printing an error message, but ideally this FIFO
1079 	 * allocation algorithm would be improved in the future.
1080 	 *
1081 	 * FIXME improve this FIFO allocation algorithm.
1082 	 */
1083 	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz))
1084 		dev_err(hsotg->dev, "invalid fifo sizes\n");
1085 }
1086 
/*
 * dwc2_config_fifos() - Programs the host mode FIFO size/layout registers
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * Does nothing unless dynamic FIFO sizing is enabled in the core
 * parameters. The three host FIFOs are laid out back-to-back in the
 * shared FIFO RAM: Rx at offset 0, then the non-periodic Tx FIFO, then
 * the periodic Tx FIFO (each start address is the sum of the preceding
 * sizes).
 */
STATIC void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;

	if (!params->enable_dynamic_fifo)
		return;

	/* May shrink the parameter FIFO sizes to fit the hardware depth */
	dwc2_calculate_dynamic_fifo(hsotg);

	/* Rx FIFO */
	grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
	grxfsiz |= params->host_rx_fifo_size <<
		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
	DWC2_WRITE_4(hsotg, GRXFSIZ, grxfsiz);
	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
		DWC2_READ_4(hsotg, GRXFSIZ));

	/* Non-periodic Tx FIFO, starts right after the Rx FIFO */
	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
		DWC2_READ_4(hsotg, GNPTXFSIZ));
	nptxfsiz = params->host_nperio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	nptxfsiz |= params->host_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	DWC2_WRITE_4(hsotg, GNPTXFSIZ, nptxfsiz);
	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
		DWC2_READ_4(hsotg, GNPTXFSIZ));

	/* Periodic Tx FIFO, starts after the non-periodic Tx FIFO */
	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
		DWC2_READ_4(hsotg, HPTXFSIZ));
	hptxfsiz = params->host_perio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	hptxfsiz |= (params->host_rx_fifo_size +
		     params->host_nperio_tx_fifo_size) <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	DWC2_WRITE_4(hsotg, HPTXFSIZ, hptxfsiz);
	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
		DWC2_READ_4(hsotg, HPTXFSIZ));

	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
		/*
		 * Global DFIFOCFG calculation for Host mode -
		 * include RxFIFO, NPTXFIFO and HPTXFIFO.
		 * Only done on cores up to rev 2.94a when multiple Tx
		 * FIFOs are enabled.
		 */
		dfifocfg = DWC2_READ_4(hsotg, GDFIFOCFG);
		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
		dfifocfg |= (params->host_rx_fifo_size +
			     params->host_nperio_tx_fifo_size +
			     params->host_perio_tx_fifo_size) <<
			    GDFIFOCFG_EPINFOBASE_SHIFT &
			    GDFIFOCFG_EPINFOBASE_MASK;
		DWC2_WRITE_4(hsotg, GDFIFOCFG, dfifocfg);
	}
}
1146 
1147 /**
1148  * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
1149  * Host mode
1150  *
1151  * @hsotg: Programming view of DWC_otg controller
1152  *
1153  * This function flushes the Tx and Rx FIFOs and flushes any entries in the
1154  * request queues. Host channels are reset to ensure that they are ready for
1155  * performing transfers.
1156  */
1157 void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
1158 {
1159 	u32 hcfg, hfir, otgctl;
1160 
1161 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
1162 
1163 	/* Restart the Phy Clock */
1164 	DWC2_WRITE_4(hsotg, PCGCTL, 0);
1165 
1166 	/* Initialize Host Configuration Register */
1167 	dwc2_init_fs_ls_pclk_sel(hsotg);
1168 	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
1169 		hcfg = DWC2_READ_4(hsotg, HCFG);
1170 		hcfg |= HCFG_FSLSSUPP;
1171 		DWC2_WRITE_4(hsotg, HCFG, hcfg);
1172 	}
1173 
1174 	/*
1175 	 * This bit allows dynamic reloading of the HFIR register during
1176 	 * runtime. This bit needs to be programmed during initial configuration
1177 	 * and its value must not be changed during runtime.
1178 	 */
1179 	if (hsotg->core_params->reload_ctl > 0) {
1180 		hfir = DWC2_READ_4(hsotg, HFIR);
1181 		hfir |= HFIR_RLDCTRL;
1182 		DWC2_WRITE_4(hsotg, HFIR, hfir);
1183 	}
1184 
1185 	if (hsotg->core_params->dma_desc_enable > 0) {
1186 		u32 op_mode = hsotg->hw_params.op_mode;
1187 		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
1188 		    !hsotg->hw_params.dma_desc_enable ||
1189 		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
1190 		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
1191 		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
1192 			dev_err(hsotg->dev,
1193 				"Hardware does not support descriptor DMA mode -\n");
1194 			dev_err(hsotg->dev,
1195 				"falling back to buffer DMA mode.\n");
1196 			hsotg->core_params->dma_desc_enable = 0;
1197 		} else {
1198 			hcfg = DWC2_READ_4(hsotg, HCFG);
1199 			hcfg |= HCFG_DESCDMA;
1200 			DWC2_WRITE_4(hsotg, HCFG, hcfg);
1201 		}
1202 	}
1203 
1204 	/* Configure data FIFO sizes */
1205 	dwc2_config_fifos(hsotg);
1206 
1207 	/* TODO - check this */
1208 	/* Clear Host Set HNP Enable in the OTG Control Register */
1209 	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
1210 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
1211 	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
1212 
1213 	/* Make sure the FIFOs are flushed */
1214 	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
1215 	dwc2_flush_rx_fifo(hsotg);
1216 
1217 	/* Clear Host Set HNP Enable in the OTG Control Register */
1218 	otgctl = DWC2_READ_4(hsotg, GOTGCTL);
1219 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
1220 	DWC2_WRITE_4(hsotg, GOTGCTL, otgctl);
1221 
1222 	if (hsotg->core_params->dma_desc_enable <= 0) {
1223 		int num_channels, i;
1224 		u32 hcchar;
1225 
1226 		/* Flush out any leftover queued requests */
1227 		num_channels = hsotg->core_params->host_channels;
1228 		for (i = 0; i < num_channels; i++) {
1229 			hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
1230 			hcchar &= ~HCCHAR_CHENA;
1231 			hcchar |= HCCHAR_CHDIS;
1232 			hcchar &= ~HCCHAR_EPDIR;
1233 			DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar);
1234 		}
1235 
1236 		/* Halt all channels to put them into a known state */
1237 		for (i = 0; i < num_channels; i++) {
1238 			int count = 0;
1239 
1240 			hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
1241 			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
1242 			hcchar &= ~HCCHAR_EPDIR;
1243 			DWC2_WRITE_4(hsotg, HCCHAR(i), hcchar);
1244 			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
1245 				__func__, i);
1246 			do {
1247 				hcchar = DWC2_READ_4(hsotg, HCCHAR(i));
1248 				if (++count > 1000) {
1249 					dev_err(hsotg->dev,
1250 						"Unable to clear enable on channel %d\n",
1251 						i);
1252 					break;
1253 				}
1254 				udelay(1);
1255 			} while (hcchar & HCCHAR_CHENA);
1256 		}
1257 	}
1258 
1259 	/* Turn on the vbus power */
1260 	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
1261 	if (hsotg->op_state == OTG_STATE_A_HOST) {
1262 		u32 hprt0 = dwc2_read_hprt0(hsotg);
1263 
1264 		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
1265 			!!(hprt0 & HPRT0_PWR));
1266 		if (!(hprt0 & HPRT0_PWR)) {
1267 			hprt0 |= HPRT0_PWR;
1268 			DWC2_WRITE_4(hsotg, HPRT0, hprt0);
1269 		}
1270 	}
1271 
1272 	dwc2_enable_host_interrupts(hsotg);
1273 }
1274 
1275 STATIC void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
1276 				      struct dwc2_host_chan *chan)
1277 {
1278 	u32 hcintmsk = HCINTMSK_CHHLTD;
1279 
1280 	switch (chan->ep_type) {
1281 	case USB_ENDPOINT_XFER_CONTROL:
1282 	case USB_ENDPOINT_XFER_BULK:
1283 		dev_vdbg(hsotg->dev, "control/bulk\n");
1284 		hcintmsk |= HCINTMSK_XFERCOMPL;
1285 		hcintmsk |= HCINTMSK_STALL;
1286 		hcintmsk |= HCINTMSK_XACTERR;
1287 		hcintmsk |= HCINTMSK_DATATGLERR;
1288 		if (chan->ep_is_in) {
1289 			hcintmsk |= HCINTMSK_BBLERR;
1290 		} else {
1291 			hcintmsk |= HCINTMSK_NAK;
1292 			hcintmsk |= HCINTMSK_NYET;
1293 			if (chan->do_ping)
1294 				hcintmsk |= HCINTMSK_ACK;
1295 		}
1296 
1297 		if (chan->do_split) {
1298 			hcintmsk |= HCINTMSK_NAK;
1299 			if (chan->complete_split)
1300 				hcintmsk |= HCINTMSK_NYET;
1301 			else
1302 				hcintmsk |= HCINTMSK_ACK;
1303 		}
1304 
1305 		if (chan->error_state)
1306 			hcintmsk |= HCINTMSK_ACK;
1307 		break;
1308 
1309 	case USB_ENDPOINT_XFER_INT:
1310 		if (dbg_perio())
1311 			dev_vdbg(hsotg->dev, "intr\n");
1312 		hcintmsk |= HCINTMSK_XFERCOMPL;
1313 		hcintmsk |= HCINTMSK_NAK;
1314 		hcintmsk |= HCINTMSK_STALL;
1315 		hcintmsk |= HCINTMSK_XACTERR;
1316 		hcintmsk |= HCINTMSK_DATATGLERR;
1317 		hcintmsk |= HCINTMSK_FRMOVRUN;
1318 
1319 		if (chan->ep_is_in)
1320 			hcintmsk |= HCINTMSK_BBLERR;
1321 		if (chan->error_state)
1322 			hcintmsk |= HCINTMSK_ACK;
1323 		if (chan->do_split) {
1324 			if (chan->complete_split)
1325 				hcintmsk |= HCINTMSK_NYET;
1326 			else
1327 				hcintmsk |= HCINTMSK_ACK;
1328 		}
1329 		break;
1330 
1331 	case USB_ENDPOINT_XFER_ISOC:
1332 		if (dbg_perio())
1333 			dev_vdbg(hsotg->dev, "isoc\n");
1334 		hcintmsk |= HCINTMSK_XFERCOMPL;
1335 		hcintmsk |= HCINTMSK_FRMOVRUN;
1336 		hcintmsk |= HCINTMSK_ACK;
1337 
1338 		if (chan->ep_is_in) {
1339 			hcintmsk |= HCINTMSK_XACTERR;
1340 			hcintmsk |= HCINTMSK_BBLERR;
1341 		}
1342 		break;
1343 	default:
1344 		dev_err(hsotg->dev, "## Unknown EP type ##\n");
1345 		break;
1346 	}
1347 
1348 	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
1349 	if (dbg_hc(chan))
1350 		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
1351 }
1352 
1353 STATIC void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
1354 				    struct dwc2_host_chan *chan)
1355 {
1356 	u32 hcintmsk = HCINTMSK_CHHLTD;
1357 
1358 	/*
1359 	 * For Descriptor DMA mode core halts the channel on AHB error.
1360 	 * Interrupt is not required.
1361 	 */
1362 	if (hsotg->core_params->dma_desc_enable <= 0) {
1363 		if (dbg_hc(chan))
1364 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1365 		hcintmsk |= HCINTMSK_AHBERR;
1366 	} else {
1367 		if (dbg_hc(chan))
1368 			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
1369 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1370 			hcintmsk |= HCINTMSK_XFERCOMPL;
1371 	}
1372 
1373 	if (chan->error_state && !chan->do_split &&
1374 	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1375 		if (dbg_hc(chan))
1376 			dev_vdbg(hsotg->dev, "setting ACK\n");
1377 		hcintmsk |= HCINTMSK_ACK;
1378 		if (chan->ep_is_in) {
1379 			hcintmsk |= HCINTMSK_DATATGLERR;
1380 			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
1381 				hcintmsk |= HCINTMSK_NAK;
1382 		}
1383 	}
1384 
1385 	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
1386 	if (dbg_hc(chan))
1387 		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
1388 }
1389 
1390 STATIC void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
1391 				struct dwc2_host_chan *chan)
1392 {
1393 	u32 intmsk;
1394 
1395 	if (hsotg->core_params->dma_enable > 0) {
1396 		if (dbg_hc(chan))
1397 			dev_vdbg(hsotg->dev, "DMA enabled\n");
1398 		dwc2_hc_enable_dma_ints(hsotg, chan);
1399 	} else {
1400 		if (dbg_hc(chan))
1401 			dev_vdbg(hsotg->dev, "DMA disabled\n");
1402 		dwc2_hc_enable_slave_ints(hsotg, chan);
1403 	}
1404 
1405 	/* Enable the top level host channel interrupt */
1406 	intmsk = DWC2_READ_4(hsotg, HAINTMSK);
1407 	intmsk |= 1 << chan->hc_num;
1408 	DWC2_WRITE_4(hsotg, HAINTMSK, intmsk);
1409 	if (dbg_hc(chan))
1410 		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
1411 
1412 	/* Make sure host channel interrupts are enabled */
1413 	intmsk = DWC2_READ_4(hsotg, GINTMSK);
1414 	intmsk |= GINTSTS_HCHINT;
1415 	DWC2_WRITE_4(hsotg, GINTMSK, intmsk);
1416 	if (dbg_hc(chan))
1417 		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
1418 }
1419 
1420 /**
1421  * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
1422  * a specific endpoint
1423  *
1424  * @hsotg: Programming view of DWC_otg controller
1425  * @chan:  Information needed to initialize the host channel
1426  *
1427  * The HCCHARn register is set up with the characteristics specified in chan.
1428  * Host channel interrupts that may need to be serviced while this transfer is
1429  * in progress are enabled.
1430  */
1431 void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1432 {
1433 	u8 hc_num = chan->hc_num;
1434 	u32 hcintmsk;
1435 	u32 hcchar;
1436 	u32 hcsplt = 0;
1437 
1438 	if (dbg_hc(chan))
1439 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1440 
1441 	/* Clear old interrupt conditions for this host channel */
1442 	hcintmsk = 0xffffffff;
1443 	hcintmsk &= ~HCINTMSK_RESERVED14_31;
1444 	DWC2_WRITE_4(hsotg, HCINT(hc_num), hcintmsk);
1445 
1446 	/* Enable channel interrupts required for this transfer */
1447 	dwc2_hc_enable_ints(hsotg, chan);
1448 
1449 	/*
1450 	 * Program the HCCHARn register with the endpoint characteristics for
1451 	 * the current transfer
1452 	 */
1453 	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
1454 	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
1455 	if (chan->ep_is_in)
1456 		hcchar |= HCCHAR_EPDIR;
1457 	if (chan->speed == USB_SPEED_LOW)
1458 		hcchar |= HCCHAR_LSPDDEV;
1459 	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
1460 	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
1461 	DWC2_WRITE_4(hsotg, HCCHAR(hc_num), hcchar);
1462 	if (dbg_hc(chan)) {
1463 		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
1464 			 hc_num, hcchar);
1465 
1466 		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
1467 			 __func__, hc_num);
1468 		dev_vdbg(hsotg->dev, "	 Dev Addr: %d\n",
1469 			 chan->dev_addr);
1470 		dev_vdbg(hsotg->dev, "	 Ep Num: %d\n",
1471 			 chan->ep_num);
1472 		dev_vdbg(hsotg->dev, "	 Is In: %d\n",
1473 			 chan->ep_is_in);
1474 		dev_vdbg(hsotg->dev, "	 Is Low Speed: %d\n",
1475 			 chan->speed == USB_SPEED_LOW);
1476 		dev_vdbg(hsotg->dev, "	 Ep Type: %d\n",
1477 			 chan->ep_type);
1478 		dev_vdbg(hsotg->dev, "	 Max Pkt: %d\n",
1479 			 chan->max_packet);
1480 	}
1481 
1482 	/* Program the HCSPLT register for SPLITs */
1483 	if (chan->do_split) {
1484 		if (dbg_hc(chan))
1485 			dev_vdbg(hsotg->dev,
1486 				 "Programming HC %d with split --> %s\n",
1487 				 hc_num,
1488 				 chan->complete_split ? "CSPLIT" : "SSPLIT");
1489 		if (chan->complete_split)
1490 			hcsplt |= HCSPLT_COMPSPLT;
1491 		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
1492 			  HCSPLT_XACTPOS_MASK;
1493 		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
1494 			  HCSPLT_HUBADDR_MASK;
1495 		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
1496 			  HCSPLT_PRTADDR_MASK;
1497 		if (dbg_hc(chan)) {
1498 			dev_vdbg(hsotg->dev, "	  comp split %d\n",
1499 				 chan->complete_split);
1500 			dev_vdbg(hsotg->dev, "	  xact pos %d\n",
1501 				 chan->xact_pos);
1502 			dev_vdbg(hsotg->dev, "	  hub addr %d\n",
1503 				 chan->hub_addr);
1504 			dev_vdbg(hsotg->dev, "	  hub port %d\n",
1505 				 chan->hub_port);
1506 			dev_vdbg(hsotg->dev, "	  is_in %d\n",
1507 				 chan->ep_is_in);
1508 			dev_vdbg(hsotg->dev, "	  Max Pkt %d\n",
1509 				 chan->max_packet);
1510 			dev_vdbg(hsotg->dev, "	  xferlen %d\n",
1511 				 chan->xfer_len);
1512 		}
1513 	}
1514 
1515 	DWC2_WRITE_4(hsotg, HCSPLT(hc_num), hcsplt);
1516 }
1517 
1518 /**
1519  * dwc2_hc_halt() - Attempts to halt a host channel
1520  *
1521  * @hsotg:       Controller register interface
1522  * @chan:        Host channel to halt
1523  * @halt_status: Reason for halting the channel
1524  *
1525  * This function should only be called in Slave mode or to abort a transfer in
1526  * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
1527  * controller halts the channel when the transfer is complete or a condition
1528  * occurs that requires application intervention.
1529  *
1530  * In slave mode, checks for a free request queue entry, then sets the Channel
1531  * Enable and Channel Disable bits of the Host Channel Characteristics
1532  * register of the specified channel to intiate the halt. If there is no free
1533  * request queue entry, sets only the Channel Disable bit of the HCCHARn
1534  * register to flush requests for this channel. In the latter case, sets a
1535  * flag to indicate that the host channel needs to be halted when a request
1536  * queue slot is open.
1537  *
1538  * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1539  * HCCHARn register. The controller ensures there is space in the request
1540  * queue before submitting the halt request.
1541  *
1542  * Some time may elapse before the core flushes any posted requests for this
1543  * host channel and halts. The Channel Halted interrupt handler completes the
1544  * deactivation of the host channel.
1545  */
1546 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
1547 		  enum dwc2_halt_status halt_status)
1548 {
1549 	u32 nptxsts, hptxsts, hcchar;
1550 
1551 	if (dbg_hc(chan))
1552 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1553 	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
1554 		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
1555 
1556 	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1557 	    halt_status == DWC2_HC_XFER_AHB_ERR) {
1558 		/*
1559 		 * Disable all channel interrupts except Ch Halted. The QTD
1560 		 * and QH state associated with this transfer has been cleared
1561 		 * (in the case of URB_DEQUEUE), so the channel needs to be
1562 		 * shut down carefully to prevent crashes.
1563 		 */
1564 		u32 hcintmsk = HCINTMSK_CHHLTD;
1565 
1566 		dev_vdbg(hsotg->dev, "dequeue/error\n");
1567 		DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), hcintmsk);
1568 
1569 		/*
1570 		 * Make sure no other interrupts besides halt are currently
1571 		 * pending. Handling another interrupt could cause a crash due
1572 		 * to the QTD and QH state.
1573 		 */
1574 		DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), ~hcintmsk);
1575 
1576 		/*
1577 		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1578 		 * even if the channel was already halted for some other
1579 		 * reason
1580 		 */
1581 		chan->halt_status = halt_status;
1582 
1583 		hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1584 		if (!(hcchar & HCCHAR_CHENA)) {
1585 			/*
1586 			 * The channel is either already halted or it hasn't
1587 			 * started yet. In DMA mode, the transfer may halt if
1588 			 * it finishes normally or a condition occurs that
1589 			 * requires driver intervention. Don't want to halt
1590 			 * the channel again. In either Slave or DMA mode,
1591 			 * it's possible that the transfer has been assigned
1592 			 * to a channel, but not started yet when an URB is
1593 			 * dequeued. Don't want to halt a channel that hasn't
1594 			 * started yet.
1595 			 */
1596 			return;
1597 		}
1598 	}
1599 	if (chan->halt_pending) {
1600 		/*
1601 		 * A halt has already been issued for this channel. This might
1602 		 * happen when a transfer is aborted by a higher level in
1603 		 * the stack.
1604 		 */
1605 		dev_vdbg(hsotg->dev,
1606 			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1607 			 __func__, chan->hc_num);
1608 		return;
1609 	}
1610 
1611 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
1612 
1613 	/* No need to set the bit in DDMA for disabling the channel */
1614 	/* TODO check it everywhere channel is disabled */
1615 	if (hsotg->core_params->dma_desc_enable <= 0) {
1616 		if (dbg_hc(chan))
1617 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1618 		hcchar |= HCCHAR_CHENA;
1619 	} else {
1620 		if (dbg_hc(chan))
1621 			dev_dbg(hsotg->dev, "desc DMA enabled\n");
1622 	}
1623 	hcchar |= HCCHAR_CHDIS;
1624 
1625 	if (hsotg->core_params->dma_enable <= 0) {
1626 		if (dbg_hc(chan))
1627 			dev_vdbg(hsotg->dev, "DMA not enabled\n");
1628 		hcchar |= HCCHAR_CHENA;
1629 
1630 		/* Check for space in the request queue to issue the halt */
1631 		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1632 		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1633 			dev_vdbg(hsotg->dev, "control/bulk\n");
1634 			nptxsts = DWC2_READ_4(hsotg, GNPTXSTS);
1635 			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1636 				dev_vdbg(hsotg->dev, "Disabling channel\n");
1637 				hcchar &= ~HCCHAR_CHENA;
1638 			}
1639 		} else {
1640 			if (dbg_perio())
1641 				dev_vdbg(hsotg->dev, "isoc/intr\n");
1642 			hptxsts = DWC2_READ_4(hsotg, HPTXSTS);
1643 			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1644 			    hsotg->queuing_high_bandwidth) {
1645 				if (dbg_perio())
1646 					dev_vdbg(hsotg->dev, "Disabling channel\n");
1647 				hcchar &= ~HCCHAR_CHENA;
1648 			}
1649 		}
1650 	} else {
1651 		if (dbg_hc(chan))
1652 			dev_vdbg(hsotg->dev, "DMA enabled\n");
1653 	}
1654 
1655 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
1656 	chan->halt_status = halt_status;
1657 
1658 	if (hcchar & HCCHAR_CHENA) {
1659 		if (dbg_hc(chan))
1660 			dev_vdbg(hsotg->dev, "Channel enabled\n");
1661 		chan->halt_pending = 1;
1662 		chan->halt_on_queue = 0;
1663 	} else {
1664 		if (dbg_hc(chan))
1665 			dev_vdbg(hsotg->dev, "Channel disabled\n");
1666 		chan->halt_on_queue = 1;
1667 	}
1668 
1669 	if (dbg_hc(chan)) {
1670 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1671 			 chan->hc_num);
1672 		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
1673 			 hcchar);
1674 		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
1675 			 chan->halt_pending);
1676 		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
1677 			 chan->halt_on_queue);
1678 		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
1679 			 chan->halt_status);
1680 	}
1681 }
1682 
1683 /**
1684  * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1685  *
1686  * @hsotg: Programming view of DWC_otg controller
1687  * @chan:  Identifies the host channel to clean up
1688  *
1689  * This function is normally called after a transfer is done and the host
1690  * channel is being released
1691  */
1692 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1693 {
1694 	u32 hcintmsk;
1695 
1696 	chan->xfer_started = 0;
1697 
1698 	/*
1699 	 * Clear channel interrupt enables and any unhandled channel interrupt
1700 	 * conditions
1701 	 */
1702 	DWC2_WRITE_4(hsotg, HCINTMSK(chan->hc_num), 0);
1703 	hcintmsk = 0xffffffff;
1704 	hcintmsk &= ~HCINTMSK_RESERVED14_31;
1705 	DWC2_WRITE_4(hsotg, HCINT(chan->hc_num), hcintmsk);
1706 }
1707 
1708 /**
1709  * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1710  * which frame a periodic transfer should occur
1711  *
1712  * @hsotg:  Programming view of DWC_otg controller
1713  * @chan:   Identifies the host channel to set up and its properties
1714  * @hcchar: Current value of the HCCHAR register for the specified host channel
1715  *
1716  * This function has no effect on non-periodic transfers
1717  */
1718 STATIC void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1719 				       struct dwc2_host_chan *chan, u32 *hcchar)
1720 {
1721 	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1722 	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1723 		/* 1 if _next_ frame is odd, 0 if it's even */
1724 		if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
1725 			*hcchar |= HCCHAR_ODDFRM;
1726 	}
1727 }
1728 
1729 STATIC void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1730 {
1731 	/* Set up the initial PID for the transfer */
1732 	if (chan->speed == USB_SPEED_HIGH) {
1733 		if (chan->ep_is_in) {
1734 			if (chan->multi_count == 1)
1735 				chan->data_pid_start = DWC2_HC_PID_DATA0;
1736 			else if (chan->multi_count == 2)
1737 				chan->data_pid_start = DWC2_HC_PID_DATA1;
1738 			else
1739 				chan->data_pid_start = DWC2_HC_PID_DATA2;
1740 		} else {
1741 			if (chan->multi_count == 1)
1742 				chan->data_pid_start = DWC2_HC_PID_DATA0;
1743 			else
1744 				chan->data_pid_start = DWC2_HC_PID_MDATA;
1745 		}
1746 	} else {
1747 		chan->data_pid_start = DWC2_HC_PID_DATA0;
1748 	}
1749 }
1750 
1751 /**
1752  * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1753  * the Host Channel
1754  *
1755  * @hsotg: Programming view of DWC_otg controller
1756  * @chan:  Information needed to initialize the host channel
1757  *
1758  * This function should only be called in Slave mode. For a channel associated
1759  * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1760  * associated with a periodic EP, the periodic Tx FIFO is written.
1761  *
1762  * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1763  * the number of bytes written to the Tx FIFO.
1764  */
1765 STATIC void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1766 				 struct dwc2_host_chan *chan)
1767 {
1768 	u32 i;
1769 	u32 remaining_count;
1770 	u32 byte_count;
1771 	u32 dword_count;
1772 	u32 *data_buf = (u32 *)chan->xfer_buf;
1773 	u32 data_fifo;
1774 
1775 	if (dbg_hc(chan))
1776 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1777 
1778 	data_fifo = HCFIFO(chan->hc_num);
1779 
1780 	remaining_count = chan->xfer_len - chan->xfer_count;
1781 	if (remaining_count > chan->max_packet)
1782 		byte_count = chan->max_packet;
1783 	else
1784 		byte_count = remaining_count;
1785 
1786 	dword_count = (byte_count + 3) / 4;
1787 
1788 	if (((unsigned long)data_buf & 0x3) == 0) {
1789 		/* xfer_buf is DWORD aligned */
1790 		for (i = 0; i < dword_count; i++, data_buf++)
1791 			DWC2_WRITE_4(hsotg, data_fifo, *data_buf);
1792 	} else {
1793 		/* xfer_buf is not DWORD aligned */
1794 		for (i = 0; i < dword_count; i++, data_buf++) {
1795 			u32 data = data_buf[0] | data_buf[1] << 8 |
1796 				   data_buf[2] << 16 | data_buf[3] << 24;
1797 			DWC2_WRITE_4(hsotg, data_fifo, data);
1798 		}
1799 	}
1800 
1801 	chan->xfer_count += byte_count;
1802 	chan->xfer_buf += byte_count;
1803 }
1804 
1805 /**
1806  * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1807  * channel and starts the transfer
1808  *
1809  * @hsotg: Programming view of DWC_otg controller
1810  * @chan:  Information needed to initialize the host channel. The xfer_len value
1811  *         may be reduced to accommodate the max widths of the XferSize and
1812  *         PktCnt fields in the HCTSIZn register. The multi_count value may be
1813  *         changed to reflect the final xfer_len value.
1814  *
1815  * This function may be called in either Slave mode or DMA mode. In Slave mode,
1816  * the caller must ensure that there is sufficient space in the request queue
1817  * and Tx Data FIFO.
1818  *
1819  * For an OUT transfer in Slave mode, it loads a data packet into the
1820  * appropriate FIFO. If necessary, additional data packets are loaded in the
1821  * Host ISR.
1822  *
1823  * For an IN transfer in Slave mode, a data packet is requested. The data
1824  * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1825  * additional data packets are requested in the Host ISR.
1826  *
1827  * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1828  * register along with a packet count of 1 and the channel is enabled. This
1829  * causes a single PING transaction to occur. Other fields in HCTSIZ are
1830  * simply set to 0 since no data transfer occurs in this case.
1831  *
1832  * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1833  * all the information required to perform the subsequent data transfer. In
1834  * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1835  * controller performs the entire PING protocol, then starts the data
1836  * transfer.
1837  */
1838 void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1839 			    struct dwc2_host_chan *chan)
1840 {
1841 	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1842 	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1843 	u32 hcchar;
1844 	u32 hctsiz = 0;
1845 	u16 num_packets;
1846 	u32 ec_mc;
1847 
1848 	if (dbg_hc(chan))
1849 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1850 
1851 	if (chan->do_ping) {
1852 		if (hsotg->core_params->dma_enable <= 0) {
1853 			if (dbg_hc(chan))
1854 				dev_vdbg(hsotg->dev, "ping, no DMA\n");
1855 			dwc2_hc_do_ping(hsotg, chan);
1856 			chan->xfer_started = 1;
1857 			return;
1858 		} else {
1859 			if (dbg_hc(chan))
1860 				dev_vdbg(hsotg->dev, "ping, DMA\n");
1861 			hctsiz |= TSIZ_DOPNG;
1862 		}
1863 	}
1864 
1865 	if (chan->do_split) {
1866 		if (dbg_hc(chan))
1867 			dev_vdbg(hsotg->dev, "split\n");
1868 		num_packets = 1;
1869 
1870 		if (chan->complete_split && !chan->ep_is_in)
1871 			/*
1872 			 * For CSPLIT OUT Transfer, set the size to 0 so the
1873 			 * core doesn't expect any data written to the FIFO
1874 			 */
1875 			chan->xfer_len = 0;
1876 		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1877 			chan->xfer_len = chan->max_packet;
1878 		else if (!chan->ep_is_in && chan->xfer_len > 188)
1879 			chan->xfer_len = 188;
1880 
1881 		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1882 			  TSIZ_XFERSIZE_MASK;
1883 
1884 		/* For split set ec_mc for immediate retries */
1885 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1886 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1887 			ec_mc = 3;
1888 		else
1889 			ec_mc = 1;
1890 	} else {
1891 		if (dbg_hc(chan))
1892 			dev_vdbg(hsotg->dev, "no split\n");
1893 		/*
1894 		 * Ensure that the transfer length and packet count will fit
1895 		 * in the widths allocated for them in the HCTSIZn register
1896 		 */
1897 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1898 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1899 			/*
1900 			 * Make sure the transfer size is no larger than one
1901 			 * (micro)frame's worth of data. (A check was done
1902 			 * when the periodic transfer was accepted to ensure
1903 			 * that a (micro)frame's worth of data can be
1904 			 * programmed into a channel.)
1905 			 */
1906 			u32 max_periodic_len =
1907 				chan->multi_count * chan->max_packet;
1908 
1909 			if (chan->xfer_len > max_periodic_len)
1910 				chan->xfer_len = max_periodic_len;
1911 		} else if (chan->xfer_len > max_hc_xfer_size) {
1912 			/*
1913 			 * Make sure that xfer_len is a multiple of max packet
1914 			 * size
1915 			 */
1916 			chan->xfer_len =
1917 				max_hc_xfer_size - chan->max_packet + 1;
1918 		}
1919 
1920 		if (chan->xfer_len > 0) {
1921 			num_packets = (chan->xfer_len + chan->max_packet - 1) /
1922 					chan->max_packet;
1923 			if (num_packets > max_hc_pkt_count) {
1924 				num_packets = max_hc_pkt_count;
1925 				chan->xfer_len = num_packets * chan->max_packet;
1926 			}
1927 		} else {
1928 			/* Need 1 packet for transfer length of 0 */
1929 			num_packets = 1;
1930 		}
1931 
1932 		if (chan->ep_is_in)
1933 			/*
1934 			 * Always program an integral # of max packets for IN
1935 			 * transfers
1936 			 */
1937 			chan->xfer_len = num_packets * chan->max_packet;
1938 
1939 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1940 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1941 			/*
1942 			 * Make sure that the multi_count field matches the
1943 			 * actual transfer length
1944 			 */
1945 			chan->multi_count = num_packets;
1946 
1947 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1948 			dwc2_set_pid_isoc(chan);
1949 
1950 		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1951 			  TSIZ_XFERSIZE_MASK;
1952 
1953 		/* The ec_mc gets the multi_count for non-split */
1954 		ec_mc = chan->multi_count;
1955 	}
1956 
1957 	chan->start_pkt_count = num_packets;
1958 	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1959 	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1960 		  TSIZ_SC_MC_PID_MASK;
1961 	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
1962 	if (dbg_hc(chan)) {
1963 		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1964 			 hctsiz, chan->hc_num);
1965 
1966 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1967 			 chan->hc_num);
1968 		dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n",
1969 			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1970 			 TSIZ_XFERSIZE_SHIFT);
1971 		dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n",
1972 			 (hctsiz & TSIZ_PKTCNT_MASK) >>
1973 			 TSIZ_PKTCNT_SHIFT);
1974 		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
1975 			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1976 			 TSIZ_SC_MC_PID_SHIFT);
1977 	}
1978 
1979 	if (hsotg->core_params->dma_enable > 0) {
1980 		dma_addr_t dma_addr;
1981 
1982 		if (chan->align_buf) {
1983 			if (dbg_hc(chan))
1984 				dev_vdbg(hsotg->dev, "align_buf\n");
1985 			dma_addr = chan->align_buf;
1986 		} else {
1987 			dma_addr = chan->xfer_dma;
1988 		}
1989 		if (hsotg->hsotg_sc->sc_set_dma_addr == NULL) {
1990 			DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num),
1991 			    (u32)dma_addr);
1992 			if (dbg_hc(chan))
1993 				dev_vdbg(hsotg->dev,
1994 				    "Wrote %08lx to HCDMA(%d)\n",
1995 				     (unsigned long)dma_addr,
1996 				    chan->hc_num);
1997 		} else {
1998 			(void)(*hsotg->hsotg_sc->sc_set_dma_addr)(
1999 			    hsotg->dev, dma_addr, chan->hc_num);
2000 		}
2001 	}
2002 
2003 	/* Start the split */
2004 	if (chan->do_split) {
2005 		u32 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chan->hc_num));
2006 
2007 		hcsplt |= HCSPLT_SPLTENA;
2008 		DWC2_WRITE_4(hsotg, HCSPLT(chan->hc_num), hcsplt);
2009 	}
2010 
2011 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
2012 	hcchar &= ~HCCHAR_MULTICNT_MASK;
2013 	hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
2014 	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
2015 
2016 	if (hcchar & HCCHAR_CHDIS)
2017 		dev_warn(hsotg->dev,
2018 			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
2019 			 __func__, chan->hc_num, hcchar);
2020 
2021 	/* Set host channel enable after all other setup is complete */
2022 	hcchar |= HCCHAR_CHENA;
2023 	hcchar &= ~HCCHAR_CHDIS;
2024 
2025 	if (dbg_hc(chan))
2026 		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
2027 			 (hcchar & HCCHAR_MULTICNT_MASK) >>
2028 			 HCCHAR_MULTICNT_SHIFT);
2029 
2030 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
2031 	if (dbg_hc(chan))
2032 		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
2033 			 chan->hc_num);
2034 
2035 	chan->xfer_started = 1;
2036 	chan->requests++;
2037 
2038 	if (hsotg->core_params->dma_enable <= 0 &&
2039 	    !chan->ep_is_in && chan->xfer_len > 0)
2040 		/* Load OUT packet into the appropriate Tx FIFO */
2041 		dwc2_hc_write_packet(hsotg, chan);
2042 }
2043 
2044 /**
2045  * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
2046  * host channel and starts the transfer in Descriptor DMA mode
2047  *
2048  * @hsotg: Programming view of DWC_otg controller
2049  * @chan:  Information needed to initialize the host channel
2050  *
2051  * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
2052  * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
2053  * with micro-frame bitmap.
2054  *
2055  * Initializes HCDMA register with descriptor list address and CTD value then
2056  * starts the transfer via enabling the channel.
2057  */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hctsiz = 0;

	/* A PING transaction only needs the Do Ping bit set in HCTSIZ */
	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;

	/* Non-zero only for high-speed interrupt endpoints */
	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);
	}

	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);

	/* Flush the descriptor list to memory before the controller reads it */
	usb_syncmem(&chan->desc_list_usbdma, 0, chan->desc_list_sz,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Program the descriptor list base address into HCDMA, either
	 * directly or through a platform-specific hook when one is provided.
	 */
	if (hsotg->hsotg_sc->sc_set_dma_addr == NULL) {
		DWC2_WRITE_4(hsotg, HCDMA(chan->hc_num), chan->desc_list_addr);
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
				&chan->desc_list_addr, chan->hc_num);
	} else {
		(void)(*hsotg->hsotg_sc->sc_set_dma_addr)(
		    hsotg->dev, chan->desc_list_addr, chan->hc_num);
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Wrote %pad to ext dma(%d)\n",
				&chan->desc_list_addr, chan->hc_num);
	}

	/* Program the multi count field of HCCHAR */
	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;

	/* CHDIS should not be set at this point; warn if it is */
	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;
}
2133 
2134 /**
2135  * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
2136  * a previous call to dwc2_hc_start_transfer()
2137  *
2138  * @hsotg: Programming view of DWC_otg controller
2139  * @chan:  Information needed to initialize the host channel
2140  *
2141  * The caller must ensure there is sufficient space in the request queue and Tx
2142  * Data FIFO. This function should only be called in Slave mode. In DMA mode,
2143  * the controller acts autonomously to complete transfers programmed to a host
2144  * channel.
2145  *
2146  * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
2147  * if there is any data remaining to be queued. For an IN transfer, another
2148  * data packet is always requested. For the SETUP phase of a control transfer,
2149  * this function does nothing.
2150  *
2151  * Return: 1 if a new request is queued, 0 if no more requests are required
2152  * for this transfer
2153  */
int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	if (chan->do_split)
		/* SPLITs always queue just once per channel */
		return 0;

	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
		/* SETUPs are queued only once since they can't be NAK'd */
		return 0;

	if (chan->ep_is_in) {
		/*
		 * Always queue another request for other IN transfers. If
		 * back-to-back INs are issued and NAKs are received for both,
		 * the driver may still be processing the first NAK when the
		 * second NAK is received. When the interrupt handler clears
		 * the NAK interrupt for the first NAK, the second NAK will
		 * not be seen. So we can't depend on the NAK interrupt
		 * handler to requeue a NAK'd request. Instead, IN requests
		 * are issued each time this function is called. When the
		 * transfer completes, the extra requests for the channel will
		 * be flushed.
		 */
		u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));

		/* Re-enable the channel for the next IN request */
		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
		hcchar |= HCCHAR_CHENA;
		hcchar &= ~HCCHAR_CHDIS;
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "	 IN xfer: hcchar = 0x%08x\n",
				 hcchar);
		DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
		chan->requests++;
		return 1;
	}

	/* OUT transfers */

	if (chan->xfer_count < chan->xfer_len) {
		/* More data remains; periodic transfers also need the
		 * even/odd frame field refreshed before the next packet */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));

			dwc2_hc_set_even_odd_frame(hsotg, chan,
						   &hcchar);
		}

		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
		chan->requests++;
		return 1;
	}

	return 0;
}
2214 
2215 /**
2216  * dwc2_hc_do_ping() - Starts a PING transfer
2217  *
2218  * @hsotg: Programming view of DWC_otg controller
2219  * @chan:  Information needed to initialize the host channel
2220  *
2221  * This function should only be called in Slave mode. The Do Ping bit is set in
2222  * the HCTSIZ register, then the channel is enabled.
2223  */
2224 void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
2225 {
2226 	u32 hcchar;
2227 	u32 hctsiz;
2228 
2229 	if (dbg_hc(chan))
2230 		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
2231 			 chan->hc_num);
2232 
2233 
2234 	hctsiz = TSIZ_DOPNG;
2235 	hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
2236 	DWC2_WRITE_4(hsotg, HCTSIZ(chan->hc_num), hctsiz);
2237 
2238 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chan->hc_num));
2239 	hcchar |= HCCHAR_CHENA;
2240 	hcchar &= ~HCCHAR_CHDIS;
2241 	DWC2_WRITE_4(hsotg, HCCHAR(chan->hc_num), hcchar);
2242 }
2243 
2244 /**
2245  * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
2246  * the HFIR register according to PHY type and speed
2247  *
2248  * @hsotg: Programming view of DWC_otg controller
2249  *
2250  * NOTE: The caller can modify the value of the HFIR register only after the
2251  * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
2252  * has been set
2253  */
2254 u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
2255 {
2256 	u32 usbcfg;
2257 	u32 hprt0;
2258 	int clock = 60;	/* default value */
2259 
2260 	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
2261 	hprt0 = DWC2_READ_4(hsotg, HPRT0);
2262 
2263 	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
2264 	    !(usbcfg & GUSBCFG_PHYIF16))
2265 		clock = 60;
2266 	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
2267 	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
2268 		clock = 48;
2269 	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2270 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2271 		clock = 30;
2272 	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2273 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
2274 		clock = 60;
2275 	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2276 	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2277 		clock = 48;
2278 	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
2279 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
2280 		clock = 48;
2281 	if ((usbcfg & GUSBCFG_PHYSEL) &&
2282 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2283 		clock = 48;
2284 
2285 	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
2286 		/* High speed case */
2287 		return 125 * clock;
2288 	else
2289 		/* FS/LS case */
2290 		return 1000 * clock;
2291 }
2292 
2293 /**
2294  * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
2295  * buffer
2296  *
2297  * @core_if: Programming view of DWC_otg controller
2298  * @dest:    Destination buffer for the packet
2299  * @bytes:   Number of bytes to copy to the destination
2300  */
2301 void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
2302 {
2303 	bus_size_t fifo = HCFIFO(0);
2304 	u32 *data_buf = (u32 *)dest;
2305 	int word_count = (bytes + 3) / 4;
2306 	int i;
2307 
2308 	/*
2309 	 * Todo: Account for the case where dest is not dword aligned. This
2310 	 * requires reading data from the FIFO into a u32 temp buffer, then
2311 	 * moving it into the data buffer.
2312 	 */
2313 
2314 	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
2315 
2316 	for (i = 0; i < word_count; i++, data_buf++)
2317 		*data_buf = DWC2_READ_4(hsotg, fifo);
2318 }
2319 
2320 /**
2321  * dwc2_dump_host_registers() - Prints the host registers
2322  *
2323  * @hsotg: Programming view of DWC_otg controller
2324  *
2325  * NOTE: This function will be removed once the peripheral controller code
2326  * is integrated and the driver is stable
2327  */
void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DWC2_DEBUG
	bus_size_t addr;
	int i;

	/* Host-mode global registers */
	dev_dbg(hsotg->dev, "Host Global Registers\n");
	addr = HCFG;
	dev_dbg(hsotg->dev, "HCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HFIR;
	dev_dbg(hsotg->dev, "HFIR	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HFNUM;
	dev_dbg(hsotg->dev, "HFNUM	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HPTXSTS;
	dev_dbg(hsotg->dev, "HPTXSTS	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HAINT;
	dev_dbg(hsotg->dev, "HAINT	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HAINTMSK;
	dev_dbg(hsotg->dev, "HAINTMSK	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	/* HFLBADDR is only meaningful when descriptor DMA is enabled */
	if (hsotg->core_params->dma_desc_enable > 0) {
		addr = HFLBADDR;
		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	}

	addr = HPRT0;
	dev_dbg(hsotg->dev, "HPRT0	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));

	/* Per-channel register sets */
	for (i = 0; i < hsotg->core_params->host_channels; i++) {
		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
		addr = HCCHAR(i);
		dev_dbg(hsotg->dev, "HCCHAR	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCSPLT(i);
		dev_dbg(hsotg->dev, "HCSPLT	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCINT(i);
		dev_dbg(hsotg->dev, "HCINT	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCINTMSK(i);
		dev_dbg(hsotg->dev, "HCINTMSK	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCTSIZ(i);
		dev_dbg(hsotg->dev, "HCTSIZ	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		addr = HCDMA(i);
		dev_dbg(hsotg->dev, "HCDMA	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		/* HCDMAB is only meaningful when descriptor DMA is enabled */
		if (hsotg->core_params->dma_desc_enable > 0) {
			addr = HCDMAB(i);
			dev_dbg(hsotg->dev, "HCDMAB	 @0x%08lX : 0x%08X\n",
				(unsigned long)addr, DWC2_READ_4(hsotg, addr));
		}
	}
#endif
}
2391 
2392 /**
2393  * dwc2_dump_global_registers() - Prints the core global registers
2394  *
2395  * @hsotg: Programming view of DWC_otg controller
2396  *
2397  * NOTE: This function will be removed once the peripheral controller code
2398  * is integrated and the driver is stable
2399  */
void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DWC2_DEBUG
	bus_size_t addr;

	/* Core global register block */
	dev_dbg(hsotg->dev, "Core Global Registers\n");
	addr = GOTGCTL;
	dev_dbg(hsotg->dev, "GOTGCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GOTGINT;
	dev_dbg(hsotg->dev, "GOTGINT	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GAHBCFG;
	dev_dbg(hsotg->dev, "GAHBCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GUSBCFG;
	dev_dbg(hsotg->dev, "GUSBCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GRSTCTL;
	dev_dbg(hsotg->dev, "GRSTCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GINTSTS;
	dev_dbg(hsotg->dev, "GINTSTS	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GINTMSK;
	dev_dbg(hsotg->dev, "GINTMSK	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GRXSTSR;
	dev_dbg(hsotg->dev, "GRXSTSR	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GRXFSIZ;
	dev_dbg(hsotg->dev, "GRXFSIZ	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GNPTXFSIZ;
	dev_dbg(hsotg->dev, "GNPTXFSIZ	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GNPTXSTS;
	dev_dbg(hsotg->dev, "GNPTXSTS	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GI2CCTL;
	dev_dbg(hsotg->dev, "GI2CCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GPVNDCTL;
	dev_dbg(hsotg->dev, "GPVNDCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GGPIO;
	dev_dbg(hsotg->dev, "GGPIO	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GUID;
	dev_dbg(hsotg->dev, "GUID	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GSNPSID;
	dev_dbg(hsotg->dev, "GSNPSID	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GHWCFG1;
	dev_dbg(hsotg->dev, "GHWCFG1	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GHWCFG2;
	dev_dbg(hsotg->dev, "GHWCFG2	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GHWCFG3;
	dev_dbg(hsotg->dev, "GHWCFG3	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GHWCFG4;
	dev_dbg(hsotg->dev, "GHWCFG4	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GLPMCFG;
	dev_dbg(hsotg->dev, "GLPMCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GPWRDN;
	dev_dbg(hsotg->dev, "GPWRDN	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = GDFIFOCFG;
	dev_dbg(hsotg->dev, "GDFIFOCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
	addr = HPTXFSIZ;
	dev_dbg(hsotg->dev, "HPTXFSIZ	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));

	/* Power and clock gating control register */
	addr = PCGCTL;
	dev_dbg(hsotg->dev, "PCGCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, DWC2_READ_4(hsotg, addr));
#endif
}
2484 
2485 /**
2486  * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
2487  *
2488  * @hsotg: Programming view of DWC_otg controller
2489  * @num:   Tx FIFO to flush
2490  */
2491 void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
2492 {
2493 	u32 greset;
2494 	int count = 0;
2495 
2496 	dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
2497 
2498 	greset = GRSTCTL_TXFFLSH;
2499 	greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
2500 	DWC2_WRITE_4(hsotg, GRSTCTL, greset);
2501 
2502 	do {
2503 		greset = DWC2_READ_4(hsotg, GRSTCTL);
2504 		if (++count > 10000) {
2505 			dev_warn(hsotg->dev,
2506 				 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
2507 				 __func__, greset,
2508 				 DWC2_READ_4(hsotg, GNPTXSTS));
2509 			break;
2510 		}
2511 		udelay(1);
2512 	} while (greset & GRSTCTL_TXFFLSH);
2513 
2514 	/* Wait for at least 3 PHY Clocks */
2515 	udelay(1);
2516 }
2517 
2518 /**
2519  * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
2520  *
2521  * @hsotg: Programming view of DWC_otg controller
2522  */
2523 void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
2524 {
2525 	u32 greset;
2526 	int count = 0;
2527 
2528 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
2529 
2530 	greset = GRSTCTL_RXFFLSH;
2531 	DWC2_WRITE_4(hsotg, GRSTCTL, greset);
2532 
2533 	do {
2534 		greset = DWC2_READ_4(hsotg, GRSTCTL);
2535 		if (++count > 10000) {
2536 			dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
2537 				 __func__, greset);
2538 			break;
2539 		}
2540 		udelay(1);
2541 	} while (greset & GRSTCTL_RXFFLSH);
2542 
2543 	/* Wait for at least 3 PHY Clocks */
2544 	udelay(1);
2545 }
2546 
/* True when "a" lies outside the inclusive range [b, c] */
#define DWC2_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))
2548 
2549 /* Parameter access functions */
2550 void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
2551 {
2552 	int valid = 1;
2553 
2554 	switch (val) {
2555 	case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
2556 		if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
2557 			valid = 0;
2558 		break;
2559 	case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
2560 		switch (hsotg->hw_params.op_mode) {
2561 		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2562 		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2563 		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2564 		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2565 			break;
2566 		default:
2567 			valid = 0;
2568 			break;
2569 		}
2570 		break;
2571 	case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
2572 		/* always valid */
2573 		break;
2574 	default:
2575 		valid = 0;
2576 		break;
2577 	}
2578 
2579 	if (!valid) {
2580 		if (val >= 0)
2581 			dev_err(hsotg->dev,
2582 				"%d invalid for otg_cap parameter. Check HW configuration.\n",
2583 				val);
2584 		switch (hsotg->hw_params.op_mode) {
2585 		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2586 			val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
2587 			break;
2588 		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2589 		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2590 		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2591 			val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
2592 			break;
2593 		default:
2594 			val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
2595 			break;
2596 		}
2597 		dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
2598 	}
2599 
2600 	hsotg->core_params->otg_cap = val;
2601 }
2602 
2603 void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2604 {
2605 	int valid = 1;
2606 
2607 	if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
2608 		valid = 0;
2609 	if (val < 0)
2610 		valid = 0;
2611 
2612 	if (!valid) {
2613 		if (val >= 0)
2614 			dev_err(hsotg->dev,
2615 				"%d invalid for dma_enable parameter. Check HW configuration.\n",
2616 				val);
2617 		val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
2618 		dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2619 	}
2620 
2621 	hsotg->core_params->dma_enable = val;
2622 }
2623 
2624 void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2625 {
2626 	int valid = 1;
2627 
2628 	if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2629 			!hsotg->hw_params.dma_desc_enable))
2630 		valid = 0;
2631 	if (val < 0)
2632 		valid = 0;
2633 
2634 	if (!valid) {
2635 		if (val >= 0)
2636 			dev_err(hsotg->dev,
2637 				"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2638 				val);
2639 		val = (hsotg->core_params->dma_enable > 0 &&
2640 			hsotg->hw_params.dma_desc_enable);
2641 		dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2642 	}
2643 
2644 	hsotg->core_params->dma_desc_enable = val;
2645 }
2646 
2647 void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val)
2648 {
2649 	int valid = 1;
2650 
2651 	if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2652 			!hsotg->hw_params.dma_desc_enable))
2653 		valid = 0;
2654 	if (val < 0)
2655 		valid = 0;
2656 
2657 	if (!valid) {
2658 		if (val >= 0)
2659 			dev_err(hsotg->dev,
2660 				"%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
2661 				val);
2662 		val = (hsotg->core_params->dma_enable > 0 &&
2663 			hsotg->hw_params.dma_desc_enable);
2664 	}
2665 
2666 	hsotg->core_params->dma_desc_fs_enable = val;
2667 	dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
2668 }
2669 
2670 void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2671 						 int val)
2672 {
2673 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2674 		if (val >= 0) {
2675 			dev_err(hsotg->dev,
2676 				"Wrong value for host_support_fs_low_power\n");
2677 			dev_err(hsotg->dev,
2678 				"host_support_fs_low_power must be 0 or 1\n");
2679 		}
2680 		val = 0;
2681 		dev_dbg(hsotg->dev,
2682 			"Setting host_support_fs_low_power to %d\n", val);
2683 	}
2684 
2685 	hsotg->core_params->host_support_fs_ls_low_power = val;
2686 }
2687 
2688 void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2689 {
2690 	int valid = 1;
2691 
2692 	if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
2693 		valid = 0;
2694 	if (val < 0)
2695 		valid = 0;
2696 
2697 	if (!valid) {
2698 		if (val >= 0)
2699 			dev_err(hsotg->dev,
2700 				"%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2701 				val);
2702 		val = hsotg->hw_params.enable_dynamic_fifo;
2703 		dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2704 	}
2705 
2706 	hsotg->core_params->enable_dynamic_fifo = val;
2707 }
2708 
2709 void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2710 {
2711 	int valid = 1;
2712 
2713 	if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
2714 		valid = 0;
2715 
2716 	if (!valid) {
2717 		if (val >= 0)
2718 			dev_err(hsotg->dev,
2719 				"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2720 				val);
2721 		val = hsotg->hw_params.host_rx_fifo_size;
2722 		dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2723 	}
2724 
2725 	hsotg->core_params->host_rx_fifo_size = val;
2726 }
2727 
2728 void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2729 {
2730 	int valid = 1;
2731 
2732 	if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
2733 		valid = 0;
2734 
2735 	if (!valid) {
2736 		if (val >= 0)
2737 			dev_err(hsotg->dev,
2738 				"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2739 				val);
2740 		val = hsotg->hw_params.host_nperio_tx_fifo_size;
2741 		dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2742 			val);
2743 	}
2744 
2745 	hsotg->core_params->host_nperio_tx_fifo_size = val;
2746 }
2747 
2748 void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2749 {
2750 	int valid = 1;
2751 
2752 	if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
2753 		valid = 0;
2754 
2755 	if (!valid) {
2756 		if (val >= 0)
2757 			dev_err(hsotg->dev,
2758 				"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2759 				val);
2760 		val = hsotg->hw_params.host_perio_tx_fifo_size;
2761 		dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2762 			val);
2763 	}
2764 
2765 	hsotg->core_params->host_perio_tx_fifo_size = val;
2766 }
2767 
2768 void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2769 {
2770 	int valid = 1;
2771 
2772 	if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
2773 		valid = 0;
2774 
2775 	if (!valid) {
2776 		if (val >= 0)
2777 			dev_err(hsotg->dev,
2778 				"%d invalid for max_transfer_size. Check HW configuration.\n",
2779 				val);
2780 		val = hsotg->hw_params.max_transfer_size;
2781 		dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2782 	}
2783 
2784 	hsotg->core_params->max_transfer_size = val;
2785 }
2786 
2787 void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2788 {
2789 	int valid = 1;
2790 
2791 	if (val < 15 || val > hsotg->hw_params.max_packet_count)
2792 		valid = 0;
2793 
2794 	if (!valid) {
2795 		if (val >= 0)
2796 			dev_err(hsotg->dev,
2797 				"%d invalid for max_packet_count. Check HW configuration.\n",
2798 				val);
2799 		val = hsotg->hw_params.max_packet_count;
2800 		dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2801 	}
2802 
2803 	hsotg->core_params->max_packet_count = val;
2804 }
2805 
2806 void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2807 {
2808 	int valid = 1;
2809 
2810 	if (val < 1 || val > hsotg->hw_params.host_channels)
2811 		valid = 0;
2812 
2813 	if (!valid) {
2814 		if (val >= 0)
2815 			dev_err(hsotg->dev,
2816 				"%d invalid for host_channels. Check HW configuration.\n",
2817 				val);
2818 		val = hsotg->hw_params.host_channels;
2819 		dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2820 	}
2821 
2822 	hsotg->core_params->host_channels = val;
2823 }
2824 
void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
{
	/* valid starts at 0 and is only set once val matches the hardware */
	int valid = 0;
	u32 hs_phy_type, fs_phy_type;

	/*
	 * Out-of-range values get the error messages here but still fall
	 * through the hardware checks below; valid remains 0 for them
	 * since none of the comparisons can match.
	 */
	if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
			       DWC2_PHY_TYPE_PARAM_ULPI)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for phy_type\n");
			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
		}

		valid = 0;
	}

	/* Accept the requested PHY type only if the hardware has it */
	hs_phy_type = hsotg->hw_params.hs_phy_type;
	fs_phy_type = hsotg->hw_params.fs_phy_type;
	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
		valid = 1;
	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
		valid = 1;
	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		valid = 1;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for phy_type. Check HW configuration.\n",
				val);
		/* Fall back to whatever PHY the hardware actually has,
		 * preferring a high-speed PHY when one is present */
		val = DWC2_PHY_TYPE_PARAM_FS;
		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
				val = DWC2_PHY_TYPE_PARAM_UTMI;
			else
				val = DWC2_PHY_TYPE_PARAM_ULPI;
		}
		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
	}

	hsotg->core_params->phy_type = val;
}
2872 
/* Return the currently configured phy_type core parameter */
STATIC int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
{
	return hsotg->core_params->phy_type;
}
2877 
/*
 * Validate and store the 'speed' core parameter.  A value outside 0..1,
 * or a high-speed request on a core wired to a dedicated full-speed
 * PHY, falls back to a default derived from the configured phy_type.
 * Negative values select the default without logging an error.
 */
void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for speed parameter\n");
			dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
		}
		valid = 0;
	}

	/* High speed cannot be delivered by a dedicated full-speed PHY */
	if (val == DWC2_SPEED_PARAM_HIGH &&
	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for speed parameter. Check HW configuration.\n",
				val);
		/* Pick the fastest speed the configured PHY can support */
		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
				DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
		dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
	}

	hsotg->core_params->speed = val;
}
2906 
/*
 * Validate and store the 'host_ls_low_power_phy_clk' core parameter
 * (48 MHz vs. 6 MHz PHY clock selection).  The 48 MHz option is
 * rejected on a dedicated full-speed PHY; invalid requests fall back to
 * a default derived from the configured phy_type.  Negative values
 * select the default without logging an error.
 */
void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
			       DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for host_ls_low_power_phy_clk parameter\n");
			dev_err(hsotg->dev,
				"host_ls_low_power_phy_clk must be 0 or 1\n");
		}
		valid = 0;
	}

	/* The 48 MHz option is not usable with a dedicated full-speed PHY */
	if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
				val);
		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
			? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
			: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
		dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
			val);
	}

	hsotg->core_params->host_ls_low_power_phy_clk = val;
}
2940 
2941 void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2942 {
2943 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2944 		if (val >= 0) {
2945 			dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2946 			dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
2947 		}
2948 		val = 0;
2949 		dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
2950 	}
2951 
2952 	hsotg->core_params->phy_ulpi_ddr = val;
2953 }
2954 
2955 void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2956 {
2957 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2958 		if (val >= 0) {
2959 			dev_err(hsotg->dev,
2960 				"Wrong value for phy_ulpi_ext_vbus\n");
2961 			dev_err(hsotg->dev,
2962 				"phy_ulpi_ext_vbus must be 0 or 1\n");
2963 		}
2964 		val = 0;
2965 		dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2966 	}
2967 
2968 	hsotg->core_params->phy_ulpi_ext_vbus = val;
2969 }
2970 
/*
 * Validate and store the 'phy_utmi_width' core parameter (UTMI+ data
 * bus width in bits).  Only a width matching the hardware-reported
 * capability (8, 16, or either) is accepted; anything else falls back
 * to the hardware's width, preferring 16 when both are possible.
 * Negative values select the default without logging an error.
 */
void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 0;

	switch (hsotg->hw_params.utmi_phy_data_width) {
	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
		valid = (val == 8);
		break;
	case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
		valid = (val == 16);
		break;
	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
		valid = (val == 8 || val == 16);
		break;
	}

	if (!valid) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"%d invalid for phy_utmi_width. Check HW configuration.\n",
				val);
		}
		/* Use the hardware width; 16 when the core supports both */
		val = (hsotg->hw_params.utmi_phy_data_width ==
		       GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
		dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
	}

	hsotg->core_params->phy_utmi_width = val;
}
3000 
3001 void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
3002 {
3003 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3004 		if (val >= 0) {
3005 			dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
3006 			dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
3007 		}
3008 		val = 0;
3009 		dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
3010 	}
3011 
3012 	hsotg->core_params->ulpi_fs_ls = val;
3013 }
3014 
3015 void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
3016 {
3017 	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3018 		if (val >= 0) {
3019 			dev_err(hsotg->dev, "Wrong value for ts_dline\n");
3020 			dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
3021 		}
3022 		val = 0;
3023 		dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
3024 	}
3025 
3026 	hsotg->core_params->ts_dline = val;
3027 }
3028 
/*
 * Validate and store the 'i2c_enable' core parameter.  Enabling I2C is
 * only allowed when the hardware reports I2C support; otherwise, and
 * for other invalid values, fall back to the hardware capability.
 * Negative values select the default without logging an error.
 */
void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
		}

		valid = 0;
	}

	/* Cannot enable I2C on hardware that does not implement it */
	if (val == 1 && !(hsotg->hw_params.i2c_enable))
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for i2c_enable. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.i2c_enable;
		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
	}

	hsotg->core_params->i2c_enable = val;
}
3056 
/*
 * Validate and store the 'en_multiple_tx_fifo' core parameter.
 * Dedicated TX FIFOs can only be enabled when the hardware reports
 * support for them; otherwise, and for other invalid values, fall back
 * to the hardware capability.  Negative values select the default
 * without logging an error.
 */
void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for en_multiple_tx_fifo,\n");
			dev_err(hsotg->dev,
				"en_multiple_tx_fifo must be 0 or 1\n");
		}
		valid = 0;
	}

	/* Cannot enable a feature the hardware does not implement */
	if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.en_multiple_tx_fifo;
		dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
	}

	hsotg->core_params->en_multiple_tx_fifo = val;
}
3085 
/*
 * Validate and store the 'reload_ctl' core parameter.  Enabling it
 * requires core revision 2.92a or later; otherwise, and for other
 * invalid values, fall back to a default derived from the core
 * revision.  Negative values select the default without logging an
 * error.
 */
void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter reload_ctl\n", val);
			dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
		}
		valid = 0;
	}

	/* Reload control only exists from core revision 2.92a onward */
	if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for parameter reload_ctl. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
		dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
	}

	hsotg->core_params->reload_ctl = val;
}
3113 
3114 void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
3115 {
3116 	if (val != -1)
3117 		hsotg->core_params->ahbcfg = val;
3118 	else
3119 		hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
3120 						GAHBCFG_HBSTLEN_SHIFT;
3121 }
3122 
/*
 * Validate and store the 'otg_ver' core parameter: 0 selects OTG 1.3
 * support, 1 selects OTG 2.0 support.  Out-of-range values fall back
 * to 0; negative values do so without logging an error.
 */
void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter otg_ver\n", val);
			dev_err(hsotg->dev,
				"otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
	}

	hsotg->core_params->otg_ver = val;
}
3138 
/*
 * Validate and store the 'uframe_sched' core parameter.  Out-of-range
 * values fall back to 1 (scheduler enabled); negative values do so
 * without logging an error.
 */
STATIC void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter uframe_sched\n",
				val);
			dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
		}
		val = 1;
		dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
	}

	hsotg->core_params->uframe_sched = val;
}
3154 
/*
 * Validate and store the 'external_id_pin_ctl' core parameter.
 * Out-of-range values fall back to 0; negative values do so without
 * logging an error.
 */
STATIC void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
		int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter external_id_pin_ctl\n",
				val);
			dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
	}

	hsotg->core_params->external_id_pin_ctl = val;
}
3171 
/*
 * Validate and store the 'hibernation' core parameter.  Out-of-range
 * values fall back to 0 (disabled); negative values do so without
 * logging an error.
 */
STATIC void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
		int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter hibernation\n",
				val);
			dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
	}

	hsotg->core_params->hibernation = val;
}
3188 
3189 /*
3190  * This function is called during module intialization to pass module parameters
3191  * for the DWC_otg core.
3192  */
3193 void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
3194 			 const struct dwc2_core_params *params)
3195 {
3196 	dev_dbg(hsotg->dev, "%s()\n", __func__);
3197 
3198 	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
3199 	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
3200 	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
3201 	dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
3202 	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
3203 			params->host_support_fs_ls_low_power);
3204 	dwc2_set_param_enable_dynamic_fifo(hsotg,
3205 			params->enable_dynamic_fifo);
3206 	dwc2_set_param_host_rx_fifo_size(hsotg,
3207 			params->host_rx_fifo_size);
3208 	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
3209 			params->host_nperio_tx_fifo_size);
3210 	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
3211 			params->host_perio_tx_fifo_size);
3212 	dwc2_set_param_max_transfer_size(hsotg,
3213 			params->max_transfer_size);
3214 	dwc2_set_param_max_packet_count(hsotg,
3215 			params->max_packet_count);
3216 	dwc2_set_param_host_channels(hsotg, params->host_channels);
3217 	dwc2_set_param_phy_type(hsotg, params->phy_type);
3218 	dwc2_set_param_speed(hsotg, params->speed);
3219 	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
3220 			params->host_ls_low_power_phy_clk);
3221 	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
3222 	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
3223 			params->phy_ulpi_ext_vbus);
3224 	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
3225 	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
3226 	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
3227 	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
3228 	dwc2_set_param_en_multiple_tx_fifo(hsotg,
3229 			params->en_multiple_tx_fifo);
3230 	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
3231 	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
3232 	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
3233 	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
3234 	dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
3235 	dwc2_set_param_hibernation(hsotg, params->hibernation);
3236 }
3237 
3238 /*
3239  * Forces either host or device mode if the controller is not
3240  * currently in that mode.
3241  *
3242  * Returns true if the mode was forced.
3243  */
3244 STATIC bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
3245 {
3246 	if (host && dwc2_is_host_mode(hsotg))
3247 		return false;
3248 	else if (!host && dwc2_is_device_mode(hsotg))
3249 		return false;
3250 
3251 	return dwc2_force_mode(hsotg, host);
3252 }
3253 
3254 /*
3255  * Gets host hardware parameters. Forces host mode if not currently in
3256  * host mode. Should be called immediately after a core soft reset in
3257  * order to get the reset values.
3258  */
3259 STATIC void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
3260 {
3261 	struct dwc2_hw_params *hw = &hsotg->hw_params;
3262 	u32 gnptxfsiz;
3263 	u32 hptxfsiz;
3264 	bool forced;
3265 
3266 	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
3267 		return;
3268 
3269 	forced = dwc2_force_mode_if_needed(hsotg, true);
3270 
3271 	gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ);
3272 	hptxfsiz = DWC2_READ_4(hsotg, HPTXFSIZ);
3273 	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
3274 	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
3275 
3276 	if (forced)
3277 		dwc2_clear_force_mode(hsotg);
3278 
3279 	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
3280 				       FIFOSIZE_DEPTH_SHIFT;
3281 	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
3282 				      FIFOSIZE_DEPTH_SHIFT;
3283 }
3284 
3285 /*
3286  * Gets device hardware parameters. Forces device mode if not
3287  * currently in device mode. Should be called immediately after a core
3288  * soft reset in order to get the reset values.
3289  */
3290 STATIC void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
3291 {
3292 	struct dwc2_hw_params *hw = &hsotg->hw_params;
3293 	bool forced;
3294 	u32 gnptxfsiz;
3295 
3296 	if (hsotg->dr_mode == USB_DR_MODE_HOST)
3297 		return;
3298 
3299 	forced = dwc2_force_mode_if_needed(hsotg, false);
3300 
3301 	gnptxfsiz = DWC2_READ_4(hsotg, GNPTXFSIZ);
3302 	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
3303 
3304 	if (forced)
3305 		dwc2_clear_force_mode(hsotg);
3306 
3307 	hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
3308 				       FIFOSIZE_DEPTH_SHIFT;
3309 }
3310 
3311 /**
3312  * During device initialization, read various hardware configuration
3313  * registers and interpret the contents.
3314  */
3315 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
3316 {
3317 	struct dwc2_hw_params *hw = &hsotg->hw_params;
3318 	unsigned width;
3319 	u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
3320 	u32 grxfsiz;
3321 
3322 	/*
3323 	 * Attempt to ensure this device is really a DWC_otg Controller.
3324 	 * Read and verify the GSNPSID register contents. The value should be
3325 	 * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3",
3326 	 * as in "OTG version 2.xx" or "OTG version 3.xx".
3327 	 */
3328 	hw->snpsid = DWC2_READ_4(hsotg, GSNPSID);
3329 	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
3330 	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
3331 		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
3332 			hw->snpsid);
3333 		return -ENODEV;
3334 	}
3335 
3336 	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
3337 		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
3338 		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
3339 
3340 	hwcfg1 = DWC2_READ_4(hsotg, GHWCFG1);
3341 	hwcfg2 = DWC2_READ_4(hsotg, GHWCFG2);
3342 	hwcfg3 = DWC2_READ_4(hsotg, GHWCFG3);
3343 	hwcfg4 = DWC2_READ_4(hsotg, GHWCFG4);
3344 	grxfsiz = DWC2_READ_4(hsotg, GRXFSIZ);
3345 
3346 	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
3347 	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
3348 	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
3349 	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
3350 	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
3351 
3352 	/*
3353 	 * Host specific hardware parameters. Reading these parameters
3354 	 * requires the controller to be in host mode. The mode will
3355 	 * be forced, if necessary, to read these values.
3356 	 */
3357 	dwc2_get_host_hwparams(hsotg);
3358 	dwc2_get_dev_hwparams(hsotg);
3359 
3360 	/* hwcfg1 */
3361 	hw->dev_ep_dirs = hwcfg1;
3362 
3363 	/* hwcfg2 */
3364 	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
3365 		      GHWCFG2_OP_MODE_SHIFT;
3366 	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
3367 		   GHWCFG2_ARCHITECTURE_SHIFT;
3368 	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
3369 	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
3370 				GHWCFG2_NUM_HOST_CHAN_SHIFT);
3371 	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
3372 			  GHWCFG2_HS_PHY_TYPE_SHIFT;
3373 	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
3374 			  GHWCFG2_FS_PHY_TYPE_SHIFT;
3375 	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
3376 			 GHWCFG2_NUM_DEV_EP_SHIFT;
3377 	hw->nperio_tx_q_depth =
3378 		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
3379 		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
3380 	hw->host_perio_tx_q_depth =
3381 		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
3382 		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
3383 	hw->dev_token_q_depth =
3384 		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
3385 		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
3386 
3387 	/* hwcfg3 */
3388 	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
3389 		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
3390 	hw->max_transfer_size = (1 << (width + 11)) - 1;
3391 	/*
3392 	 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
3393 	 * coherent buffers with this size, and if it's too large we can
3394 	 * exhaust the coherent DMA pool.
3395 	 */
3396 	if (hw->max_transfer_size > 65535)
3397 		hw->max_transfer_size = 65535;
3398 	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
3399 		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
3400 	hw->max_packet_count = (1 << (width + 4)) - 1;
3401 	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
3402 	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
3403 			      GHWCFG3_DFIFO_DEPTH_SHIFT;
3404 
3405 	/* hwcfg4 */
3406 	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
3407 	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
3408 				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
3409 	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
3410 	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
3411 	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
3412 				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
3413 
3414 	/* fifo sizes */
3415 	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
3416 				GRXFSIZ_DEPTH_SHIFT;
3417 
3418 	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
3419 	dev_dbg(hsotg->dev, "  op_mode=%d\n",
3420 		hw->op_mode);
3421 	dev_dbg(hsotg->dev, "  arch=%d\n",
3422 		hw->arch);
3423 	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
3424 		hw->dma_desc_enable);
3425 	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
3426 		hw->power_optimized);
3427 	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
3428 		hw->i2c_enable);
3429 	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
3430 		hw->hs_phy_type);
3431 	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
3432 		hw->fs_phy_type);
3433 	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
3434 		hw->utmi_phy_data_width);
3435 	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
3436 		hw->num_dev_ep);
3437 	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
3438 		hw->num_dev_perio_in_ep);
3439 	dev_dbg(hsotg->dev, "  host_channels=%d\n",
3440 		hw->host_channels);
3441 	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
3442 		hw->max_transfer_size);
3443 	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
3444 		hw->max_packet_count);
3445 	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
3446 		hw->nperio_tx_q_depth);
3447 	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
3448 		hw->host_perio_tx_q_depth);
3449 	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
3450 		hw->dev_token_q_depth);
3451 	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
3452 		hw->enable_dynamic_fifo);
3453 	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
3454 		hw->en_multiple_tx_fifo);
3455 	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
3456 		hw->total_fifo_size);
3457 	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
3458 		hw->host_rx_fifo_size);
3459 	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
3460 		hw->host_nperio_tx_fifo_size);
3461 	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
3462 		hw->host_perio_tx_fifo_size);
3463 	dev_dbg(hsotg->dev, "\n");
3464 
3465 	return 0;
3466 }
3467 
3468 /*
3469  * Sets all parameters to the given value.
3470  *
3471  * Assumes that the dwc2_core_params struct contains only integers.
3472  */
3473 void dwc2_set_all_params(struct dwc2_core_params *params, int value)
3474 {
3475 	int *p = (int *)params;
3476 	size_t size = sizeof(*params) / sizeof(*p);
3477 	int i;
3478 
3479 	for (i = 0; i < size; i++)
3480 		p[i] = value;
3481 }
3482 
3483 
3484 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
3485 {
3486 	return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
3487 }
3488 
3489 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
3490 {
3491 	if (DWC2_READ_4(hsotg, GSNPSID) == 0xffffffff)
3492 		return false;
3493 	else
3494 		return true;
3495 }
3496 
3497 /**
3498  * dwc2_enable_global_interrupts() - Enables the controller's Global
3499  * Interrupt in the AHB Config register
3500  *
3501  * @hsotg: Programming view of DWC_otg controller
3502  */
3503 void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
3504 {
3505 	u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
3506 
3507 	ahbcfg |= GAHBCFG_GLBL_INTR_EN;
3508 	DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
3509 }
3510 
3511 /**
3512  * dwc2_disable_global_interrupts() - Disables the controller's Global
3513  * Interrupt in the AHB Config register
3514  *
3515  * @hsotg: Programming view of DWC_otg controller
3516  */
3517 void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
3518 {
3519 	u32 ahbcfg = DWC2_READ_4(hsotg, GAHBCFG);
3520 
3521 	ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
3522 	DWC2_WRITE_4(hsotg, GAHBCFG, ahbcfg);
3523 }
3524 
3525 /* Returns the controller's GHWCFG2.OTG_MODE. */
3526 unsigned dwc2_op_mode(struct dwc2_hsotg *hsotg)
3527 {
3528 	u32 ghwcfg2 = DWC2_READ_4(hsotg, GHWCFG2);
3529 
3530 	return (ghwcfg2 & GHWCFG2_OP_MODE_MASK) >>
3531 		GHWCFG2_OP_MODE_SHIFT;
3532 }
3533 
3534 /* Returns true if the controller is capable of DRD. */
3535 bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg)
3536 {
3537 	unsigned op_mode = dwc2_op_mode(hsotg);
3538 
3539 	return (op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) ||
3540 		(op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE) ||
3541 		(op_mode == GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE);
3542 }
3543 
3544 /* Returns true if the controller is host-only. */
3545 bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg)
3546 {
3547 	unsigned op_mode = dwc2_op_mode(hsotg);
3548 
3549 	return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST) ||
3550 		(op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST);
3551 }
3552 
3553 /* Returns true if the controller is device-only. */
3554 bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg)
3555 {
3556 	unsigned op_mode = dwc2_op_mode(hsotg);
3557 
3558 	return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) ||
3559 		(op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE);
3560 }
3561