xref: /netbsd-src/sys/external/bsd/dwc2/dist/dwc2_hcdintr.c (revision 63aea4bd5b445e491ff0389fe27ec78b3099dba3)
1 /*	$NetBSD: dwc2_hcdintr.c,v 1.12 2015/08/30 12:59:59 skrll Exp $	*/
2 
3 /*
4  * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
5  *
6  * Copyright (C) 2004-2013 Synopsys, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The names of the above-listed copyright holders may not be used
18  *    to endorse or promote products derived from this software without
19  *    specific prior written permission.
20  *
21  * ALTERNATIVELY, this software may be distributed under the terms of the
22  * GNU General Public License ("GPL") as published by the Free Software
23  * Foundation; either version 2 of the License, or (at your option) any
24  * later version.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * This file contains the interrupt handlers for Host mode
41  */
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: dwc2_hcdintr.c,v 1.12 2015/08/30 12:59:59 skrll Exp $");
44 
45 #include <sys/types.h>
46 #include <sys/pool.h>
47 
48 #include <dev/usb/usb.h>
49 #include <dev/usb/usbdi.h>
50 #include <dev/usb/usbdivar.h>
51 #include <dev/usb/usb_mem.h>
52 
53 #include <machine/param.h>
54 
55 #include <linux/kernel.h>
56 
57 #include <dwc2/dwc2.h>
58 #include <dwc2/dwc2var.h>
59 
60 #include "dwc2_core.h"
61 #include "dwc2_hcd.h"
62 
63 /* This function is for debug only */
static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
	u16 frnum = hsotg->frame_number;

	if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
		u16 expected = (hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM;

		/* Record only frames that did not follow the previous one */
		if (expected != frnum) {
			int idx = hsotg->frame_num_idx++;

			hsotg->frame_num_array[idx] = frnum;
			hsotg->last_frame_num_array[idx] =
					hsotg->last_frame_num;
		}
	} else if (!hsotg->dumped_frame_num_array) {
		int i;

		/* Array is full: dump it once, then stop collecting */
		dev_info(hsotg->dev, "Frame     Last Frame\n");
		dev_info(hsotg->dev, "-----     ----------\n");
		for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
			dev_info(hsotg->dev, "0x%04x    0x%04x\n",
				 hsotg->frame_num_array[i],
				 hsotg->last_frame_num_array[i]);
		}
		hsotg->dumped_frame_num_array = 1;
	}
	hsotg->last_frame_num = frnum;
#endif
}
93 
94 static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
95 				    struct dwc2_host_chan *chan,
96 				    struct dwc2_qtd *qtd)
97 {
98 // 	struct urb *usb_urb;
99 
100 	if (!chan->qh)
101 		return;
102 
103 	if (chan->qh->dev_speed == USB_SPEED_HIGH)
104 		return;
105 
106 	if (!qtd->urb)
107 		return;
108 
109 
110 	if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
111 		chan->qh->tt_buffer_dirty = 1;
112 			chan->qh->tt_buffer_dirty = 0;
113 	}
114 }
115 
116 /*
117  * Handles the start-of-frame interrupt in host mode. Non-periodic
118  * transactions may be queued to the DWC_otg controller for the current
119  * (micro)frame. Periodic transactions may be queued to the controller
120  * for the next (micro)frame.
121  */
122 static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
123 {
124 	struct list_head *qh_entry;
125 	struct dwc2_qh *qh;
126 	enum dwc2_transaction_type tr_type;
127 
128 #ifdef DEBUG_SOF
129 	dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
130 #endif
131 
132 	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
133 
134 	dwc2_track_missed_sofs(hsotg);
135 
136 	/* Determine whether any periodic QHs should be executed */
137 	qh_entry = hsotg->periodic_sched_inactive.next;
138 	while (qh_entry != &hsotg->periodic_sched_inactive) {
139 		qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
140 		qh_entry = qh_entry->next;
141 		if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number))
142 			/*
143 			 * Move QH to the ready list to be executed next
144 			 * (micro)frame
145 			 */
146 			list_move(&qh->qh_list_entry,
147 				  &hsotg->periodic_sched_ready);
148 	}
149 	tr_type = dwc2_hcd_select_transactions(hsotg);
150 	if (tr_type != DWC2_TRANSACTION_NONE)
151 		dwc2_hcd_queue_transactions(hsotg, tr_type);
152 
153 	/* Clear interrupt */
154 	DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_SOF);
155 }
156 
157 /*
158  * Handles the Rx FIFO Level Interrupt, which indicates that there is
159  * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
160  * memory if the DWC_otg controller is operating in Slave mode.
161  */
162 static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
163 {
164 	u32 grxsts, chnum, bcnt, pktsts;
165 	struct dwc2_host_chan *chan;
166 
167 	if (dbg_perio())
168 		dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");
169 
170 	grxsts = DWC2_READ_4(hsotg, GRXSTSP);
171 	chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
172 	chan = hsotg->hc_ptr_array[chnum];
173 	if (!chan) {
174 		dev_err(hsotg->dev, "Unable to get corresponding channel\n");
175 		return;
176 	}
177 
178 	bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
179 	pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;
180 
181 	/* Packet Status */
182 	if (dbg_perio()) {
183 		dev_vdbg(hsotg->dev, "    Ch num = %d\n", chnum);
184 		dev_vdbg(hsotg->dev, "    Count = %d\n", bcnt);
185 		dev_vdbg(hsotg->dev, "    DPID = %d, chan.dpid = %d\n",
186 			 (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT,
187 			 chan->data_pid_start);
188 		dev_vdbg(hsotg->dev, "    PStatus = %d\n", pktsts);
189 	}
190 
191 	switch (pktsts) {
192 	case GRXSTS_PKTSTS_HCHIN:
193 		/* Read the data into the host buffer */
194 		if (bcnt > 0) {
195 			dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);
196 
197 			/* Update the HC fields for the next packet received */
198 			chan->xfer_count += bcnt;
199 			chan->xfer_buf += bcnt;
200 		}
201 		break;
202 	case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
203 	case GRXSTS_PKTSTS_DATATOGGLEERR:
204 	case GRXSTS_PKTSTS_HCHHALTED:
205 		/* Handled in interrupt, just ignore data */
206 		break;
207 	default:
208 		dev_err(hsotg->dev,
209 			"RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
210 		break;
211 	}
212 }
213 
214 /*
215  * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
216  * data packets may be written to the FIFO for OUT transfers. More requests
217  * may be written to the non-periodic request queue for IN transfers. This
218  * interrupt is enabled only in Slave mode.
219  */
static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
	/* Refill the FIFO/request queue from the non-periodic schedule */
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
}
225 
226 /*
227  * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
228  * packets may be written to the FIFO for OUT transfers. More requests may be
229  * written to the periodic request queue for IN transfers. This interrupt is
230  * enabled only in Slave mode.
231  */
static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
	/* Refill the FIFO/request queue from the periodic schedule */
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
}
238 
/*
 * Handles a port-enabled transition.  Recomputes HFIR.FrInterval and, when
 * the core parameters enable FS/LS low-power PHY clocking, reprograms the
 * PHY and FS/LS clock selection to match the speed of the attached device.
 * If any clock selection changed, a port reset is scheduled on the reset
 * work queue so the new clocking takes effect; otherwise the
 * port_reset_change flag is set and the root hub is notified.
 *
 * @hsotg:        Programming view of DWC_otg controller
 * @hprt0:        Value of the HPRT0 register as read by the caller
 * @hprt0_modify: In/out image of HPRT0 the caller will write back;
 *                HPRT0_RST is OR'd into it here when a reset is required
 */
static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
			      u32 *hprt0_modify)
{
	struct dwc2_core_params *params = hsotg->core_params;
	int do_reset = 0;
	u32 usbcfg;
	u32 prtspd;
	u32 hcfg;
	u32 fslspclksel;
	u32 hfir;

	dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Every time when port enables calculate HFIR.FrInterval */
	hfir = DWC2_READ_4(hsotg, HFIR);
	hfir &= ~HFIR_FRINT_MASK;
	hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
		HFIR_FRINT_MASK;
	DWC2_WRITE_4(hsotg, HFIR, hfir);

	/* Check if we need to adjust the PHY clock speed for low power */
	if (!params->host_support_fs_ls_low_power) {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;

		/* Notify the root hub of the status change */
		dwc2_root_intr(hsotg->hsotg_sc);
		return;
	}

	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
	prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;

	if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
		/* Low power */
		if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
			/* Set PHY low power clock select for FS/LS devices */
			usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
			do_reset = 1;
		}

		hcfg = DWC2_READ_4(hsotg, HCFG);
		fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
			      HCFG_FSLSPCLKSEL_SHIFT;

		/* LS devices may optionally run the FS/LS clock at 6 MHz */
		if (prtspd == HPRT0_SPD_LOW_SPEED &&
		    params->host_ls_low_power_phy_clk ==
		    DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
			/* 6 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 6 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				DWC2_WRITE_4(hsotg, HCFG, hcfg);
				do_reset = 1;
			}
		} else {
			/* 48 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 48 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				DWC2_WRITE_4(hsotg, HCFG, hcfg);
				do_reset = 1;
			}
		}
	} else {
		/* Not low power */
		if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
			usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
			do_reset = 1;
		}
	}

	if (do_reset) {
		/* Defer the reset; it is released 60 ms later by reset_work */
		*hprt0_modify |= HPRT0_RST;
		queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
				   msecs_to_jiffies(60));
	} else {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;
		dwc2_root_intr(hsotg->hsotg_sc);

	}
}
329 
330 /*
331  * There are multiple conditions that can cause a port interrupt. This function
332  * determines which interrupt conditions have occurred and handles them
333  * appropriately.
334  */
static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
{
	u32 hprt0;
	u32 hprt0_modify;

	dev_vdbg(hsotg->dev, "--Port Interrupt--\n");

	hprt0 = DWC2_READ_4(hsotg, HPRT0);
	hprt0_modify = hprt0;

	/*
	 * Clear appropriate bits in HPRT0 to clear the interrupt bit in
	 * GINTSTS
	 */
	/*
	 * NOTE(review): the *CHG bits are acknowledged by writing 1 back
	 * (the handled ones are OR'd back in below), and HPRT0_ENA is masked
	 * out because writing it back as 1 would disable the port.
	 */
	hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
			  HPRT0_OVRCURRCHG);

	/*
	 * Port Connect Detected
	 * Set flag and clear if detected
	 */
	if (hprt0 & HPRT0_CONNDET) {
		dev_vdbg(hsotg->dev,
			 "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
			 hprt0);
		/* Wake the root hub if the bus was suspended/low-power */
		if (hsotg->lx_state != DWC2_L0)
			usb_hcd_resume_root_hub(hsotg->priv);

		hsotg->flags.b.port_connect_status_change = 1;
		hsotg->flags.b.port_connect_status = 1;
		hprt0_modify |= HPRT0_CONNDET;

		/*
		 * The Hub driver asserts a reset when it sees port connect
		 * status change flag
		 */
	}

	/*
	 * Port Enable Changed
	 * Clear if detected - Set internal flag if disabled
	 */
	if (hprt0 & HPRT0_ENACHG) {
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
			 hprt0, !!(hprt0 & HPRT0_ENA));
		hprt0_modify |= HPRT0_ENACHG;
		if (hprt0 & HPRT0_ENA)
			/* Newly enabled: set up PHY clocking for the device */
			dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
		else
			hsotg->flags.b.port_enable_change = 1;
	}

	/* Overcurrent Change Interrupt */
	if (hprt0 & HPRT0_OVRCURRCHG) {
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
			 hprt0);
		hsotg->flags.b.port_over_current_change = 1;
		hprt0_modify |= HPRT0_OVRCURRCHG;
	}

	/* Clear Port Interrupts */
	DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify);

	/* Propagate any recorded status change to the root hub driver */
	if (hsotg->flags.b.port_connect_status_change ||
	    hsotg->flags.b.port_enable_change ||
	    hsotg->flags.b.port_over_current_change)
		dwc2_root_intr(hsotg->hsotg_sc);
}
405 
406 /*
407  * Gets the actual length of a transfer after the transfer halts. halt_status
408  * holds the reason for the halt.
409  *
410  * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
411  * is set to 1 upon return if less than the requested number of bytes were
412  * transferred. short_read may also be NULL on entry, in which case it remains
413  * unchanged.
414  */
415 static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
416 				       struct dwc2_host_chan *chan, int chnum,
417 				       struct dwc2_qtd *qtd,
418 				       enum dwc2_halt_status halt_status,
419 				       int *short_read)
420 {
421 	u32 hctsiz, count, length;
422 
423 	hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
424 
425 	if (halt_status == DWC2_HC_XFER_COMPLETE) {
426 		if (chan->ep_is_in) {
427 			count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
428 				TSIZ_XFERSIZE_SHIFT;
429 			length = chan->xfer_len - count;
430 			if (short_read != NULL)
431 				*short_read = (count != 0);
432 		} else if (chan->qh->do_split) {
433 			length = qtd->ssplit_out_xfer_count;
434 		} else {
435 			length = chan->xfer_len;
436 		}
437 	} else {
438 		/*
439 		 * Must use the hctsiz.pktcnt field to determine how much data
440 		 * has been transferred. This field reflects the number of
441 		 * packets that have been transferred via the USB. This is
442 		 * always an integral number of packets if the transfer was
443 		 * halted before its normal completion. (Can't use the
444 		 * hctsiz.xfersize field because that reflects the number of
445 		 * bytes transferred via the AHB, not the USB).
446 		 */
447 		count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
448 		length = (chan->start_pkt_count - count) * chan->max_packet;
449 	}
450 
451 	return length;
452 }
453 
454 /**
455  * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
456  * Complete interrupt on the host channel. Updates the actual_length field
457  * of the URB based on the number of bytes transferred via the host channel.
458  * Sets the URB status if the data transfer is finished.
459  *
460  * Return: 1 if the data transfer specified by the URB is completely finished,
461  * 0 otherwise
462  */
static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 struct dwc2_hcd_urb *urb,
				 struct dwc2_qtd *qtd)
{
	int xfer_done = 0;
	int short_read = 0;
	int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
						      DWC2_HC_XFER_COMPLETE,
						      &short_read);

	/* Never let actual_length grow past the requested length */
	if (urb->actual_length + xfer_length > urb->length) {
		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
		xfer_length = urb->length - urb->actual_length;
	}

	/* Non DWORD-aligned buffer case handling */
	/*
	 * The transfer went through the QH's aligned bounce buffer; sync it
	 * and, for IN transfers, copy the received bytes back into the
	 * caller's buffer at the current offset.
	 */
	if (chan->align_buf && xfer_length) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		usb_syncmem(urb->usbdma, 0, chan->qh->dw_align_buf_size,
		    chan->ep_is_in ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		if (chan->ep_is_in)
			memcpy(urb->buf + urb->actual_length,
					chan->qh->dw_align_buf, xfer_length);
		usb_syncmem(urb->usbdma, 0, chan->qh->dw_align_buf_size,
		    chan->ep_is_in ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}

	dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
		 urb->actual_length, xfer_length);
	urb->actual_length += xfer_length;

	/*
	 * A bulk transfer that ended exactly on a packet boundary with
	 * URB_SEND_ZERO_PACKET set still owes a zero-length packet, so it is
	 * not considered done yet.  Otherwise a short packet or reaching the
	 * requested length finishes the URB.
	 */
	if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
	    (urb->flags & URB_SEND_ZERO_PACKET) &&
	    urb->actual_length >= urb->length &&
	    !(urb->length % chan->max_packet)) {
		xfer_done = 0;
	} else if (short_read || urb->actual_length >= urb->length) {
		xfer_done = 1;
		urb->status = 0;
	}

	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
	dev_vdbg(hsotg->dev, "  chan->xfer_len %d\n", chan->xfer_len);
	dev_vdbg(hsotg->dev, "  hctsiz.xfersize %d\n",
		 (DWC2_READ_4(hsotg, HCTSIZ(chnum)) & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n", urb->length);
	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n", urb->actual_length);
	dev_vdbg(hsotg->dev, "  short_read %d, xfer_done %d\n", short_read,
		 xfer_done);

	return xfer_done;
}
519 
520 /*
521  * Save the starting data toggle for the next transfer. The data toggle is
522  * saved in the QH for non-control transfers and it's saved in the QTD for
523  * control transfers.
524  */
525 void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
526 			       struct dwc2_host_chan *chan, int chnum,
527 			       struct dwc2_qtd *qtd)
528 {
529 	u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
530 	u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
531 
532 	if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
533 		if (pid == TSIZ_SC_MC_PID_DATA0)
534 			chan->qh->data_toggle = DWC2_HC_PID_DATA0;
535 		else
536 			chan->qh->data_toggle = DWC2_HC_PID_DATA1;
537 	} else {
538 		if (pid == TSIZ_SC_MC_PID_DATA0)
539 			qtd->data_toggle = DWC2_HC_PID_DATA0;
540 		else
541 			qtd->data_toggle = DWC2_HC_PID_DATA1;
542 	}
543 }
544 
545 /**
546  * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
547  * the transfer is stopped for any reason. The fields of the current entry in
548  * the frame descriptor array are set based on the transfer state and the input
549  * halt_status. Completes the Isochronous URB if all the URB frames have been
550  * completed.
551  *
552  * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
553  * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
554  */
static enum dwc2_halt_status dwc2_update_isoc_urb_state(
		struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		int chnum, struct dwc2_qtd *qtd,
		enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_hcd_urb *urb = qtd->urb;

	if (!urb)
		return DWC2_HC_XFER_NO_HALT_STATUS;

	/* Descriptor for the frame currently being processed */
	frame_desc = &urb->iso_descs[qtd->isoc_frame_index];

	switch (halt_status) {
	case DWC2_HC_XFER_COMPLETE:
		frame_desc->status = 0;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		/* Sync the bounce buffer; copy IN data back to the URB */
		if (chan->align_buf && frame_desc->actual_length) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			usb_dma_t *ud = &chan->qh->dw_align_buf_usbdma;

			usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
			    chan->ep_is_in ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			if (chan->ep_is_in)
				memcpy(urb->buf + frame_desc->offset +
					qtd->isoc_split_offset,
					chan->qh->dw_align_buf,
					frame_desc->actual_length);
			usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
			    chan->ep_is_in ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
		break;
	case DWC2_HC_XFER_FRAME_OVERRUN:
		urb->error_count++;
		if (chan->ep_is_in)
			frame_desc->status = -ENOSR;
		else
			frame_desc->status = -ECOMM;
		frame_desc->actual_length = 0;
		break;
	case DWC2_HC_XFER_BABBLE_ERR:
		urb->error_count++;
		frame_desc->status = -EOVERFLOW;
		/* Don't need to update actual_length in this case */
		break;
	case DWC2_HC_XFER_XACT_ERR:
		urb->error_count++;
		frame_desc->status = -EPROTO;
		/* Record whatever partial data made it across */
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		if (chan->align_buf && frame_desc->actual_length) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			usb_dma_t *ud = &chan->qh->dw_align_buf_usbdma;

			usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
			    chan->ep_is_in ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			if (chan->ep_is_in)
				memcpy(urb->buf + frame_desc->offset +
					qtd->isoc_split_offset,
					chan->qh->dw_align_buf,
					frame_desc->actual_length);
			usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
			    chan->ep_is_in ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}

		/* Skip whole frame */
		/* Abandon the split state so the next frame starts fresh */
		if (chan->qh->do_split &&
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
		}

		break;
	default:
		dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
			halt_status);
		break;
	}

	/* Advance to the next frame; complete the URB after the last one */
	if (++qtd->isoc_frame_index == urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers. The individual
		 * frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		halt_status = DWC2_HC_XFER_URB_COMPLETE;
	} else {
		halt_status = DWC2_HC_XFER_COMPLETE;
	}

	return halt_status;
}
659 
660 /*
661  * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
662  * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
663  * still linked to the QH, the QH is added to the end of the inactive
664  * non-periodic schedule. For periodic QHs, removes the QH from the periodic
665  * schedule if no more QTDs are linked to the QH.
666  */
667 static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
668 			       int free_qtd)
669 {
670 	int continue_split = 0;
671 	struct dwc2_qtd *qtd;
672 
673 	if (dbg_qh(qh))
674 		dev_vdbg(hsotg->dev, "  %s(%p,%p,%d)\n", __func__,
675 			 hsotg, qh, free_qtd);
676 
677 	if (list_empty(&qh->qtd_list)) {
678 		dev_dbg(hsotg->dev, "## QTD list empty ##\n");
679 		goto no_qtd;
680 	}
681 
682 	qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
683 
684 	if (qtd->complete_split)
685 		continue_split = 1;
686 	else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
687 		 qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
688 		continue_split = 1;
689 
690 	if (free_qtd) {
691 		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
692 		continue_split = 0;
693 	}
694 
695 no_qtd:
696 	if (qh->channel)
697 		qh->channel->align_buf = 0;
698 	qh->channel = NULL;
699 	dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
700 }
701 
702 /**
703  * dwc2_release_channel() - Releases a host channel for use by other transfers
704  *
705  * @hsotg:       The HCD state structure
706  * @chan:        The host channel to release
707  * @qtd:         The QTD associated with the host channel. This QTD may be
708  *               freed if the transfer is complete or an error has occurred.
709  * @halt_status: Reason the channel is being released. This status
710  *               determines the actions taken by this function.
711  *
712  * Also attempts to select and queue more transactions since at least one host
713  * channel is available.
714  */
static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan,
				 struct dwc2_qtd *qtd,
				 enum dwc2_halt_status halt_status)
{
	enum dwc2_transaction_type tr_type;
	u32 haintmsk;
	int free_qtd = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "  %s: channel %d, halt_status %d\n",
			 __func__, chan->hc_num, halt_status);

	/* Decide whether the head QTD is finished and should be freed */
	switch (halt_status) {
	case DWC2_HC_XFER_URB_COMPLETE:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_AHB_ERR:
	case DWC2_HC_XFER_STALL:
	case DWC2_HC_XFER_BABBLE_ERR:
		/* Fatal transfer errors: discard the QTD */
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_XACT_ERR:
		/* Only give up after three transaction errors on this QTD */
		if (qtd && qtd->error_count >= 3) {
			dev_vdbg(hsotg->dev,
				 "  Complete URB with transaction error\n");
			free_qtd = 1;
			dwc2_host_complete(hsotg, qtd, -EPROTO);
		}
		break;
	case DWC2_HC_XFER_URB_DEQUEUE:
		/*
		 * The QTD has already been removed and the QH has been
		 * deactivated. Don't want to do anything except release the
		 * host channel and try to queue more transfers.
		 */
		goto cleanup;
	case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
		dev_vdbg(hsotg->dev, "  Complete URB with I/O error\n");
		free_qtd = 1;
		dwc2_host_complete(hsotg, qtd, -EIO);
		break;
	case DWC2_HC_XFER_NO_HALT_STATUS:
	default:
		break;
	}

	dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);

cleanup:
	/*
	 * Release the host channel for use by other transfers. The cleanup
	 * function clears the channel interrupt enables and conditions, so
	 * there's no need to clear the Channel Halted interrupt separately.
	 */
	if (!list_empty(&chan->hc_list_entry))
		list_del(&chan->hc_list_entry);
	dwc2_hc_cleanup(hsotg, chan);
	list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);

	/* Return the channel reservation to the appropriate accounting */
	if (hsotg->core_params->uframe_sched > 0) {
		hsotg->available_host_channels++;
	} else {
		switch (chan->ep_type) {
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			hsotg->non_periodic_channels--;
			break;
		default:
			/*
			 * Don't release reservations for periodic channels
			 * here. That's done when a periodic transfer is
			 * descheduled (i.e. when the QH is removed from the
			 * periodic schedule).
			 */
			break;
		}
	}

	/* Mask this channel's interrupt now that it is idle */
	haintmsk = DWC2_READ_4(hsotg, HAINTMSK);
	haintmsk &= ~(1 << chan->hc_num);
	DWC2_WRITE_4(hsotg, HAINTMSK, haintmsk);

	/* Try to queue more transfers now that there's a free channel */
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);
}
803 
804 /*
805  * Halts a host channel. If the channel cannot be halted immediately because
806  * the request queue is full, this function ensures that the FIFO empty
807  * interrupt for the appropriate queue is enabled so that the halt request can
808  * be queued when there is space in the request queue.
809  *
810  * This function may also be called in DMA mode. In that case, the channel is
811  * simply released since the core always halts the channel automatically in
812  * DMA mode.
813  */
static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
			      enum dwc2_halt_status halt_status)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (hsotg->core_params->dma_enable > 0) {
		/* In DMA mode the core halts the channel itself */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
		return;
	}

	/* Slave mode processing */
	dwc2_hc_halt(hsotg, chan, halt_status);

	if (chan->halt_on_queue) {
		u32 gintmsk;

		/*
		 * The halt could not be issued immediately (request queue
		 * full); enable the matching FIFO-empty interrupt so the halt
		 * gets queued once there is space.
		 */
		dev_vdbg(hsotg->dev, "Halt on queue\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			/*
			 * Make sure the Non-periodic Tx FIFO empty interrupt
			 * is enabled so that the non-periodic schedule will
			 * be processed
			 */
			gintmsk = DWC2_READ_4(hsotg, GINTMSK);
			gintmsk |= GINTSTS_NPTXFEMP;
			DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
		} else {
			dev_vdbg(hsotg->dev, "isoc/intr\n");
			/*
			 * Move the QH from the periodic queued schedule to
			 * the periodic assigned schedule. This allows the
			 * halt to be queued when the periodic schedule is
			 * processed.
			 */
			list_move(&chan->qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);

			/*
			 * Make sure the Periodic Tx FIFO Empty interrupt is
			 * enabled so that the periodic schedule will be
			 * processed
			 */
			gintmsk = DWC2_READ_4(hsotg, GINTMSK);
			gintmsk |= GINTSTS_PTXFEMP;
			DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
		}
	}
}
868 
869 /*
870  * Performs common cleanup for non-periodic transfers after a Transfer
871  * Complete interrupt. This function should be called after any endpoint type
872  * specific handling is finished to release the host channel.
873  */
static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
					    struct dwc2_host_chan *chan,
					    int chnum, struct dwc2_qtd *qtd,
					    enum dwc2_halt_status halt_status)
{
	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Successful completion: reset the per-QTD error budget */
	qtd->error_count = 0;

	if (chan->hcint & HCINTMSK_NYET) {
		/*
		 * Got a NYET on the last transaction of the transfer. This
		 * means that the endpoint should be in the PING state at the
		 * beginning of the next transfer.
		 */
		dev_vdbg(hsotg->dev, "got NYET\n");
		chan->qh->ping_state = 1;
	}

	/*
	 * Always halt and release the host channel to make it available for
	 * more transfers. There may still be more phases for a control
	 * transfer or more data packets for a bulk transfer at this point,
	 * but the host channel is still halted. A channel will be reassigned
	 * to the transfer when the non-periodic schedule is processed after
	 * the channel is released. This allows transactions to be queued
	 * properly via dwc2_hcd_queue_transactions, which also enables the
	 * Tx FIFO Empty interrupt if necessary.
	 */
	if (chan->ep_is_in) {
		/*
		 * IN transfers in Slave mode require an explicit disable to
		 * halt the channel. (In DMA mode, this call simply releases
		 * the channel.)
		 */
		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
	} else {
		/*
		 * The channel is automatically disabled by the core for OUT
		 * transfers in Slave mode
		 */
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
	}
}
918 
919 /*
920  * Performs common cleanup for periodic transfers after a Transfer Complete
921  * interrupt. This function should be called after any endpoint type specific
922  * handling is finished to release the host channel.
923  */
924 static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
925 					struct dwc2_host_chan *chan, int chnum,
926 					struct dwc2_qtd *qtd,
927 					enum dwc2_halt_status halt_status)
928 {
929 	u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
930 
931 	qtd->error_count = 0;
932 
933 	if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
934 		/* Core halts channel in these cases */
935 		dwc2_release_channel(hsotg, chan, qtd, halt_status);
936 	else
937 		/* Flush any outstanding requests from the Tx queue */
938 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
939 }
940 
/*
 * Handles Transfer Complete for the CSPLIT phase of an isochronous IN split
 * transfer (called from dwc2_hc_xfercomp_intr for isoc IN splits in DMA
 * mode). Accumulates the received bytes into the current isoc frame
 * descriptor and releases the host channel.
 *
 * Return: 1 if the host channel was released, 0 otherwise (no URB, or a
 * zero-length transfer that only reset the split state)
 */
static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u32 len;

	if (!qtd->urb)
		return 0;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
					  DWC2_HC_XFER_COMPLETE, NULL);
	if (!len) {
		/* Nothing received; restart the split from the beginning */
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
		return 0;
	}

	frame_desc->actual_length += len;

	if (chan->align_buf) {
		/*
		 * The transfer used the DWORD-aligned bounce buffer; sync
		 * and copy the received data back into the URB buffer.
		 */
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		usb_syncmem(qtd->urb->usbdma, chan->qh->dw_align_buf_dma,
		    chan->qh->dw_align_buf_size, BUS_DMASYNC_POSTREAD);
		memcpy(qtd->urb->buf + frame_desc->offset +
		       qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
		usb_syncmem(qtd->urb->usbdma, chan->qh->dw_align_buf_dma,
		    chan->qh->dw_align_buf_size, BUS_DMASYNC_PREREAD);
	}

	qtd->isoc_split_offset += len;

	if (frame_desc->actual_length >= frame_desc->length) {
		/* Current frame is done; advance to the next one */
		frame_desc->status = 0;
		qtd->isoc_frame_index++;
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
	}

	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
		/* All frames transferred; complete the URB */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_URB_COMPLETE);
	} else {
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_NO_HALT_STATUS);
	}

	return 1;	/* Indicates that channel released */
}
992 
993 /*
994  * Handles a host channel Transfer Complete interrupt. This handler may be
995  * called in either DMA mode or Slave mode.
996  */
997 static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
998 				  struct dwc2_host_chan *chan, int chnum,
999 				  struct dwc2_qtd *qtd)
1000 {
1001 	struct dwc2_hcd_urb *urb = qtd->urb;
1002 	enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
1003 	int pipe_type;
1004 	int urb_xfer_done;
1005 
1006 	if (dbg_hc(chan))
1007 		dev_vdbg(hsotg->dev,
1008 			 "--Host Channel %d Interrupt: Transfer Complete--\n",
1009 			 chnum);
1010 
1011 	if (!urb)
1012 		goto handle_xfercomp_done;
1013 
1014 	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1015 
1016 	if (hsotg->core_params->dma_desc_enable > 0) {
1017 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
1018 		if (pipe_type == USB_ENDPOINT_XFER_ISOC)
1019 			/* Do not disable the interrupt, just clear it */
1020 			return;
1021 		goto handle_xfercomp_done;
1022 	}
1023 
1024 	/* Handle xfer complete on CSPLIT */
1025 	if (chan->qh->do_split) {
1026 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
1027 		    hsotg->core_params->dma_enable > 0) {
1028 			if (qtd->complete_split &&
1029 			    dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
1030 							qtd))
1031 				goto handle_xfercomp_done;
1032 		} else {
1033 			qtd->complete_split = 0;
1034 		}
1035 	}
1036 
1037 	/* Update the QTD and URB states */
1038 	switch (pipe_type) {
1039 	case USB_ENDPOINT_XFER_CONTROL:
1040 		switch (qtd->control_phase) {
1041 		case DWC2_CONTROL_SETUP:
1042 			if (urb->length > 0)
1043 				qtd->control_phase = DWC2_CONTROL_DATA;
1044 			else
1045 				qtd->control_phase = DWC2_CONTROL_STATUS;
1046 			dev_vdbg(hsotg->dev,
1047 				 "  Control setup transaction done\n");
1048 			halt_status = DWC2_HC_XFER_COMPLETE;
1049 			break;
1050 		case DWC2_CONTROL_DATA:
1051 			urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
1052 							      chnum, urb, qtd);
1053 			if (urb_xfer_done) {
1054 				qtd->control_phase = DWC2_CONTROL_STATUS;
1055 				dev_vdbg(hsotg->dev,
1056 					 "  Control data transfer done\n");
1057 			} else {
1058 				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1059 							  qtd);
1060 			}
1061 			halt_status = DWC2_HC_XFER_COMPLETE;
1062 			break;
1063 		case DWC2_CONTROL_STATUS:
1064 			dev_vdbg(hsotg->dev, "  Control transfer complete\n");
1065 			if (urb->status == -EINPROGRESS)
1066 				urb->status = 0;
1067 			dwc2_host_complete(hsotg, qtd, urb->status);
1068 			halt_status = DWC2_HC_XFER_URB_COMPLETE;
1069 			break;
1070 		}
1071 
1072 		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1073 						halt_status);
1074 		break;
1075 	case USB_ENDPOINT_XFER_BULK:
1076 		dev_vdbg(hsotg->dev, "  Bulk transfer complete\n");
1077 		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1078 						      qtd);
1079 		if (urb_xfer_done) {
1080 			dwc2_host_complete(hsotg, qtd, urb->status);
1081 			halt_status = DWC2_HC_XFER_URB_COMPLETE;
1082 		} else {
1083 			halt_status = DWC2_HC_XFER_COMPLETE;
1084 		}
1085 
1086 		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1087 		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1088 						halt_status);
1089 		break;
1090 	case USB_ENDPOINT_XFER_INT:
1091 		dev_vdbg(hsotg->dev, "  Interrupt transfer complete\n");
1092 		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1093 						      qtd);
1094 
1095 		/*
1096 		 * Interrupt URB is done on the first transfer complete
1097 		 * interrupt
1098 		 */
1099 		if (urb_xfer_done) {
1100 			dwc2_host_complete(hsotg, qtd, urb->status);
1101 			halt_status = DWC2_HC_XFER_URB_COMPLETE;
1102 		} else {
1103 			halt_status = DWC2_HC_XFER_COMPLETE;
1104 		}
1105 
1106 		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1107 		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1108 					    halt_status);
1109 		break;
1110 	case USB_ENDPOINT_XFER_ISOC:
1111 		if (dbg_perio())
1112 			dev_vdbg(hsotg->dev, "  Isochronous transfer complete\n");
1113 		if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
1114 			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1115 					chnum, qtd, DWC2_HC_XFER_COMPLETE);
1116 		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1117 					    halt_status);
1118 		break;
1119 	}
1120 
1121 handle_xfercomp_done:
1122 	disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
1123 }
1124 
1125 /*
1126  * Handles a host channel STALL interrupt. This handler may be called in
1127  * either DMA mode or Slave mode.
1128  */
1129 static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
1130 			       struct dwc2_host_chan *chan, int chnum,
1131 			       struct dwc2_qtd *qtd)
1132 {
1133 	struct dwc2_hcd_urb *urb = qtd->urb;
1134 	int pipe_type;
1135 
1136 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
1137 		chnum);
1138 
1139 	if (hsotg->core_params->dma_desc_enable > 0) {
1140 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1141 					    DWC2_HC_XFER_STALL);
1142 		goto handle_stall_done;
1143 	}
1144 
1145 	if (!urb)
1146 		goto handle_stall_halt;
1147 
1148 	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1149 
1150 	if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
1151 		dwc2_host_complete(hsotg, qtd, -EPIPE);
1152 
1153 	if (pipe_type == USB_ENDPOINT_XFER_BULK ||
1154 	    pipe_type == USB_ENDPOINT_XFER_INT) {
1155 		dwc2_host_complete(hsotg, qtd, -EPIPE);
1156 		/*
1157 		 * USB protocol requires resetting the data toggle for bulk
1158 		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1159 		 * setup command is issued to the endpoint. Anticipate the
1160 		 * CLEAR_FEATURE command since a STALL has occurred and reset
1161 		 * the data toggle now.
1162 		 */
1163 		chan->qh->data_toggle = 0;
1164 	}
1165 
1166 handle_stall_halt:
1167 	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
1168 
1169 handle_stall_done:
1170 	disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
1171 }
1172 
1173 /*
1174  * Updates the state of the URB when a transfer has been stopped due to an
1175  * abnormal condition before the transfer completes. Modifies the
1176  * actual_length field of the URB to reflect the number of bytes that have
1177  * actually been transferred via the host channel.
1178  */
1179 static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
1180 				      struct dwc2_host_chan *chan, int chnum,
1181 				      struct dwc2_hcd_urb *urb,
1182 				      struct dwc2_qtd *qtd,
1183 				      enum dwc2_halt_status halt_status)
1184 {
1185 	u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
1186 						      qtd, halt_status, NULL);
1187 
1188 	if (urb->actual_length + xfer_length > urb->length) {
1189 		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
1190 		xfer_length = urb->length - urb->actual_length;
1191 	}
1192 
1193 	/* Non DWORD-aligned buffer case handling */
1194 	if (chan->align_buf && xfer_length && chan->ep_is_in) {
1195 		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
1196 
1197 		usb_dma_t *ud = &chan->qh->dw_align_buf_usbdma;
1198 
1199 		usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
1200 		    chan->ep_is_in ?
1201 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1202 		if (chan->ep_is_in)
1203 			memcpy(urb->buf + urb->actual_length,
1204 					chan->qh->dw_align_buf,
1205 					xfer_length);
1206 		usb_syncmem(ud, 0, chan->qh->dw_align_buf_size,
1207 		    chan->ep_is_in ?
1208 		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1209 	}
1210 
1211 	urb->actual_length += xfer_length;
1212 
1213 	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
1214 		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
1215 	dev_vdbg(hsotg->dev, "  chan->start_pkt_count %d\n",
1216 		 chan->start_pkt_count);
1217 	dev_vdbg(hsotg->dev, "  hctsiz.pktcnt %d\n",
1218 		 (DWC2_READ_4(hsotg, HCTSIZ(chnum)) & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
1219 	dev_vdbg(hsotg->dev, "  chan->max_packet %d\n", chan->max_packet);
1220 	dev_vdbg(hsotg->dev, "  bytes_transferred %d\n",
1221 		 xfer_length);
1222 	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n",
1223 		 urb->actual_length);
1224 	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n",
1225 		 urb->length);
1226 }
1227 
1228 /*
1229  * Handles a host channel NAK interrupt. This handler may be called in either
1230  * DMA mode or Slave mode.
1231  */
1232 static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1233 			     struct dwc2_host_chan *chan, int chnum,
1234 			     struct dwc2_qtd *qtd)
1235 {
1236 	if (!qtd) {
1237 		dev_dbg(hsotg->dev, "%s: qtd is NULL\n", __func__);
1238 		return;
1239 	}
1240 
1241 	if (!qtd->urb) {
1242 		dev_dbg(hsotg->dev, "%s: qtd->urb is NULL\n", __func__);
1243 		return;
1244 	}
1245 
1246 	if (dbg_hc(chan))
1247 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
1248 			 chnum);
1249 
1250 	/*
1251 	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
1252 	 * interrupt. Re-start the SSPLIT transfer.
1253 	 */
1254 	if (chan->do_split) {
1255 		/*
1256 		 * When we get control/bulk NAKs then remember this so we holdoff on
1257 		 * this qh until the beginning of the next frame
1258 		 */
1259 		switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1260 		case USB_ENDPOINT_XFER_CONTROL:
1261 		case USB_ENDPOINT_XFER_BULK:
1262 			chan->qh->nak_frame = dwc2_hcd_get_frame_number(hsotg);
1263 			break;
1264 		}
1265 
1266 		if (chan->complete_split)
1267 			qtd->error_count = 0;
1268 		qtd->complete_split = 0;
1269 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1270 		goto handle_nak_done;
1271 	}
1272 
1273 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1274 	case USB_ENDPOINT_XFER_CONTROL:
1275 	case USB_ENDPOINT_XFER_BULK:
1276 		if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
1277 			/*
1278 			 * NAK interrupts are enabled on bulk/control IN
1279 			 * transfers in DMA mode for the sole purpose of
1280 			 * resetting the error count after a transaction error
1281 			 * occurs. The core will continue transferring data.
1282 			 */
1283 			qtd->error_count = 0;
1284 			break;
1285 		}
1286 
1287 		/*
1288 		 * NAK interrupts normally occur during OUT transfers in DMA
1289 		 * or Slave mode. For IN transfers, more requests will be
1290 		 * queued as request queue space is available.
1291 		 */
1292 		qtd->error_count = 0;
1293 
1294 		if (!chan->qh->ping_state) {
1295 			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1296 						  qtd, DWC2_HC_XFER_NAK);
1297 			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1298 
1299 			if (chan->speed == USB_SPEED_HIGH)
1300 				chan->qh->ping_state = 1;
1301 		}
1302 
1303 		/*
1304 		 * Halt the channel so the transfer can be re-started from
1305 		 * the appropriate point or the PING protocol will
1306 		 * start/continue
1307 		 */
1308 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1309 		break;
1310 	case USB_ENDPOINT_XFER_INT:
1311 		qtd->error_count = 0;
1312 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1313 		break;
1314 	case USB_ENDPOINT_XFER_ISOC:
1315 		/* Should never get called for isochronous transfers */
1316 		dev_err(hsotg->dev, "NACK interrupt for ISOC transfer\n");
1317 		break;
1318 	}
1319 
1320 handle_nak_done:
1321 	disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
1322 }
1323 
1324 /*
1325  * Handles a host channel ACK interrupt. This interrupt is enabled when
1326  * performing the PING protocol in Slave mode, when errors occur during
1327  * either Slave mode or DMA mode, and during Start Split transactions.
1328  */
1329 static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
1330 			     struct dwc2_host_chan *chan, int chnum,
1331 			     struct dwc2_qtd *qtd)
1332 {
1333 	struct dwc2_hcd_iso_packet_desc *frame_desc;
1334 
1335 	if (dbg_hc(chan))
1336 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
1337 			 chnum);
1338 
1339 	if (chan->do_split) {
1340 		/* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
1341 		if (!chan->ep_is_in &&
1342 		    chan->data_pid_start != DWC2_HC_PID_SETUP)
1343 			qtd->ssplit_out_xfer_count = chan->xfer_len;
1344 
1345 		if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
1346 			qtd->complete_split = 1;
1347 			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1348 		} else {
1349 			/* ISOC OUT */
1350 			switch (chan->xact_pos) {
1351 			case DWC2_HCSPLT_XACTPOS_ALL:
1352 				break;
1353 			case DWC2_HCSPLT_XACTPOS_END:
1354 				qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
1355 				qtd->isoc_split_offset = 0;
1356 				break;
1357 			case DWC2_HCSPLT_XACTPOS_BEGIN:
1358 			case DWC2_HCSPLT_XACTPOS_MID:
1359 				/*
1360 				 * For BEGIN or MID, calculate the length for
1361 				 * the next microframe to determine the correct
1362 				 * SSPLIT token, either MID or END
1363 				 */
1364 				frame_desc = &qtd->urb->iso_descs[
1365 						qtd->isoc_frame_index];
1366 				qtd->isoc_split_offset += 188;
1367 
1368 				if (frame_desc->length - qtd->isoc_split_offset
1369 							<= 188)
1370 					qtd->isoc_split_pos =
1371 							DWC2_HCSPLT_XACTPOS_END;
1372 				else
1373 					qtd->isoc_split_pos =
1374 							DWC2_HCSPLT_XACTPOS_MID;
1375 				break;
1376 			}
1377 		}
1378 	} else {
1379 		qtd->error_count = 0;
1380 
1381 		if (chan->qh->ping_state) {
1382 			chan->qh->ping_state = 0;
1383 			/*
1384 			 * Halt the channel so the transfer can be re-started
1385 			 * from the appropriate point. This only happens in
1386 			 * Slave mode. In DMA mode, the ping_state is cleared
1387 			 * when the transfer is started because the core
1388 			 * automatically executes the PING, then the transfer.
1389 			 */
1390 			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1391 		}
1392 	}
1393 
1394 	/*
1395 	 * If the ACK occurred when _not_ in the PING state, let the channel
1396 	 * continue transferring data after clearing the error count
1397 	 */
1398 	disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
1399 }
1400 
1401 /*
1402  * Handles a host channel NYET interrupt. This interrupt should only occur on
1403  * Bulk and Control OUT endpoints and for complete split transactions. If a
1404  * NYET occurs at the same time as a Transfer Complete interrupt, it is
1405  * handled in the xfercomp interrupt handler, not here. This handler may be
1406  * called in either DMA mode or Slave mode.
1407  */
1408 static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
1409 			      struct dwc2_host_chan *chan, int chnum,
1410 			      struct dwc2_qtd *qtd)
1411 {
1412 	if (dbg_hc(chan))
1413 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
1414 			 chnum);
1415 
1416 	/*
1417 	 * NYET on CSPLIT
1418 	 * re-do the CSPLIT immediately on non-periodic
1419 	 */
1420 	if (chan->do_split && chan->complete_split) {
1421 		if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
1422 		    hsotg->core_params->dma_enable > 0) {
1423 			qtd->complete_split = 0;
1424 			qtd->isoc_split_offset = 0;
1425 			qtd->isoc_frame_index++;
1426 			if (qtd->urb &&
1427 			    qtd->isoc_frame_index == qtd->urb->packet_count) {
1428 				dwc2_host_complete(hsotg, qtd, 0);
1429 				dwc2_release_channel(hsotg, chan, qtd,
1430 						     DWC2_HC_XFER_URB_COMPLETE);
1431 			} else {
1432 				dwc2_release_channel(hsotg, chan, qtd,
1433 						DWC2_HC_XFER_NO_HALT_STATUS);
1434 			}
1435 			goto handle_nyet_done;
1436 		}
1437 
1438 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1439 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1440 			int frnum = dwc2_hcd_get_frame_number(hsotg);
1441 
1442 			if (dwc2_full_frame_num(frnum) !=
1443 			    dwc2_full_frame_num(chan->qh->sched_frame)) {
1444 				/*
1445 				 * No longer in the same full speed frame.
1446 				 * Treat this as a transaction error.
1447 				 */
1448 #if 0
1449 				/*
1450 				 * Todo: Fix system performance so this can
1451 				 * be treated as an error. Right now complete
1452 				 * splits cannot be scheduled precisely enough
1453 				 * due to other system activity, so this error
1454 				 * occurs regularly in Slave mode.
1455 				 */
1456 				qtd->error_count++;
1457 #endif
1458 				qtd->complete_split = 0;
1459 				dwc2_halt_channel(hsotg, chan, qtd,
1460 						  DWC2_HC_XFER_XACT_ERR);
1461 				/* Todo: add support for isoc release */
1462 				goto handle_nyet_done;
1463 			}
1464 		}
1465 
1466 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1467 		goto handle_nyet_done;
1468 	}
1469 
1470 	chan->qh->ping_state = 1;
1471 	qtd->error_count = 0;
1472 
1473 	dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
1474 				  DWC2_HC_XFER_NYET);
1475 	dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1476 
1477 	/*
1478 	 * Halt the channel and re-start the transfer so the PING protocol
1479 	 * will start
1480 	 */
1481 	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1482 
1483 handle_nyet_done:
1484 	disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
1485 }
1486 
1487 /*
1488  * Handles a host channel babble interrupt. This handler may be called in
1489  * either DMA mode or Slave mode.
1490  */
1491 static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1492 				struct dwc2_host_chan *chan, int chnum,
1493 				struct dwc2_qtd *qtd)
1494 {
1495 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1496 		chnum);
1497 
1498 // 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1499 
1500 	if (hsotg->core_params->dma_desc_enable > 0) {
1501 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1502 					    DWC2_HC_XFER_BABBLE_ERR);
1503 		goto disable_int;
1504 	}
1505 
1506 	if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1507 		dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
1508 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1509 	} else {
1510 		enum dwc2_halt_status halt_status;
1511 
1512 		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1513 						qtd, DWC2_HC_XFER_BABBLE_ERR);
1514 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1515 	}
1516 
1517 disable_int:
1518 	disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1519 }
1520 
1521 /*
1522  * Handles a host channel AHB error interrupt. This handler is only called in
1523  * DMA mode.
1524  */
1525 static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
1526 				struct dwc2_host_chan *chan, int chnum,
1527 				struct dwc2_qtd *qtd)
1528 {
1529 	struct dwc2_hcd_urb *urb = qtd->urb;
1530 
1531 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
1532 		chnum);
1533 
1534 	if (!urb)
1535 		goto handle_ahberr_halt;
1536 
1537 // 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1538 
1539 #ifdef DWC2_DEBUG
1540 	const char *pipetype, *speed;
1541 
1542 	u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
1543 	u32 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum));
1544 	u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
1545 	u32 hc_dma = DWC2_READ_4(hsotg, HCDMA(chnum));
1546 
1547 	dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
1548 	dev_err(hsotg->dev, "  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
1549 	dev_err(hsotg->dev, "  hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
1550 	dev_err(hsotg->dev, "  Device address: %d\n",
1551 		dwc2_hcd_get_dev_addr(&urb->pipe_info));
1552 	dev_err(hsotg->dev, "  Endpoint: %d, %s\n",
1553 		dwc2_hcd_get_ep_num(&urb->pipe_info),
1554 		dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
1555 
1556 	switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
1557 	case USB_ENDPOINT_XFER_CONTROL:
1558 		pipetype = "CONTROL";
1559 		break;
1560 	case USB_ENDPOINT_XFER_BULK:
1561 		pipetype = "BULK";
1562 		break;
1563 	case USB_ENDPOINT_XFER_INT:
1564 		pipetype = "INTERRUPT";
1565 		break;
1566 	case USB_ENDPOINT_XFER_ISOC:
1567 		pipetype = "ISOCHRONOUS";
1568 		break;
1569 	default:
1570 		pipetype = "UNKNOWN";
1571 		break;
1572 	}
1573 
1574 	dev_err(hsotg->dev, "  Endpoint type: %s\n", pipetype);
1575 
1576 	switch (chan->speed) {
1577 	case USB_SPEED_HIGH:
1578 		speed = "HIGH";
1579 		break;
1580 	case USB_SPEED_FULL:
1581 		speed = "FULL";
1582 		break;
1583 	case USB_SPEED_LOW:
1584 		speed = "LOW";
1585 		break;
1586 	default:
1587 		speed = "UNKNOWN";
1588 		break;
1589 	}
1590 
1591 	dev_err(hsotg->dev, "  Speed: %s\n", speed);
1592 
1593 	dev_err(hsotg->dev, "  Max packet size: %d\n",
1594 		dwc2_hcd_get_mps(&urb->pipe_info));
1595 	dev_err(hsotg->dev, "  Data buffer length: %d\n", urb->length);
1596 	dev_err(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n",
1597 		urb->buf, (unsigned long)urb->dma);
1598 	dev_err(hsotg->dev, "  Setup buffer: %p, Setup DMA: %08lx\n",
1599 		urb->setup_packet, (unsigned long)urb->setup_dma);
1600 	dev_err(hsotg->dev, "  Interval: %d\n", urb->interval);
1601 #endif
1602 
1603 	/* Core halts the channel for Descriptor DMA mode */
1604 	if (hsotg->core_params->dma_desc_enable > 0) {
1605 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1606 					    DWC2_HC_XFER_AHB_ERR);
1607 		goto handle_ahberr_done;
1608 	}
1609 
1610 	dwc2_host_complete(hsotg, qtd, -EIO);
1611 
1612 handle_ahberr_halt:
1613 	/*
1614 	 * Force a channel halt. Don't call dwc2_halt_channel because that won't
1615 	 * write to the HCCHARn register in DMA mode to force the halt.
1616 	 */
1617 	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
1618 
1619 handle_ahberr_done:
1620 	disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
1621 }
1622 
1623 /*
1624  * Handles a host channel transaction error interrupt. This handler may be
1625  * called in either DMA mode or Slave mode.
1626  */
1627 static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
1628 				 struct dwc2_host_chan *chan, int chnum,
1629 				 struct dwc2_qtd *qtd)
1630 {
1631 	dev_dbg(hsotg->dev,
1632 		"--Host Channel %d Interrupt: Transaction Error--\n", chnum);
1633 
1634 // 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1635 
1636 	if (hsotg->core_params->dma_desc_enable > 0) {
1637 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1638 					    DWC2_HC_XFER_XACT_ERR);
1639 		goto handle_xacterr_done;
1640 	}
1641 
1642 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1643 	case USB_ENDPOINT_XFER_CONTROL:
1644 	case USB_ENDPOINT_XFER_BULK:
1645 		qtd->error_count++;
1646 		if (!chan->qh->ping_state) {
1647 
1648 			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1649 						  qtd, DWC2_HC_XFER_XACT_ERR);
1650 			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1651 			if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
1652 				chan->qh->ping_state = 1;
1653 		}
1654 
1655 		/*
1656 		 * Halt the channel so the transfer can be re-started from
1657 		 * the appropriate point or the PING protocol will start
1658 		 */
1659 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1660 		break;
1661 	case USB_ENDPOINT_XFER_INT:
1662 		qtd->error_count++;
1663 		if (chan->do_split && chan->complete_split)
1664 			qtd->complete_split = 0;
1665 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1666 		break;
1667 	case USB_ENDPOINT_XFER_ISOC:
1668 		{
1669 			enum dwc2_halt_status halt_status;
1670 
1671 			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1672 					chnum, qtd, DWC2_HC_XFER_XACT_ERR);
1673 			dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1674 		}
1675 		break;
1676 	}
1677 
1678 handle_xacterr_done:
1679 	disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
1680 }
1681 
1682 /*
1683  * Handles a host channel frame overrun interrupt. This handler may be called
1684  * in either DMA mode or Slave mode.
1685  */
1686 static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
1687 				  struct dwc2_host_chan *chan, int chnum,
1688 				  struct dwc2_qtd *qtd)
1689 {
1690 	enum dwc2_halt_status halt_status;
1691 
1692 	if (dbg_hc(chan))
1693 		dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
1694 			chnum);
1695 
1696 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1697 
1698 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1699 	case USB_ENDPOINT_XFER_CONTROL:
1700 	case USB_ENDPOINT_XFER_BULK:
1701 		break;
1702 	case USB_ENDPOINT_XFER_INT:
1703 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1704 		break;
1705 	case USB_ENDPOINT_XFER_ISOC:
1706 		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1707 					qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1708 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1709 		break;
1710 	}
1711 
1712 	disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
1713 }
1714 
1715 /*
1716  * Handles a host channel data toggle error interrupt. This handler may be
1717  * called in either DMA mode or Slave mode.
1718  */
1719 static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
1720 				    struct dwc2_host_chan *chan, int chnum,
1721 				    struct dwc2_qtd *qtd)
1722 {
1723 	dev_dbg(hsotg->dev,
1724 		"--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
1725 
1726 	if (chan->ep_is_in)
1727 		qtd->error_count = 0;
1728 	else
1729 		dev_err(hsotg->dev,
1730 			"Data Toggle Error on OUT transfer, channel %d\n",
1731 			chnum);
1732 
1733 // 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1734 	disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
1735 }
1736 
1737 /*
1738  * For debug only. It checks that a valid halt status is set and that
1739  * HCCHARn.chdis is clear. If there's a problem, corrective action is
1740  * taken and a warning is issued.
1741  *
1742  * Return: true if halt status is ok, false otherwise
1743  */
1744 static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
1745 				struct dwc2_host_chan *chan, int chnum,
1746 				struct dwc2_qtd *qtd)
1747 {
1748 #ifdef DWC2_DEBUG
1749 	u32 hcchar;
1750 	u32 hctsiz;
1751 	u32 hcintmsk;
1752 	u32 hcsplt;
1753 
1754 	if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
1755 		/*
1756 		 * This code is here only as a check. This condition should
1757 		 * never happen. Ignore the halt if it does occur.
1758 		 */
1759 		hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
1760 		hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
1761 		hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
1762 		hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum));
1763 		dev_dbg(hsotg->dev,
1764 			"%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
1765 			 __func__);
1766 		dev_dbg(hsotg->dev,
1767 			"channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
1768 			chnum, hcchar, hctsiz);
1769 		dev_dbg(hsotg->dev,
1770 			"hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
1771 			chan->hcint, hcintmsk, hcsplt);
1772 		if (qtd)
1773 			dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
1774 				qtd->complete_split);
1775 		dev_warn(hsotg->dev,
1776 			 "%s: no halt status, channel %d, ignoring interrupt\n",
1777 			 __func__, chnum);
1778 		return false;
1779 	}
1780 
1781 	/*
1782 	 * This code is here only as a check. hcchar.chdis should never be set
1783 	 * when the halt interrupt occurs. Halt the channel again if it does
1784 	 * occur.
1785 	 */
1786 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
1787 	if (hcchar & HCCHAR_CHDIS) {
1788 		dev_warn(hsotg->dev,
1789 			 "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
1790 			 __func__, hcchar);
1791 		chan->halt_pending = 0;
1792 		dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
1793 		return false;
1794 	}
1795 #endif
1796 
1797 	return true;
1798 }
1799 
1800 /*
1801  * Handles a host Channel Halted interrupt in DMA mode. This handler
1802  * determines the reason the channel halted and proceeds accordingly.
1803  */
static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan, int chnum,
				    struct dwc2_qtd *qtd)
{
	u32 hcintmsk;
	int out_nak_enh = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev,
			 "--Host Channel %d Interrupt: DMA Channel Halted--\n",
			 chnum);

	/*
	 * For core with OUT NAK enhancement, the flow for high-speed
	 * CONTROL/BULK OUT is handled a little differently
	 */
	if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
		if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
		    (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		     chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
			out_nak_enh = 1;
		}
	}

	/*
	 * Fast path: a dequeue (or, in buffer-DMA mode, an AHB error) means
	 * the transfer is being torn down rather than completed normally, so
	 * skip the per-interrupt-cause dispatch below entirely.
	 */
	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
	     hsotg->core_params->dma_desc_enable <= 0)) {
		if (hsotg->core_params->dma_desc_enable > 0)
			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
						    chan->halt_status);
		else
			/*
			 * Just release the channel. A dequeue can happen on a
			 * transfer timeout. In the case of an AHB Error, the
			 * channel was forced to halt because there's no way to
			 * gracefully recover.
			 */
			dwc2_release_channel(hsotg, chan, qtd,
					     chan->halt_status);
		return;
	}

	/* Needed below to tell whether NAK/ACK are unmasked for this channel */
	hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));

	/*
	 * Determine why the channel halted. chan->hcint holds the raw (not
	 * masked) status latched by dwc2_hc_n_intr(). The tests below are in
	 * strict priority order; only the first matching cause is handled.
	 */
	if (chan->hcint & HCINTMSK_XFERCOMPL) {
		/*
		 * Todo: This is here because of a possible hardware bug. Spec
		 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
		 * interrupt w/ACK bit set should occur, but I only see the
		 * XFERCOMP bit, even with it masked out. This is a workaround
		 * for that behavior. Should fix this when hardware is fixed.
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
	} else if (chan->hcint & HCINTMSK_STALL) {
		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
	} else if ((chan->hcint & HCINTMSK_XACTERR) &&
		   hsotg->core_params->dma_desc_enable <= 0) {
		if (out_nak_enh) {
			/*
			 * With the OUT NAK enhancement, a transaction error
			 * accompanied by NYET/NAK/ACK means the device did
			 * respond, so the error counter is reset rather than
			 * advanced toward the 3-strikes limit.
			 */
			if (chan->hcint &
			    (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
				dev_vdbg(hsotg->dev,
					 "XactErr with NYET/NAK/ACK\n");
				qtd->error_count = 0;
			} else {
				dev_vdbg(hsotg->dev,
					 "XactErr without NYET/NAK/ACK\n");
			}
		}

		/*
		 * Must handle xacterr before nak or ack. Could get a xacterr
		 * at the same time as either of these on a BULK/CONTROL OUT
		 * that started with a PING. The xacterr takes precedence.
		 */
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
	} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
		   hsotg->core_params->dma_desc_enable > 0) {
		/* Descriptor-DMA only: excessive transaction error */
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
	} else if ((chan->hcint & HCINTMSK_AHBERR) &&
		   hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
	} else if (chan->hcint & HCINTMSK_BBLERR) {
		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
	} else if (chan->hcint & HCINTMSK_FRMOVRUN) {
		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
	} else if (!out_nak_enh) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Must handle nyet before nak or ack. Could get a nyet
			 * at the same time as either of those on a BULK/CONTROL
			 * OUT that started with a PING. The nyet takes
			 * precedence.
			 */
			dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
		} else if ((chan->hcint & HCINTMSK_NAK) &&
			   !(hcintmsk & HCINTMSK_NAK)) {
			/*
			 * If nak is not masked, it's because a non-split IN
			 * transfer is in an error state. In that case, the nak
			 * is handled by the nak interrupt handler, not here.
			 * Handle nak here for BULK/CONTROL OUT transfers, which
			 * halt on a NAK to allow rewinding the buffer pointer.
			 */
			dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
		} else if ((chan->hcint & HCINTMSK_ACK) &&
			   !(hcintmsk & HCINTMSK_ACK)) {
			/*
			 * If ack is not masked, it's because a non-split IN
			 * transfer is in an error state. In that case, the ack
			 * is handled by the ack interrupt handler, not here.
			 * Handle ack here for split transfers. Start splits
			 * halt on ACK.
			 */
			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
		} else {
			if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
				/*
				 * A periodic transfer halted with no other
				 * channel interrupts set. Assume it was halted
				 * by the core because it could not be completed
				 * in its scheduled (micro)frame.
				 */
				dev_dbg(hsotg->dev,
					"%s: Halt channel %d (assume incomplete periodic transfer)\n",
					__func__, chnum);
				dwc2_halt_channel(hsotg, chan, qtd,
					DWC2_HC_XFER_PERIODIC_INCOMPLETE);
			} else {
				dev_err(hsotg->dev,
					"%s: Channel %d - ChHltd set, but reason is unknown\n",
					__func__, chnum);
				dev_err(hsotg->dev,
					"hcint 0x%08x, intsts 0x%08x\n",
					chan->hcint,
					DWC2_READ_4(hsotg, GINTSTS));
				/*
				 * Jump into the error path of the final else
				 * branch below; both the out_nak_enh case and
				 * this unknown-reason case recover the same
				 * way.
				 */
				goto error;
			}
		}
	} else {
		dev_info(hsotg->dev,
			 "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
			 chan->hcint);
error:
		/* Fallthrough: use 3-strikes rule */
		qtd->error_count++;
		dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
					  qtd, DWC2_HC_XFER_XACT_ERR);
		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
	}
}
1958 
1959 /*
1960  * Handles a host channel Channel Halted interrupt
1961  *
1962  * In slave mode, this handler is called only when the driver specifically
1963  * requests a halt. This occurs during handling other host channel interrupts
1964  * (e.g. nak, xacterr, stall, nyet, etc.).
1965  *
1966  * In DMA mode, this is the interrupt that occurs when the core has finished
1967  * processing a transfer on a channel. Other host channel interrupts (except
1968  * ahberr) are disabled in DMA mode.
1969  */
1970 static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
1971 				struct dwc2_host_chan *chan, int chnum,
1972 				struct dwc2_qtd *qtd)
1973 {
1974 	if (dbg_hc(chan))
1975 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
1976 			 chnum);
1977 
1978 	if (hsotg->core_params->dma_enable > 0) {
1979 		dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
1980 	} else {
1981 		if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
1982 			return;
1983 		dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
1984 	}
1985 }
1986 
1987 /* Handles interrupt for a specific Host Channel */
/* Handles interrupt for a specific Host Channel */
static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan;
	u32 hcint, hcintmsk;

	chan = hsotg->hc_ptr_array[chnum];

	hcint = DWC2_READ_4(hsotg, HCINT(chnum));
	hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
	if (!chan) {
		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
		/* Ack the pending bits anyway so the interrupt doesn't storm */
		DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
		return;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
			 chnum);
		dev_vdbg(hsotg->dev,
			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			 hcint, hcintmsk, hcint & hcintmsk);
	}

	/*
	 * Acknowledge the pending bits (HCINT is write-1-to-clear), stash the
	 * raw status for the per-cause handlers, then work only with the
	 * masked set from here on.
	 */
	DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
	chan->hcint = hcint;
	hcint &= hcintmsk;

	/*
	 * If the channel was halted due to a dequeue, the qtd list might
	 * be empty or at least the first entry will not be the active qtd.
	 * In this case, take a shortcut and just release the channel.
	 */
	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		/*
		 * If the channel was halted, this should be the only
		 * interrupt unmasked
		 */
		WARN_ON(hcint != HCINTMSK_CHHLTD);
		if (hsotg->core_params->dma_desc_enable > 0)
			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
						    chan->halt_status);
		else
			dwc2_release_channel(hsotg, chan, NULL,
					     chan->halt_status);
		return;
	}

	if (list_empty(&chan->qh->qtd_list)) {
		/*
		 * TODO: Will this ever happen with the
		 * DWC2_HC_XFER_URB_DEQUEUE handling above?
		 */
		dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
			chnum);
		dev_dbg(hsotg->dev,
			"  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			chan->hcint, hcintmsk, hcint);
		chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
		disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
		chan->hcint = 0;
		return;
	}

	/* The active transfer is always the first qtd on the QH's list */
	qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
			       qtd_list_entry);

	/*
	 * In slave mode, ChHltd accompanying another status bit means the
	 * halt was a side effect of that condition; defer CHHLTD handling so
	 * the real cause is serviced first (it will request the halt itself).
	 */
	if (hsotg->core_params->dma_enable <= 0) {
		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
			hcint &= ~HCINTMSK_CHHLTD;
	}

	if (hcint & HCINTMSK_XFERCOMPL) {
		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
		/*
		 * If NYET occurred at same time as Xfer Complete, the NYET is
		 * handled by the Xfer Complete interrupt handler. Don't want
		 * to call the NYET interrupt handler in this case.
		 */
		hcint &= ~HCINTMSK_NYET;
	}
	/*
	 * Dispatch the remaining causes in a fixed order. More than one bit
	 * may be set; each handler is invoked independently.
	 */
	if (hcint & HCINTMSK_CHHLTD)
		dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_AHBERR)
		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_STALL)
		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_NAK)
		dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_ACK)
		dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_NYET)
		dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_XACTERR)
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_BBLERR)
		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_FRMOVRUN)
		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_DATATGLERR)
		dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);

	/* Latched raw status has been consumed; clear it for the next IRQ */
	chan->hcint = 0;
}
2092 
2093 /*
2094  * This interrupt indicates that one or more host channels has a pending
2095  * interrupt. There are multiple conditions that can cause each host channel
2096  * interrupt. This function determines which conditions have occurred for each
2097  * host channel interrupt and handles them appropriately.
2098  */
2099 static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2100 {
2101 	u32 haint;
2102 	int i;
2103 
2104 	haint = DWC2_READ_4(hsotg, HAINT);
2105 	if (dbg_perio()) {
2106 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
2107 
2108 		dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2109 	}
2110 
2111 	for (i = 0; i < hsotg->core_params->host_channels; i++) {
2112 		if (haint & (1 << i))
2113 			dwc2_hc_n_intr(hsotg, i);
2114 	}
2115 }
2116 
2117 /* This function handles interrupts for the HCD */
2118 irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
2119 {
2120 	u32 gintsts, dbg_gintsts;
2121 	irqreturn_t retval = IRQ_NONE;
2122 
2123 	if (!dwc2_is_controller_alive(hsotg)) {
2124 		dev_warn(hsotg->dev, "Controller is dead\n");
2125 		return retval;
2126 	}
2127 
2128 	KASSERT(mutex_owned(&hsotg->lock));
2129 
2130 	/* Check if HOST Mode */
2131 	if (dwc2_is_host_mode(hsotg)) {
2132 		gintsts = dwc2_read_core_intr(hsotg);
2133 		if (!gintsts) {
2134 			return retval;
2135 		}
2136 
2137 		retval = IRQ_HANDLED;
2138 
2139 		dbg_gintsts = gintsts;
2140 #ifndef DEBUG_SOF
2141 		dbg_gintsts &= ~GINTSTS_SOF;
2142 #endif
2143 		if (!dbg_perio())
2144 			dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
2145 					 GINTSTS_PTXFEMP);
2146 
2147 		/* Only print if there are any non-suppressed interrupts left */
2148 		if (dbg_gintsts)
2149 			dev_vdbg(hsotg->dev,
2150 				 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
2151 				 gintsts);
2152 
2153 		if (gintsts & GINTSTS_SOF)
2154 			dwc2_sof_intr(hsotg);
2155 		if (gintsts & GINTSTS_RXFLVL)
2156 			dwc2_rx_fifo_level_intr(hsotg);
2157 		if (gintsts & GINTSTS_NPTXFEMP)
2158 			dwc2_np_tx_fifo_empty_intr(hsotg);
2159 		if (gintsts & GINTSTS_PRTINT)
2160 			dwc2_port_intr(hsotg);
2161 		if (gintsts & GINTSTS_HCHINT)
2162 			dwc2_hc_intr(hsotg);
2163 		if (gintsts & GINTSTS_PTXFEMP)
2164 			dwc2_perio_tx_fifo_empty_intr(hsotg);
2165 
2166 		if (dbg_gintsts) {
2167 			dev_vdbg(hsotg->dev,
2168 				 "DWC OTG HCD Finished Servicing Interrupts\n");
2169 			dev_vdbg(hsotg->dev,
2170 				 "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
2171 				 DWC2_READ_4(hsotg, GINTSTS),
2172 				 DWC2_READ_4(hsotg, GINTMSK));
2173 		}
2174 	}
2175 
2176 	return retval;
2177 }
2178