xref: /netbsd-src/sys/external/bsd/dwc2/dist/dwc2_hcdintr.c (revision b7b7574d3bf8eeb51a1fa3977b59142ec6434a55)
1 /*	$NetBSD: dwc2_hcdintr.c,v 1.8 2014/04/03 06:34:58 skrll Exp $	*/
2 
3 /*
4  * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
5  *
6  * Copyright (C) 2004-2013 Synopsys, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The names of the above-listed copyright holders may not be used
18  *    to endorse or promote products derived from this software without
19  *    specific prior written permission.
20  *
21  * ALTERNATIVELY, this software may be distributed under the terms of the
22  * GNU General Public License ("GPL") as published by the Free Software
23  * Foundation; either version 2 of the License, or (at your option) any
24  * later version.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * This file contains the interrupt handlers for Host mode
41  */
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: dwc2_hcdintr.c,v 1.8 2014/04/03 06:34:58 skrll Exp $");
44 
45 #include <sys/types.h>
46 #include <sys/pool.h>
47 
48 #include <dev/usb/usb.h>
49 #include <dev/usb/usbdi.h>
50 #include <dev/usb/usbdivar.h>
51 #include <dev/usb/usb_mem.h>
52 
53 #include <machine/param.h>
54 
55 #include <linux/kernel.h>
56 
57 #include <dwc2/dwc2.h>
58 #include <dwc2/dwc2var.h>
59 
60 #include "dwc2_core.h"
61 #include "dwc2_hcd.h"
62 
63 /* This function is for debug only */
static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
	u16 curr_frame_number = hsotg->frame_number;

	/*
	 * Record every SOF where the frame number did not advance by exactly
	 * one (modulo HFNUM_MAX_FRNUM) since the previous SOF interrupt,
	 * i.e. where one or more SOF interrupts were missed.
	 */
	if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
		if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) !=
		    curr_frame_number) {
			hsotg->frame_num_array[hsotg->frame_num_idx] =
					curr_frame_number;
			hsotg->last_frame_num_array[hsotg->frame_num_idx] =
					hsotg->last_frame_num;
			hsotg->frame_num_idx++;
		}
	} else if (!hsotg->dumped_frame_num_array) {
		int i;

		/* The sample array is full: dump it exactly once */
		dev_info(hsotg->dev, "Frame     Last Frame\n");
		dev_info(hsotg->dev, "-----     ----------\n");
		for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
			dev_info(hsotg->dev, "0x%04x    0x%04x\n",
				 hsotg->frame_num_array[i],
				 hsotg->last_frame_num_array[i]);
		}
		hsotg->dumped_frame_num_array = 1;
	}
	/* Remember this frame number for the next SOF comparison */
	hsotg->last_frame_num = curr_frame_number;
#endif
}
93 
/*
 * Flag the transaction translator (TT) buffer of a FS/LS device behind a
 * HS hub as dirty when a transfer ends with an error, so that it can be
 * cleared before the TT is used again. High-speed endpoints have no TT
 * and need no handling; -EPIPE/-EREMOTEIO completions are excluded.
 */
static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd)
{
// 	struct urb *usb_urb;

	if (!chan->qh)
		return;

	/* High-speed devices do not go through a TT */
	if (chan->qh->dev_speed == USB_SPEED_HIGH)
		return;

	if (!qtd->urb)
		return;


	if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
		/*
		 * NOTE(review): the flag is set and then immediately cleared,
		 * leaving it 0 - this looks like a stubbed-out port of the
		 * Linux usb_hub_clear_tt_buffer() call (cf. the commented-out
		 * usb_urb above), which would normally run between the two
		 * assignments. Confirm against the upstream driver.
		 */
		chan->qh->tt_buffer_dirty = 1;
			chan->qh->tt_buffer_dirty = 0;
	}
}
115 
116 /*
117  * Handles the start-of-frame interrupt in host mode. Non-periodic
118  * transactions may be queued to the DWC_otg controller for the current
119  * (micro)frame. Periodic transactions may be queued to the controller
120  * for the next (micro)frame.
121  */
static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
{
	struct list_head *qh_entry;
	struct dwc2_qh *qh;
	enum dwc2_transaction_type tr_type;

#ifdef DEBUG_SOF
	dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
#endif

	/* Latch the current (micro)frame number from the controller */
	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	dwc2_track_missed_sofs(hsotg);

	/* Determine whether any periodic QHs should be executed */
	qh_entry = hsotg->periodic_sched_inactive.next;
	while (qh_entry != &hsotg->periodic_sched_inactive) {
		qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
		/* Advance before a possible list_move() unlinks this entry */
		qh_entry = qh_entry->next;
		if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number))
			/*
			 * Move QH to the ready list to be executed next
			 * (micro)frame
			 */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_ready);
	}
	/* Queue any transactions that are now ready */
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);

	/* Clear interrupt */
	DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_SOF);
}
156 
157 /*
158  * Handles the Rx FIFO Level Interrupt, which indicates that there is
159  * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
160  * memory if the DWC_otg controller is operating in Slave mode.
161  */
static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
{
	u32 grxsts, chnum, bcnt, pktsts;
	struct dwc2_host_chan *chan;

	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");

	/* Read the Rx status and locate the channel it belongs to */
	grxsts = DWC2_READ_4(hsotg, GRXSTSP);
	chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
	chan = hsotg->hc_ptr_array[chnum];
	if (!chan) {
		dev_err(hsotg->dev, "Unable to get corresponding channel\n");
		return;
	}

	bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
	pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;

	/* Packet Status */
	if (dbg_perio()) {
		dev_vdbg(hsotg->dev, "    Ch num = %d\n", chnum);
		dev_vdbg(hsotg->dev, "    Count = %d\n", bcnt);
		dev_vdbg(hsotg->dev, "    DPID = %d, chan.dpid = %d\n",
			 (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT,
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, "    PStatus = %d\n", pktsts);
	}

	switch (pktsts) {
	case GRXSTS_PKTSTS_HCHIN:
		/* Read the data into the host buffer */
		if (bcnt > 0) {
			dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);

			/* Update the HC fields for the next packet received */
			chan->xfer_count += bcnt;
			chan->xfer_buf += bcnt;
		}
		break;
	case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
	case GRXSTS_PKTSTS_DATATOGGLEERR:
	case GRXSTS_PKTSTS_HCHHALTED:
		/* Handled in interrupt, just ignore data */
		break;
	default:
		dev_err(hsotg->dev,
			"RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
		break;
	}
}
213 
214 /*
215  * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
216  * data packets may be written to the FIFO for OUT transfers. More requests
217  * may be written to the non-periodic request queue for IN transfers. This
218  * interrupt is enabled only in Slave mode.
219  */
static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
	/* Process the non-periodic schedule now that FIFO space is available */
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
}
225 
226 /*
227  * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
228  * packets may be written to the FIFO for OUT transfers. More requests may be
229  * written to the periodic request queue for IN transfers. This interrupt is
230  * enabled only in Slave mode.
231  */
static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
	/* Process the periodic schedule now that FIFO space is available */
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
}
238 
/*
 * Handles the transition of the root port to the enabled state.
 * Recomputes HFIR.FrInterval, and (when FS/LS low-power support is
 * configured) adjusts the PHY clock selection to match the attached
 * device's speed, scheduling a port reset if the clock was changed.
 * *hprt0_modify may have HPRT0_RST OR'd in as a result.
 */
static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
			      u32 *hprt0_modify)
{
	struct dwc2_core_params *params = hsotg->core_params;
	int do_reset = 0;
	u32 usbcfg;
	u32 prtspd;
	u32 hcfg;
	u32 fslspclksel;
	u32 hfir;

	dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Every time when port enables calculate HFIR.FrInterval */
	hfir = DWC2_READ_4(hsotg, HFIR);
	hfir &= ~HFIR_FRINT_MASK;
	hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
		HFIR_FRINT_MASK;
	DWC2_WRITE_4(hsotg, HFIR, hfir);

	/* Check if we need to adjust the PHY clock speed for low power */
	if (!params->host_support_fs_ls_low_power) {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;

		/* Notify the root hub of the status change */
		dwc2_root_intr(hsotg->hsotg_sc);
		return;
	}

	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
	prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;

	if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
		/* Low power */
		if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
			/* Set PHY low power clock select for FS/LS devices */
			usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
			do_reset = 1;
		}

		hcfg = DWC2_READ_4(hsotg, HCFG);
		fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
			      HCFG_FSLSPCLKSEL_SHIFT;

		if (prtspd == HPRT0_SPD_LOW_SPEED &&
		    params->host_ls_low_power_phy_clk ==
		    DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
			/* 6 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 6 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				DWC2_WRITE_4(hsotg, HCFG, hcfg);
				do_reset = 1;
			}
		} else {
			/* 48 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 48 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				DWC2_WRITE_4(hsotg, HCFG, hcfg);
				do_reset = 1;
			}
		}
	} else {
		/* Not low power */
		if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
			usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
			do_reset = 1;
		}
	}

	if (do_reset) {
		/* Reset the port after a delay so the clock change settles */
		*hprt0_modify |= HPRT0_RST;
		queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
				   msecs_to_jiffies(60));
	} else {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;
		dwc2_root_intr(hsotg->hsotg_sc);

	}
}
329 
330 /*
331  * There are multiple conditions that can cause a port interrupt. This function
332  * determines which interrupt conditions have occurred and handles them
333  * appropriately.
334  */
static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
{
	u32 hprt0;
	u32 hprt0_modify;

	dev_vdbg(hsotg->dev, "--Port Interrupt--\n");

	hprt0 = DWC2_READ_4(hsotg, HPRT0);
	hprt0_modify = hprt0;

	/*
	 * Clear appropriate bits in HPRT0 to clear the interrupt bit in
	 * GINTSTS
	 */
	hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
			  HPRT0_OVRCURRCHG);

	/*
	 * Port Connect Detected
	 * Set flag and clear if detected
	 */
	if (hprt0 & HPRT0_CONNDET) {
		dev_vdbg(hsotg->dev,
			 "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
			 hprt0);
		hsotg->flags.b.port_connect_status_change = 1;
		hsotg->flags.b.port_connect_status = 1;
		hprt0_modify |= HPRT0_CONNDET;

		/*
		 * The Hub driver asserts a reset when it sees port connect
		 * status change flag
		 */
	}

	/*
	 * Port Enable Changed
	 * Clear if detected - Set internal flag if disabled
	 */
	if (hprt0 & HPRT0_ENACHG) {
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
			 hprt0, !!(hprt0 & HPRT0_ENA));
		hprt0_modify |= HPRT0_ENACHG;
		if (hprt0 & HPRT0_ENA)
			/* May OR HPRT0_RST into hprt0_modify */
			dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
		else
			hsotg->flags.b.port_enable_change = 1;
	}

	/* Overcurrent Change Interrupt */
	if (hprt0 & HPRT0_OVRCURRCHG) {
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
			 hprt0);
		hsotg->flags.b.port_over_current_change = 1;
		hprt0_modify |= HPRT0_OVRCURRCHG;
	}

	/* Clear Port Interrupts */
	DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify);

	/* Let the root hub know about any status changes recorded above */
	if (hsotg->flags.b.port_connect_status_change ||
	    hsotg->flags.b.port_enable_change ||
	    hsotg->flags.b.port_over_current_change)
		dwc2_root_intr(hsotg->hsotg_sc);
}
402 
403 /*
404  * Gets the actual length of a transfer after the transfer halts. halt_status
405  * holds the reason for the halt.
406  *
407  * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
408  * is set to 1 upon return if less than the requested number of bytes were
409  * transferred. short_read may also be NULL on entry, in which case it remains
410  * unchanged.
411  */
412 static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
413 				       struct dwc2_host_chan *chan, int chnum,
414 				       struct dwc2_qtd *qtd,
415 				       enum dwc2_halt_status halt_status,
416 				       int *short_read)
417 {
418 	u32 hctsiz, count, length;
419 
420 	hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
421 
422 	if (halt_status == DWC2_HC_XFER_COMPLETE) {
423 		if (chan->ep_is_in) {
424 			count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
425 				TSIZ_XFERSIZE_SHIFT;
426 			length = chan->xfer_len - count;
427 			if (short_read != NULL)
428 				*short_read = (count != 0);
429 		} else if (chan->qh->do_split) {
430 			length = qtd->ssplit_out_xfer_count;
431 		} else {
432 			length = chan->xfer_len;
433 		}
434 	} else {
435 		/*
436 		 * Must use the hctsiz.pktcnt field to determine how much data
437 		 * has been transferred. This field reflects the number of
438 		 * packets that have been transferred via the USB. This is
439 		 * always an integral number of packets if the transfer was
440 		 * halted before its normal completion. (Can't use the
441 		 * hctsiz.xfersize field because that reflects the number of
442 		 * bytes transferred via the AHB, not the USB).
443 		 */
444 		count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
445 		length = (chan->start_pkt_count - count) * chan->max_packet;
446 	}
447 
448 	return length;
449 }
450 
451 /**
452  * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
453  * Complete interrupt on the host channel. Updates the actual_length field
454  * of the URB based on the number of bytes transferred via the host channel.
455  * Sets the URB status if the data transfer is finished.
456  *
457  * Return: 1 if the data transfer specified by the URB is completely finished,
458  * 0 otherwise
459  */
static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 struct dwc2_hcd_urb *urb,
				 struct dwc2_qtd *qtd)
{
	int xfer_done = 0;
	int short_read = 0;
	int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
						      DWC2_HC_XFER_COMPLETE,
						      &short_read);

	/* Never let actual_length exceed the requested length */
	if (urb->actual_length + xfer_length > urb->length) {
		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
		xfer_length = urb->length - urb->actual_length;
	}

	/* Non DWORD-aligned buffer case handling */
	if (chan->align_buf && xfer_length && chan->ep_is_in) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		/* Sync the bounce buffer before copying received data out */
		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_POSTREAD);
		memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
		       xfer_length);
		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_PREREAD);
	}

	dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
		 urb->actual_length, xfer_length);
	urb->actual_length += xfer_length;

	/*
	 * A bulk OUT that filled the request exactly and wants
	 * URB_SEND_ZERO_PACKET still owes a zero-length packet, so the
	 * transfer is not done yet; otherwise a short read or a fully
	 * satisfied request completes the URB.
	 */
	if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
	    (urb->flags & URB_SEND_ZERO_PACKET) &&
	    urb->actual_length >= urb->length &&
	    !(urb->length % chan->max_packet)) {
		xfer_done = 0;
	} else if (short_read || urb->actual_length >= urb->length) {
		xfer_done = 1;
		urb->status = 0;
	}

	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
	dev_vdbg(hsotg->dev, "  chan->xfer_len %d\n", chan->xfer_len);
	dev_vdbg(hsotg->dev, "  hctsiz.xfersize %d\n",
		 (DWC2_READ_4(hsotg, HCTSIZ(chnum)) & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n", urb->length);
	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n", urb->actual_length);
	dev_vdbg(hsotg->dev, "  short_read %d, xfer_done %d\n", short_read,
		 xfer_done);

	return xfer_done;
}
511 
512 /*
513  * Save the starting data toggle for the next transfer. The data toggle is
514  * saved in the QH for non-control transfers and it's saved in the QTD for
515  * control transfers.
516  */
517 void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
518 			       struct dwc2_host_chan *chan, int chnum,
519 			       struct dwc2_qtd *qtd)
520 {
521 	u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
522 	u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
523 
524 	if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
525 		if (pid == TSIZ_SC_MC_PID_DATA0)
526 			chan->qh->data_toggle = DWC2_HC_PID_DATA0;
527 		else
528 			chan->qh->data_toggle = DWC2_HC_PID_DATA1;
529 	} else {
530 		if (pid == TSIZ_SC_MC_PID_DATA0)
531 			qtd->data_toggle = DWC2_HC_PID_DATA0;
532 		else
533 			qtd->data_toggle = DWC2_HC_PID_DATA1;
534 	}
535 }
536 
537 /**
538  * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
539  * the transfer is stopped for any reason. The fields of the current entry in
540  * the frame descriptor array are set based on the transfer state and the input
541  * halt_status. Completes the Isochronous URB if all the URB frames have been
542  * completed.
543  *
544  * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
545  * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
546  */
static enum dwc2_halt_status dwc2_update_isoc_urb_state(
		struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		int chnum, struct dwc2_qtd *qtd,
		enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_hcd_urb *urb = qtd->urb;

	if (!urb)
		return DWC2_HC_XFER_NO_HALT_STATUS;

	/* Descriptor for the frame currently being processed */
	frame_desc = &urb->iso_descs[qtd->isoc_frame_index];

	switch (halt_status) {
	case DWC2_HC_XFER_COMPLETE:
		frame_desc->status = 0;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		if (chan->align_buf && frame_desc->actual_length &&
		    chan->ep_is_in) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			/* Sync bounce buffer before copying received data */
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_POSTREAD);
			memcpy(urb->buf + frame_desc->offset +
			       qtd->isoc_split_offset, chan->qh->dw_align_buf,
			       frame_desc->actual_length);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_PREREAD);
		}
		break;
	case DWC2_HC_XFER_FRAME_OVERRUN:
		urb->error_count++;
		if (chan->ep_is_in)
			frame_desc->status = -ENOSR;
		else
			frame_desc->status = -ECOMM;
		frame_desc->actual_length = 0;
		break;
	case DWC2_HC_XFER_BABBLE_ERR:
		urb->error_count++;
		frame_desc->status = -EOVERFLOW;
		/* Don't need to update actual_length in this case */
		break;
	case DWC2_HC_XFER_XACT_ERR:
		urb->error_count++;
		frame_desc->status = -EPROTO;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		if (chan->align_buf && frame_desc->actual_length &&
		    chan->ep_is_in) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_POSTREAD);
			memcpy(urb->buf + frame_desc->offset +
			       qtd->isoc_split_offset, chan->qh->dw_align_buf,
			       frame_desc->actual_length);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_PREREAD);
		}

		/* Skip whole frame */
		if (chan->qh->do_split &&
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
		}

		break;
	default:
		dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
			halt_status);
		break;
	}

	/* Advance to the next frame; complete the URB when all are done */
	if (++qtd->isoc_frame_index == urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers. The individual
		 * frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		halt_status = DWC2_HC_XFER_URB_COMPLETE;
	} else {
		halt_status = DWC2_HC_XFER_COMPLETE;
	}

	return halt_status;
}
641 
642 /*
643  * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
644  * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
645  * still linked to the QH, the QH is added to the end of the inactive
646  * non-periodic schedule. For periodic QHs, removes the QH from the periodic
647  * schedule if no more QTDs are linked to the QH.
648  */
static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			       int free_qtd)
{
	int continue_split = 0;
	struct dwc2_qtd *qtd;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "  %s(%p,%p,%d)\n", __func__,
			 hsotg, qh, free_qtd);

	if (list_empty(&qh->qtd_list)) {
		dev_dbg(hsotg->dev, "## QTD list empty ##\n");
		goto no_qtd;
	}

	qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);

	/* A split transaction still in progress must be continued */
	if (qtd->complete_split)
		continue_split = 1;
	else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
		 qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
		continue_split = 1;

	if (free_qtd) {
		/* Freeing the QTD abandons any in-progress split */
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		continue_split = 0;
	}

no_qtd:
	/* Detach the QH from its channel before rescheduling it */
	if (qh->channel)
		qh->channel->align_buf = 0;
	qh->channel = NULL;
	dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
}
683 
684 /**
685  * dwc2_release_channel() - Releases a host channel for use by other transfers
686  *
687  * @hsotg:       The HCD state structure
688  * @chan:        The host channel to release
689  * @qtd:         The QTD associated with the host channel. This QTD may be
690  *               freed if the transfer is complete or an error has occurred.
691  * @halt_status: Reason the channel is being released. This status
692  *               determines the actions taken by this function.
693  *
694  * Also attempts to select and queue more transactions since at least one host
695  * channel is available.
696  */
static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan,
				 struct dwc2_qtd *qtd,
				 enum dwc2_halt_status halt_status)
{
	enum dwc2_transaction_type tr_type;
	u32 haintmsk;
	int free_qtd = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "  %s: channel %d, halt_status %d\n",
			 __func__, chan->hc_num, halt_status);

	/* Decide whether the current QTD should be freed with the channel */
	switch (halt_status) {
	case DWC2_HC_XFER_URB_COMPLETE:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_AHB_ERR:
	case DWC2_HC_XFER_STALL:
	case DWC2_HC_XFER_BABBLE_ERR:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_XACT_ERR:
		/* Give up after three consecutive transaction errors */
		if (qtd && qtd->error_count >= 3) {
			dev_vdbg(hsotg->dev,
				 "  Complete URB with transaction error\n");
			free_qtd = 1;
			dwc2_host_complete(hsotg, qtd, -EPROTO);
		}
		break;
	case DWC2_HC_XFER_URB_DEQUEUE:
		/*
		 * The QTD has already been removed and the QH has been
		 * deactivated. Don't want to do anything except release the
		 * host channel and try to queue more transfers.
		 */
		goto cleanup;
	case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
		dev_vdbg(hsotg->dev, "  Complete URB with I/O error\n");
		free_qtd = 1;
		dwc2_host_complete(hsotg, qtd, -EIO);
		break;
	case DWC2_HC_XFER_NO_HALT_STATUS:
	default:
		break;
	}

	dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);

cleanup:
	/*
	 * Release the host channel for use by other transfers. The cleanup
	 * function clears the channel interrupt enables and conditions, so
	 * there's no need to clear the Channel Halted interrupt separately.
	 */
	if (!list_empty(&chan->hc_list_entry))
		list_del(&chan->hc_list_entry);
	dwc2_hc_cleanup(hsotg, chan);
	list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);

	/* Return the channel reservation to the appropriate counter */
	if (hsotg->core_params->uframe_sched > 0) {
		hsotg->available_host_channels++;
	} else {
		switch (chan->ep_type) {
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			hsotg->non_periodic_channels--;
			break;
		default:
			/*
			 * Don't release reservations for periodic channels
			 * here. That's done when a periodic transfer is
			 * descheduled (i.e. when the QH is removed from the
			 * periodic schedule).
			 */
			break;
		}
	}

	/* Mask this channel's interrupt now that it is free */
	haintmsk = DWC2_READ_4(hsotg, HAINTMSK);
	haintmsk &= ~(1 << chan->hc_num);
	DWC2_WRITE_4(hsotg, HAINTMSK, haintmsk);

	/* Try to queue more transfers now that there's a free channel */
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);
}
785 
786 /*
787  * Halts a host channel. If the channel cannot be halted immediately because
788  * the request queue is full, this function ensures that the FIFO empty
789  * interrupt for the appropriate queue is enabled so that the halt request can
790  * be queued when there is space in the request queue.
791  *
792  * This function may also be called in DMA mode. In that case, the channel is
793  * simply released since the core always halts the channel automatically in
794  * DMA mode.
795  */
static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
			      enum dwc2_halt_status halt_status)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (hsotg->core_params->dma_enable > 0) {
		/* In DMA mode the core halts automatically; just release */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
		return;
	}

	/* Slave mode processing */
	dwc2_hc_halt(hsotg, chan, halt_status);

	/*
	 * If the halt could not be issued because the request queue was
	 * full, enable the FIFO empty interrupt for the relevant schedule
	 * so the halt gets queued when space becomes available.
	 */
	if (chan->halt_on_queue) {
		u32 gintmsk;

		dev_vdbg(hsotg->dev, "Halt on queue\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			/*
			 * Make sure the Non-periodic Tx FIFO empty interrupt
			 * is enabled so that the non-periodic schedule will
			 * be processed
			 */
			gintmsk = DWC2_READ_4(hsotg, GINTMSK);
			gintmsk |= GINTSTS_NPTXFEMP;
			DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
		} else {
			dev_vdbg(hsotg->dev, "isoc/intr\n");
			/*
			 * Move the QH from the periodic queued schedule to
			 * the periodic assigned schedule. This allows the
			 * halt to be queued when the periodic schedule is
			 * processed.
			 */
			list_move(&chan->qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);

			/*
			 * Make sure the Periodic Tx FIFO Empty interrupt is
			 * enabled so that the periodic schedule will be
			 * processed
			 */
			gintmsk = DWC2_READ_4(hsotg, GINTMSK);
			gintmsk |= GINTSTS_PTXFEMP;
			DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
		}
	}
}
850 
851 /*
852  * Performs common cleanup for non-periodic transfers after a Transfer
853  * Complete interrupt. This function should be called after any endpoint type
854  * specific handling is finished to release the host channel.
855  */
856 static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
857 					    struct dwc2_host_chan *chan,
858 					    int chnum, struct dwc2_qtd *qtd,
859 					    enum dwc2_halt_status halt_status)
860 {
861 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
862 
863 	qtd->error_count = 0;
864 
865 	if (chan->hcint & HCINTMSK_NYET) {
866 		/*
867 		 * Got a NYET on the last transaction of the transfer. This
868 		 * means that the endpoint should be in the PING state at the
869 		 * beginning of the next transfer.
870 		 */
871 		dev_vdbg(hsotg->dev, "got NYET\n");
872 		chan->qh->ping_state = 1;
873 	}
874 
875 	/*
876 	 * Always halt and release the host channel to make it available for
877 	 * more transfers. There may still be more phases for a control
878 	 * transfer or more data packets for a bulk transfer at this point,
879 	 * but the host channel is still halted. A channel will be reassigned
880 	 * to the transfer when the non-periodic schedule is processed after
881 	 * the channel is released. This allows transactions to be queued
882 	 * properly via dwc2_hcd_queue_transactions, which also enables the
883 	 * Tx FIFO Empty interrupt if necessary.
884 	 */
885 	if (chan->ep_is_in) {
886 		/*
887 		 * IN transfers in Slave mode require an explicit disable to
888 		 * halt the channel. (In DMA mode, this call simply releases
889 		 * the channel.)
890 		 */
891 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
892 	} else {
893 		/*
894 		 * The channel is automatically disabled by the core for OUT
895 		 * transfers in Slave mode
896 		 */
897 		dwc2_release_channel(hsotg, chan, qtd, halt_status);
898 	}
899 }
900 
901 /*
902  * Performs common cleanup for periodic transfers after a Transfer Complete
903  * interrupt. This function should be called after any endpoint type specific
904  * handling is finished to release the host channel.
905  */
906 static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
907 					struct dwc2_host_chan *chan, int chnum,
908 					struct dwc2_qtd *qtd,
909 					enum dwc2_halt_status halt_status)
910 {
911 	u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
912 
913 	qtd->error_count = 0;
914 
915 	if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
916 		/* Core halts channel in these cases */
917 		dwc2_release_channel(hsotg, chan, qtd, halt_status);
918 	else
919 		/* Flush any outstanding requests from the Tx queue */
920 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
921 }
922 
/*
 * Accounts for the data received on a Transfer Complete interrupt for an
 * isochronous IN split transfer, advancing the per-qtd split bookkeeping
 * (isoc_frame_index, isoc_split_offset, complete_split).
 *
 * Return: 1 if the host channel was released, 0 otherwise
 */
static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u32 len;

	/* No URB attached: nothing to account, channel stays as-is */
	if (!qtd->urb)
		return 0;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
					  DWC2_HC_XFER_COMPLETE, NULL);
	if (!len) {
		/* Nothing received: restart this split from the beginning */
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
		return 0;
	}

	frame_desc->actual_length += len;

	/*
	 * Non-aligned transfers land in the qh bounce buffer
	 * (dw_align_buf); sync and copy the piece back into the URB
	 * buffer at the current split offset.
	 */
	if (chan->align_buf) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		usb_syncmem(qtd->urb->usbdma, 0, qtd->urb->length,
			    BUS_DMASYNC_POSTREAD);
		memcpy(qtd->urb->buf + frame_desc->offset +
		       qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
		usb_syncmem(qtd->urb->usbdma, 0, qtd->urb->length,
			    BUS_DMASYNC_PREREAD);
	}

	qtd->isoc_split_offset += len;

	/* Frame fully received: mark it done and move to the next one */
	if (frame_desc->actual_length >= frame_desc->length) {
		frame_desc->status = 0;
		qtd->isoc_frame_index++;
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
	}

	/* All frames done: complete the URB; otherwise just recycle channel */
	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_URB_COMPLETE);
	} else {
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_NO_HALT_STATUS);
	}

	return 1;	/* Indicates that channel released */
}
974 
975 /*
976  * Handles a host channel Transfer Complete interrupt. This handler may be
977  * called in either DMA mode or Slave mode.
978  */
static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
				  struct dwc2_host_chan *chan, int chnum,
				  struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
	int pipe_type;
	int urb_xfer_done;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev,
			 "--Host Channel %d Interrupt: Transfer Complete--\n",
			 chnum);

	/* No URB attached: nothing to update, just clear the interrupt */
	if (!urb)
		goto handle_xfercomp_done;

	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);

	/* Descriptor DMA mode: the DDMA code completes the transfer */
	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
		if (pipe_type == USB_ENDPOINT_XFER_ISOC)
			/* Do not disable the interrupt, just clear it */
			return;
		goto handle_xfercomp_done;
	}

	/* Handle xfer complete on CSPLIT */
	if (chan->qh->do_split) {
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			/* Returns nonzero when it released the channel */
			if (qtd->complete_split &&
			    dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
							qtd))
				goto handle_xfercomp_done;
		} else {
			qtd->complete_split = 0;
		}
	}

	/* Update the QTD and URB states */
	switch (pipe_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* Advance the three-phase control transfer state machine */
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			/* Data phase only when there is data to move */
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 "  Control setup transaction done\n");
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_DATA:
			urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
							      chnum, urb, qtd);
			if (urb_xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 "  Control data transfer done\n");
			} else {
				/* More data to go: remember the data toggle */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_STATUS:
			dev_vdbg(hsotg->dev, "  Control transfer complete\n");
			if (urb->status == -EINPROGRESS)
				urb->status = 0;
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
			break;
		}

		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
						halt_status);
		break;
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, "  Bulk transfer complete\n");
		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
						      qtd);
		if (urb_xfer_done) {
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
		} else {
			halt_status = DWC2_HC_XFER_COMPLETE;
		}

		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
						halt_status);
		break;
	case USB_ENDPOINT_XFER_INT:
		dev_vdbg(hsotg->dev, "  Interrupt transfer complete\n");
		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
						      qtd);

		/*
		 * Interrupt URB is done on the first transfer complete
		 * interrupt
		 */
		if (urb_xfer_done) {
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
		} else {
			halt_status = DWC2_HC_XFER_COMPLETE;
		}

		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
					    halt_status);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "  Isochronous transfer complete\n");
		/* Split pieces are accounted elsewhere; only ALL updates URB */
		if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
					chnum, qtd, DWC2_HC_XFER_COMPLETE);
		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
					    halt_status);
		break;
	}

handle_xfercomp_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
}
1106 
1107 /*
1108  * Handles a host channel STALL interrupt. This handler may be called in
1109  * either DMA mode or Slave mode.
1110  */
1111 static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
1112 			       struct dwc2_host_chan *chan, int chnum,
1113 			       struct dwc2_qtd *qtd)
1114 {
1115 	struct dwc2_hcd_urb *urb = qtd->urb;
1116 	int pipe_type;
1117 
1118 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
1119 		chnum);
1120 
1121 	if (hsotg->core_params->dma_desc_enable > 0) {
1122 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1123 					    DWC2_HC_XFER_STALL);
1124 		goto handle_stall_done;
1125 	}
1126 
1127 	if (!urb)
1128 		goto handle_stall_halt;
1129 
1130 	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1131 
1132 	if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
1133 		dwc2_host_complete(hsotg, qtd, -EPIPE);
1134 
1135 	if (pipe_type == USB_ENDPOINT_XFER_BULK ||
1136 	    pipe_type == USB_ENDPOINT_XFER_INT) {
1137 		dwc2_host_complete(hsotg, qtd, -EPIPE);
1138 		/*
1139 		 * USB protocol requires resetting the data toggle for bulk
1140 		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1141 		 * setup command is issued to the endpoint. Anticipate the
1142 		 * CLEAR_FEATURE command since a STALL has occurred and reset
1143 		 * the data toggle now.
1144 		 */
1145 		chan->qh->data_toggle = 0;
1146 	}
1147 
1148 handle_stall_halt:
1149 	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
1150 
1151 handle_stall_done:
1152 	disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
1153 }
1154 
1155 /*
1156  * Updates the state of the URB when a transfer has been stopped due to an
1157  * abnormal condition before the transfer completes. Modifies the
1158  * actual_length field of the URB to reflect the number of bytes that have
1159  * actually been transferred via the host channel.
1160  */
static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan, int chnum,
				      struct dwc2_hcd_urb *urb,
				      struct dwc2_qtd *qtd,
				      enum dwc2_halt_status halt_status)
{
	u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
						      qtd, halt_status, NULL);

	/* Never let actual_length exceed the requested transfer length */
	if (urb->actual_length + xfer_length > urb->length) {
		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
		xfer_length = urb->length - urb->actual_length;
	}

	/* Non DWORD-aligned buffer case handling */
	if (chan->align_buf && xfer_length && chan->ep_is_in) {
		/*
		 * IN data landed in the qh bounce buffer (dw_align_buf);
		 * sync and copy it back into the URB buffer.
		 */
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_POSTREAD);
		memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
		       xfer_length);
		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_PREREAD);
	}

	urb->actual_length += xfer_length;

	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
	dev_vdbg(hsotg->dev, "  chan->start_pkt_count %d\n",
		 chan->start_pkt_count);
	dev_vdbg(hsotg->dev, "  hctsiz.pktcnt %d\n",
		 (DWC2_READ_4(hsotg, HCTSIZ(chnum)) & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
	dev_vdbg(hsotg->dev, "  chan->max_packet %d\n", chan->max_packet);
	dev_vdbg(hsotg->dev, "  bytes_transferred %d\n",
		 xfer_length);
	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n",
		 urb->actual_length);
	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n",
		 urb->length);
}
1200 
1201 /*
1202  * Handles a host channel NAK interrupt. This handler may be called in either
1203  * DMA mode or Slave mode.
1204  */
1205 static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1206 			     struct dwc2_host_chan *chan, int chnum,
1207 			     struct dwc2_qtd *qtd)
1208 {
1209 	if (dbg_hc(chan))
1210 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
1211 			 chnum);
1212 
1213 	/*
1214 	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
1215 	 * interrupt. Re-start the SSPLIT transfer.
1216 	 */
1217 	if (chan->do_split) {
1218 		if (chan->complete_split)
1219 			qtd->error_count = 0;
1220 		qtd->complete_split = 0;
1221 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1222 		goto handle_nak_done;
1223 	}
1224 
1225 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1226 	case USB_ENDPOINT_XFER_CONTROL:
1227 	case USB_ENDPOINT_XFER_BULK:
1228 		if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
1229 			/*
1230 			 * NAK interrupts are enabled on bulk/control IN
1231 			 * transfers in DMA mode for the sole purpose of
1232 			 * resetting the error count after a transaction error
1233 			 * occurs. The core will continue transferring data.
1234 			 */
1235 			qtd->error_count = 0;
1236 			break;
1237 		}
1238 
1239 		/*
1240 		 * NAK interrupts normally occur during OUT transfers in DMA
1241 		 * or Slave mode. For IN transfers, more requests will be
1242 		 * queued as request queue space is available.
1243 		 */
1244 		qtd->error_count = 0;
1245 
1246 		if (!chan->qh->ping_state) {
1247 			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1248 						  qtd, DWC2_HC_XFER_NAK);
1249 			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1250 
1251 			if (chan->speed == USB_SPEED_HIGH)
1252 				chan->qh->ping_state = 1;
1253 		}
1254 
1255 		/*
1256 		 * Halt the channel so the transfer can be re-started from
1257 		 * the appropriate point or the PING protocol will
1258 		 * start/continue
1259 		 */
1260 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1261 		break;
1262 	case USB_ENDPOINT_XFER_INT:
1263 		qtd->error_count = 0;
1264 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1265 		break;
1266 	case USB_ENDPOINT_XFER_ISOC:
1267 		/* Should never get called for isochronous transfers */
1268 		dev_err(hsotg->dev, "NACK interrupt for ISOC transfer\n");
1269 		break;
1270 	}
1271 
1272 handle_nak_done:
1273 	disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
1274 }
1275 
1276 /*
1277  * Handles a host channel ACK interrupt. This interrupt is enabled when
1278  * performing the PING protocol in Slave mode, when errors occur during
1279  * either Slave mode or DMA mode, and during Start Split transactions.
1280  */
static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
			     struct dwc2_host_chan *chan, int chnum,
			     struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
			 chnum);

	if (chan->do_split) {
		/* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
		if (!chan->ep_is_in &&
		    chan->data_pid_start != DWC2_HC_PID_SETUP)
			qtd->ssplit_out_xfer_count = chan->xfer_len;

		if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
			/* Next transaction for this qtd is the CSPLIT */
			qtd->complete_split = 1;
			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
		} else {
			/* ISOC OUT */
			switch (chan->xact_pos) {
			case DWC2_HCSPLT_XACTPOS_ALL:
				break;
			case DWC2_HCSPLT_XACTPOS_END:
				/* Last piece sent; reset to ALL for next fr */
				qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
				qtd->isoc_split_offset = 0;
				break;
			case DWC2_HCSPLT_XACTPOS_BEGIN:
			case DWC2_HCSPLT_XACTPOS_MID:
				/*
				 * For BEGIN or MID, calculate the length for
				 * the next microframe to determine the correct
				 * SSPLIT token, either MID or END
				 */
				frame_desc = &qtd->urb->iso_descs[
						qtd->isoc_frame_index];
				/*
				 * 188 is the per-microframe byte budget of a
				 * full-speed isochronous split transaction
				 * (USB 2.0 split-transaction rules)
				 */
				qtd->isoc_split_offset += 188;

				if (frame_desc->length - qtd->isoc_split_offset
							<= 188)
					qtd->isoc_split_pos =
							DWC2_HCSPLT_XACTPOS_END;
				else
					qtd->isoc_split_pos =
							DWC2_HCSPLT_XACTPOS_MID;
				break;
			}
		}
	} else {
		/* Non-split: a successful transaction clears the err count */
		qtd->error_count = 0;

		if (chan->qh->ping_state) {
			chan->qh->ping_state = 0;
			/*
			 * Halt the channel so the transfer can be re-started
			 * from the appropriate point. This only happens in
			 * Slave mode. In DMA mode, the ping_state is cleared
			 * when the transfer is started because the core
			 * automatically executes the PING, then the transfer.
			 */
			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
		}
	}

	/*
	 * If the ACK occurred when _not_ in the PING state, let the channel
	 * continue transferring data after clearing the error count
	 */
	disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
}
1352 
1353 /*
1354  * Handles a host channel NYET interrupt. This interrupt should only occur on
1355  * Bulk and Control OUT endpoints and for complete split transactions. If a
1356  * NYET occurs at the same time as a Transfer Complete interrupt, it is
1357  * handled in the xfercomp interrupt handler, not here. This handler may be
1358  * called in either DMA mode or Slave mode.
1359  */
static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, int chnum,
			      struct dwc2_qtd *qtd)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
			 chnum);

	/*
	 * NYET on CSPLIT
	 * re-do the CSPLIT immediately on non-periodic
	 */
	if (chan->do_split && chan->complete_split) {
		if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
		    hsotg->core_params->dma_enable > 0) {
			/* Isoc IN in DMA mode: give up on this frame, move on */
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
			qtd->isoc_frame_index++;
			if (qtd->urb &&
			    qtd->isoc_frame_index == qtd->urb->packet_count) {
				dwc2_host_complete(hsotg, qtd, 0);
				dwc2_release_channel(hsotg, chan, qtd,
						     DWC2_HC_XFER_URB_COMPLETE);
			} else {
				dwc2_release_channel(hsotg, chan, qtd,
						DWC2_HC_XFER_NO_HALT_STATUS);
			}
			goto handle_nyet_done;
		}

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			int frnum = dwc2_hcd_get_frame_number(hsotg);

			if (dwc2_full_frame_num(frnum) !=
			    dwc2_full_frame_num(chan->qh->sched_frame)) {
				/*
				 * No longer in the same full speed frame.
				 * Treat this as a transaction error.
				 */
#if 0
				/*
				 * Todo: Fix system performance so this can
				 * be treated as an error. Right now complete
				 * splits cannot be scheduled precisely enough
				 * due to other system activity, so this error
				 * occurs regularly in Slave mode.
				 */
				qtd->error_count++;
#endif
				qtd->complete_split = 0;
				dwc2_halt_channel(hsotg, chan, qtd,
						  DWC2_HC_XFER_XACT_ERR);
				/* Todo: add support for isoc release */
				goto handle_nyet_done;
			}
		}

		/* Still in the same frame: halt and retry the CSPLIT */
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
		goto handle_nyet_done;
	}

	/* Non-split NYET: endpoint not ready, switch to PING protocol */
	chan->qh->ping_state = 1;
	qtd->error_count = 0;

	/*
	 * NOTE(review): qtd->urb is passed unchecked here while the isoc
	 * path above does test it for NULL -- confirm an URB is always
	 * attached when a non-split NYET is serviced.
	 */
	dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
				  DWC2_HC_XFER_NYET);
	dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);

	/*
	 * Halt the channel and re-start the transfer so the PING protocol
	 * will start
	 */
	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);

handle_nyet_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
}
1438 
1439 /*
1440  * Handles a host channel babble interrupt. This handler may be called in
1441  * either DMA mode or Slave mode.
1442  */
1443 static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1444 				struct dwc2_host_chan *chan, int chnum,
1445 				struct dwc2_qtd *qtd)
1446 {
1447 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1448 		chnum);
1449 
1450 // 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1451 
1452 	if (hsotg->core_params->dma_desc_enable > 0) {
1453 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1454 					    DWC2_HC_XFER_BABBLE_ERR);
1455 		goto disable_int;
1456 	}
1457 
1458 	if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1459 		dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
1460 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1461 	} else {
1462 		enum dwc2_halt_status halt_status;
1463 
1464 		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1465 						qtd, DWC2_HC_XFER_BABBLE_ERR);
1466 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1467 	}
1468 
1469 disable_int:
1470 	disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1471 }
1472 
1473 /*
1474  * Handles a host channel AHB error interrupt. This handler is only called in
1475  * DMA mode.
1476  */
static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan, int chnum,
				struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;

	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
		chnum);

	/* Without an URB there is nothing to complete; just force a halt */
	if (!urb)
		goto handle_ahberr_halt;

// 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);

#ifdef DWC2_DEBUG
	/* Dump channel registers and pipe details for post-mortem analysis */
	const char *pipetype, *speed;

	u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
	u32 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum));
	u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
	u32 hc_dma = DWC2_READ_4(hsotg, HCDMA(chnum));

	dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
	dev_err(hsotg->dev, "  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
	dev_err(hsotg->dev, "  hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
	dev_err(hsotg->dev, "  Device address: %d\n",
		dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_err(hsotg->dev, "  Endpoint: %d, %s\n",
		dwc2_hcd_get_ep_num(&urb->pipe_info),
		dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
		pipetype = "CONTROL";
		break;
	case USB_ENDPOINT_XFER_BULK:
		pipetype = "BULK";
		break;
	case USB_ENDPOINT_XFER_INT:
		pipetype = "INTERRUPT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		pipetype = "ISOCHRONOUS";
		break;
	default:
		pipetype = "UNKNOWN";
		break;
	}

	dev_err(hsotg->dev, "  Endpoint type: %s\n", pipetype);

	switch (chan->speed) {
	case USB_SPEED_HIGH:
		speed = "HIGH";
		break;
	case USB_SPEED_FULL:
		speed = "FULL";
		break;
	case USB_SPEED_LOW:
		speed = "LOW";
		break;
	default:
		speed = "UNKNOWN";
		break;
	}

	dev_err(hsotg->dev, "  Speed: %s\n", speed);

	dev_err(hsotg->dev, "  Max packet size: %d\n",
		dwc2_hcd_get_mps(&urb->pipe_info));
	dev_err(hsotg->dev, "  Data buffer length: %d\n", urb->length);
	dev_err(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n",
		urb->buf, (unsigned long)urb->dma);
	dev_err(hsotg->dev, "  Setup buffer: %p, Setup DMA: %08lx\n",
		urb->setup_packet, (unsigned long)urb->setup_dma);
	dev_err(hsotg->dev, "  Interval: %d\n", urb->interval);
#endif

	/* Core halts the channel for Descriptor DMA mode */
	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
					    DWC2_HC_XFER_AHB_ERR);
		goto handle_ahberr_done;
	}

	/* Fail the URB; the AHB error is not recoverable for this transfer */
	dwc2_host_complete(hsotg, qtd, -EIO);

handle_ahberr_halt:
	/*
	 * Force a channel halt. Don't call dwc2_halt_channel because that won't
	 * write to the HCCHARn register in DMA mode to force the halt.
	 */
	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);

handle_ahberr_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
}
1574 
1575 /*
1576  * Handles a host channel transaction error interrupt. This handler may be
1577  * called in either DMA mode or Slave mode.
1578  */
static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 struct dwc2_qtd *qtd)
{
	dev_dbg(hsotg->dev,
		"--Host Channel %d Interrupt: Transaction Error--\n", chnum);

// 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);

	if (hsotg->core_params->dma_desc_enable > 0) {
		/* Descriptor DMA mode: the DDMA code finishes the transfer */
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
					    DWC2_HC_XFER_XACT_ERR);
		goto handle_xacterr_done;
	}

	/*
	 * NOTE(review): qtd->urb is dereferenced without a NULL check here,
	 * unlike several other handlers in this file -- confirm an URB is
	 * always attached when a transaction error is serviced.
	 */
	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		qtd->error_count++;
		if (!chan->qh->ping_state) {

			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
						  qtd, DWC2_HC_XFER_XACT_ERR);
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
			/* High-speed OUT continues with the PING protocol */
			if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
				chan->qh->ping_state = 1;
		}

		/*
		 * Halt the channel so the transfer can be re-started from
		 * the appropriate point or the PING protocol will start
		 */
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
		break;
	case USB_ENDPOINT_XFER_INT:
		qtd->error_count++;
		/* A failed CSPLIT means the whole split must be restarted */
		if (chan->do_split && chan->complete_split)
			qtd->complete_split = 0;
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		{
			enum dwc2_halt_status halt_status;

			/* Record the error in the per-frame status */
			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
					chnum, qtd, DWC2_HC_XFER_XACT_ERR);
			dwc2_halt_channel(hsotg, chan, qtd, halt_status);
		}
		break;
	}

handle_xacterr_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
}
1633 
1634 /*
1635  * Handles a host channel frame overrun interrupt. This handler may be called
1636  * in either DMA mode or Slave mode.
1637  */
1638 static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
1639 				  struct dwc2_host_chan *chan, int chnum,
1640 				  struct dwc2_qtd *qtd)
1641 {
1642 	enum dwc2_halt_status halt_status;
1643 
1644 	if (dbg_hc(chan))
1645 		dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
1646 			chnum);
1647 
1648 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1649 
1650 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1651 	case USB_ENDPOINT_XFER_CONTROL:
1652 	case USB_ENDPOINT_XFER_BULK:
1653 		break;
1654 	case USB_ENDPOINT_XFER_INT:
1655 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1656 		break;
1657 	case USB_ENDPOINT_XFER_ISOC:
1658 		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1659 					qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1660 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1661 		break;
1662 	}
1663 
1664 	disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
1665 }
1666 
1667 /*
1668  * Handles a host channel data toggle error interrupt. This handler may be
1669  * called in either DMA mode or Slave mode.
1670  */
1671 static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
1672 				    struct dwc2_host_chan *chan, int chnum,
1673 				    struct dwc2_qtd *qtd)
1674 {
1675 	dev_dbg(hsotg->dev,
1676 		"--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
1677 
1678 	if (chan->ep_is_in)
1679 		qtd->error_count = 0;
1680 	else
1681 		dev_err(hsotg->dev,
1682 			"Data Toggle Error on OUT transfer, channel %d\n",
1683 			chnum);
1684 
1685 // 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1686 	disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
1687 }
1688 
1689 /*
1690  * For debug only. It checks that a valid halt status is set and that
1691  * HCCHARn.chdis is clear. If there's a problem, corrective action is
1692  * taken and a warning is issued.
1693  *
1694  * Return: true if halt status is ok, false otherwise
1695  */
static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan, int chnum,
				struct dwc2_qtd *qtd)
{
#ifdef DWC2_DEBUG
	u32 hcchar;
	u32 hctsiz;
	u32 hcintmsk;
	u32 hcsplt;

	if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
		/*
		 * This code is here only as a check. This condition should
		 * never happen. Ignore the halt if it does occur.
		 */
		hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
		hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
		hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
		hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum));
		dev_dbg(hsotg->dev,
			"%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
			 __func__);
		dev_dbg(hsotg->dev,
			"channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
			chnum, hcchar, hctsiz);
		dev_dbg(hsotg->dev,
			"hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
			chan->hcint, hcintmsk, hcsplt);
		if (qtd)
			dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
				qtd->complete_split);
		dev_warn(hsotg->dev,
			 "%s: no halt status, channel %d, ignoring interrupt\n",
			 __func__, chnum);
		return false;
	}

	/*
	 * This code is here only as a check. hcchar.chdis should never be set
	 * when the halt interrupt occurs. Halt the channel again if it does
	 * occur.
	 */
	hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
	if (hcchar & HCCHAR_CHDIS) {
		dev_warn(hsotg->dev,
			 "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
			 __func__, hcchar);
		chan->halt_pending = 0;
		dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
		return false;
	}
#endif

	/* All checks above are compiled out in non-DWC2_DEBUG kernels */
	return true;
}
1751 
1752 /*
1753  * Handles a host Channel Halted interrupt in DMA mode. This handler
1754  * determines the reason the channel halted and proceeds accordingly.
1755  */
1756 static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
1757 				    struct dwc2_host_chan *chan, int chnum,
1758 				    struct dwc2_qtd *qtd)
1759 {
1760 	u32 hcintmsk;
1761 	int out_nak_enh = 0;
1762 
1763 	if (dbg_hc(chan))
1764 		dev_vdbg(hsotg->dev,
1765 			 "--Host Channel %d Interrupt: DMA Channel Halted--\n",
1766 			 chnum);
1767 
1768 	/*
1769 	 * For core with OUT NAK enhancement, the flow for high-speed
1770 	 * CONTROL/BULK OUT is handled a little differently
1771 	 */
1772 	if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
1773 		if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
1774 		    (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1775 		     chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
1776 			out_nak_enh = 1;
1777 		}
1778 	}
1779 
1780 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1781 	    (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
1782 	     hsotg->core_params->dma_desc_enable <= 0)) {
1783 		if (hsotg->core_params->dma_desc_enable > 0)
1784 			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1785 						    chan->halt_status);
1786 		else
1787 			/*
1788 			 * Just release the channel. A dequeue can happen on a
1789 			 * transfer timeout. In the case of an AHB Error, the
1790 			 * channel was forced to halt because there's no way to
1791 			 * gracefully recover.
1792 			 */
1793 			dwc2_release_channel(hsotg, chan, qtd,
1794 					     chan->halt_status);
1795 		return;
1796 	}
1797 
1798 	hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
1799 
1800 	if (chan->hcint & HCINTMSK_XFERCOMPL) {
1801 		/*
1802 		 * Todo: This is here because of a possible hardware bug. Spec
1803 		 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
1804 		 * interrupt w/ACK bit set should occur, but I only see the
1805 		 * XFERCOMP bit, even with it masked out. This is a workaround
1806 		 * for that behavior. Should fix this when hardware is fixed.
1807 		 */
1808 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
1809 			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1810 		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
1811 	} else if (chan->hcint & HCINTMSK_STALL) {
1812 		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
1813 	} else if ((chan->hcint & HCINTMSK_XACTERR) &&
1814 		   hsotg->core_params->dma_desc_enable <= 0) {
1815 		if (out_nak_enh) {
1816 			if (chan->hcint &
1817 			    (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
1818 				dev_vdbg(hsotg->dev,
1819 					 "XactErr with NYET/NAK/ACK\n");
1820 				qtd->error_count = 0;
1821 			} else {
1822 				dev_vdbg(hsotg->dev,
1823 					 "XactErr without NYET/NAK/ACK\n");
1824 			}
1825 		}
1826 
1827 		/*
1828 		 * Must handle xacterr before nak or ack. Could get a xacterr
1829 		 * at the same time as either of these on a BULK/CONTROL OUT
1830 		 * that started with a PING. The xacterr takes precedence.
1831 		 */
1832 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1833 	} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
1834 		   hsotg->core_params->dma_desc_enable > 0) {
1835 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1836 	} else if ((chan->hcint & HCINTMSK_AHBERR) &&
1837 		   hsotg->core_params->dma_desc_enable > 0) {
1838 		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
1839 	} else if (chan->hcint & HCINTMSK_BBLERR) {
1840 		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
1841 	} else if (chan->hcint & HCINTMSK_FRMOVRUN) {
1842 		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
1843 	} else if (!out_nak_enh) {
1844 		if (chan->hcint & HCINTMSK_NYET) {
1845 			/*
1846 			 * Must handle nyet before nak or ack. Could get a nyet
1847 			 * at the same time as either of those on a BULK/CONTROL
1848 			 * OUT that started with a PING. The nyet takes
1849 			 * precedence.
1850 			 */
1851 			dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
1852 		} else if ((chan->hcint & HCINTMSK_NAK) &&
1853 			   !(hcintmsk & HCINTMSK_NAK)) {
1854 			/*
1855 			 * If nak is not masked, it's because a non-split IN
1856 			 * transfer is in an error state. In that case, the nak
1857 			 * is handled by the nak interrupt handler, not here.
1858 			 * Handle nak here for BULK/CONTROL OUT transfers, which
1859 			 * halt on a NAK to allow rewinding the buffer pointer.
1860 			 */
1861 			dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
1862 		} else if ((chan->hcint & HCINTMSK_ACK) &&
1863 			   !(hcintmsk & HCINTMSK_ACK)) {
1864 			/*
1865 			 * If ack is not masked, it's because a non-split IN
1866 			 * transfer is in an error state. In that case, the ack
1867 			 * is handled by the ack interrupt handler, not here.
1868 			 * Handle ack here for split transfers. Start splits
1869 			 * halt on ACK.
1870 			 */
1871 			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1872 		} else {
1873 			if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1874 			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1875 				/*
1876 				 * A periodic transfer halted with no other
1877 				 * channel interrupts set. Assume it was halted
1878 				 * by the core because it could not be completed
1879 				 * in its scheduled (micro)frame.
1880 				 */
1881 				dev_dbg(hsotg->dev,
1882 					"%s: Halt channel %d (assume incomplete periodic transfer)\n",
1883 					__func__, chnum);
1884 				dwc2_halt_channel(hsotg, chan, qtd,
1885 					DWC2_HC_XFER_PERIODIC_INCOMPLETE);
1886 			} else {
1887 				dev_err(hsotg->dev,
1888 					"%s: Channel %d - ChHltd set, but reason is unknown\n",
1889 					__func__, chnum);
1890 				dev_err(hsotg->dev,
1891 					"hcint 0x%08x, intsts 0x%08x\n",
1892 					chan->hcint,
1893 					DWC2_READ_4(hsotg, GINTSTS));
1894 			}
1895 		}
1896 	} else {
1897 		dev_info(hsotg->dev,
1898 			 "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
1899 			 chan->hcint);
1900 	}
1901 }
1902 
1903 /*
1904  * Handles a host channel Channel Halted interrupt
1905  *
1906  * In slave mode, this handler is called only when the driver specifically
1907  * requests a halt. This occurs during handling other host channel interrupts
1908  * (e.g. nak, xacterr, stall, nyet, etc.).
1909  *
1910  * In DMA mode, this is the interrupt that occurs when the core has finished
1911  * processing a transfer on a channel. Other host channel interrupts (except
1912  * ahberr) are disabled in DMA mode.
1913  */
1914 static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
1915 				struct dwc2_host_chan *chan, int chnum,
1916 				struct dwc2_qtd *qtd)
1917 {
1918 	if (dbg_hc(chan))
1919 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
1920 			 chnum);
1921 
1922 	if (hsotg->core_params->dma_enable > 0) {
1923 		dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
1924 	} else {
1925 		if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
1926 			return;
1927 		dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
1928 	}
1929 }
1930 
/*
 * Handles interrupt for a specific Host Channel
 *
 * Reads and acknowledges the channel's HCINT status, then dispatches
 * each pending, unmasked condition to its dedicated handler.
 */
static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan;
	u32 hcint, hcintmsk;

	chan = hsotg->hc_ptr_array[chnum];

	/* Snapshot the raw status and mask before any other processing */
	hcint = DWC2_READ_4(hsotg, HCINT(chnum));
	hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
	if (!chan) {
		/* No software channel bound here; just ack the hardware */
		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
		DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
		return;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
			 chnum);
		dev_vdbg(hsotg->dev,
			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			 hcint, hcintmsk, hcint & hcintmsk);
	}

	/*
	 * Ack everything we saw, preserve the raw (unmasked) status for the
	 * sub-handlers in chan->hcint, then work only with unmasked bits.
	 */
	DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
	chan->hcint = hcint;
	hcint &= hcintmsk;

	/*
	 * If the channel was halted due to a dequeue, the qtd list might
	 * be empty or at least the first entry will not be the active qtd.
	 * In this case, take a shortcut and just release the channel.
	 */
	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		/*
		 * If the channel was halted, this should be the only
		 * interrupt unmasked
		 */
		WARN_ON(hcint != HCINTMSK_CHHLTD);
		if (hsotg->core_params->dma_desc_enable > 0)
			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
						    chan->halt_status);
		else
			dwc2_release_channel(hsotg, chan, NULL,
					     chan->halt_status);
		return;
	}

	if (list_empty(&chan->qh->qtd_list)) {
		/*
		 * TODO: Will this ever happen with the
		 * DWC2_HC_XFER_URB_DEQUEUE handling above?
		 */
		dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
			chnum);
		dev_dbg(hsotg->dev,
			"  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			chan->hcint, hcintmsk, hcint);
		chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
		disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
		chan->hcint = 0;
		return;
	}

	/* The head of the qtd list is the transfer currently on the channel */
	qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
			       qtd_list_entry);

	/*
	 * Slave mode: if CHHLTD is set together with any other status bit,
	 * suppress the halted handler so the specific condition handlers
	 * below run instead.
	 */
	if (hsotg->core_params->dma_enable <= 0) {
		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
			hcint &= ~HCINTMSK_CHHLTD;
	}

	if (hcint & HCINTMSK_XFERCOMPL) {
		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
		/*
		 * If NYET occurred at same time as Xfer Complete, the NYET is
		 * handled by the Xfer Complete interrupt handler. Don't want
		 * to call the NYET interrupt handler in this case.
		 */
		hcint &= ~HCINTMSK_NYET;
	}
	/* Dispatch each remaining unmasked condition to its handler */
	if (hcint & HCINTMSK_CHHLTD)
		dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_AHBERR)
		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_STALL)
		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_NAK)
		dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_ACK)
		dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_NYET)
		dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_XACTERR)
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_BBLERR)
		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_FRMOVRUN)
		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_DATATGLERR)
		dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);

	/* All conditions serviced; clear the saved status */
	chan->hcint = 0;
}
2036 
2037 /*
2038  * This interrupt indicates that one or more host channels has a pending
2039  * interrupt. There are multiple conditions that can cause each host channel
2040  * interrupt. This function determines which conditions have occurred for each
2041  * host channel interrupt and handles them appropriately.
2042  */
2043 static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2044 {
2045 	u32 haint;
2046 	int i;
2047 
2048 	haint = DWC2_READ_4(hsotg, HAINT);
2049 	if (dbg_perio()) {
2050 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
2051 
2052 		dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2053 	}
2054 
2055 	for (i = 0; i < hsotg->core_params->host_channels; i++) {
2056 		if (haint & (1 << i))
2057 			dwc2_hc_n_intr(hsotg, i);
2058 	}
2059 }
2060 
2061 /* This function handles interrupts for the HCD */
2062 irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
2063 {
2064 	u32 gintsts, dbg_gintsts;
2065 	irqreturn_t retval = IRQ_NONE;
2066 
2067 	if (!dwc2_is_controller_alive(hsotg)) {
2068 		dev_warn(hsotg->dev, "Controller is dead\n");
2069 		return retval;
2070 	}
2071 
2072 	KASSERT(mutex_owned(&hsotg->lock));
2073 
2074 	/* Check if HOST Mode */
2075 	if (dwc2_is_host_mode(hsotg)) {
2076 		gintsts = dwc2_read_core_intr(hsotg);
2077 		if (!gintsts) {
2078 			return retval;
2079 		}
2080 
2081 		retval = IRQ_HANDLED;
2082 
2083 		dbg_gintsts = gintsts;
2084 #ifndef DEBUG_SOF
2085 		dbg_gintsts &= ~GINTSTS_SOF;
2086 #endif
2087 		if (!dbg_perio())
2088 			dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
2089 					 GINTSTS_PTXFEMP);
2090 
2091 		/* Only print if there are any non-suppressed interrupts left */
2092 		if (dbg_gintsts)
2093 			dev_vdbg(hsotg->dev,
2094 				 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
2095 				 gintsts);
2096 
2097 		if (gintsts & GINTSTS_SOF)
2098 			dwc2_sof_intr(hsotg);
2099 		if (gintsts & GINTSTS_RXFLVL)
2100 			dwc2_rx_fifo_level_intr(hsotg);
2101 		if (gintsts & GINTSTS_NPTXFEMP)
2102 			dwc2_np_tx_fifo_empty_intr(hsotg);
2103 		if (gintsts & GINTSTS_PRTINT)
2104 			dwc2_port_intr(hsotg);
2105 		if (gintsts & GINTSTS_HCHINT)
2106 			dwc2_hc_intr(hsotg);
2107 		if (gintsts & GINTSTS_PTXFEMP)
2108 			dwc2_perio_tx_fifo_empty_intr(hsotg);
2109 
2110 		if (dbg_gintsts) {
2111 			dev_vdbg(hsotg->dev,
2112 				 "DWC OTG HCD Finished Servicing Interrupts\n");
2113 			dev_vdbg(hsotg->dev,
2114 				 "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
2115 				 DWC2_READ_4(hsotg, GINTSTS),
2116 				 DWC2_READ_4(hsotg, GINTMSK));
2117 		}
2118 	}
2119 
2120 	return retval;
2121 }
2122