xref: /netbsd-src/sys/external/bsd/dwc2/dist/dwc2_hcdintr.c (revision 6a493d6bc668897c91594964a732d38505b70cbb)
1 /*	$NetBSD: dwc2_hcdintr.c,v 1.6 2013/11/24 12:25:19 skrll Exp $	*/
2 
3 /*
4  * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
5  *
6  * Copyright (C) 2004-2013 Synopsys, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The names of the above-listed copyright holders may not be used
18  *    to endorse or promote products derived from this software without
19  *    specific prior written permission.
20  *
21  * ALTERNATIVELY, this software may be distributed under the terms of the
22  * GNU General Public License ("GPL") as published by the Free Software
23  * Foundation; either version 2 of the License, or (at your option) any
24  * later version.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * This file contains the interrupt handlers for Host mode
41  */
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: dwc2_hcdintr.c,v 1.6 2013/11/24 12:25:19 skrll Exp $");
44 
45 #include <sys/types.h>
46 #include <sys/pool.h>
47 
48 #include <dev/usb/usb.h>
49 #include <dev/usb/usbdi.h>
50 #include <dev/usb/usbdivar.h>
51 #include <dev/usb/usb_mem.h>
52 
53 #include <machine/param.h>
54 
55 #include <linux/kernel.h>
56 
57 #include <dwc2/dwc2.h>
58 #include <dwc2/dwc2var.h>
59 
60 #include "dwc2_core.h"
61 #include "dwc2_hcd.h"
62 
/*
 * Debug-only helper: records the (micro)frame number whenever one or
 * more SOF interrupts appear to have been missed (frame number did not
 * advance by exactly one), and dumps the collected table once it is
 * full. Compiles to an empty function unless
 * CONFIG_USB_DWC2_TRACK_MISSED_SOFS is defined.
 */
static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
	u16 frnum = hsotg->frame_number;

	if (hsotg->frame_num_idx >= FRAME_NUM_ARRAY_SIZE) {
		/* Table is full: dump it exactly once */
		if (!hsotg->dumped_frame_num_array) {
			int idx;

			dev_info(hsotg->dev, "Frame     Last Frame\n");
			dev_info(hsotg->dev, "-----     ----------\n");
			for (idx = 0; idx < FRAME_NUM_ARRAY_SIZE; idx++) {
				dev_info(hsotg->dev, "0x%04x    0x%04x\n",
					 hsotg->frame_num_array[idx],
					 hsotg->last_frame_num_array[idx]);
			}
			hsotg->dumped_frame_num_array = 1;
		}
	} else if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) != frnum) {
		/* Frame number skipped: record current and previous values */
		hsotg->frame_num_array[hsotg->frame_num_idx] = frnum;
		hsotg->last_frame_num_array[hsotg->frame_num_idx] =
				hsotg->last_frame_num;
		hsotg->frame_num_idx++;
	}
	hsotg->last_frame_num = frnum;
#endif
}
93 
94 static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
95 				    struct dwc2_host_chan *chan,
96 				    struct dwc2_qtd *qtd)
97 {
98 // 	struct urb *usb_urb;
99 
100 	if (!chan->qh)
101 		return;
102 
103 	if (chan->qh->dev_speed == USB_SPEED_HIGH)
104 		return;
105 
106 	if (!qtd->urb)
107 		return;
108 
109 
110 	if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
111 		chan->qh->tt_buffer_dirty = 1;
112 			chan->qh->tt_buffer_dirty = 0;
113 	}
114 }
115 
/*
 * Handles the start-of-frame interrupt in host mode. Non-periodic
 * transactions may be queued to the DWC_otg controller for the current
 * (micro)frame. Periodic transactions may be queued to the controller
 * for the next (micro)frame.
 */
static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
{
	struct list_head *qh_entry;
	struct dwc2_qh *qh;
	enum dwc2_transaction_type tr_type;

#ifdef DEBUG_SOF
	dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
#endif

	/* Latch the current (micro)frame number for the scheduling below */
	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	dwc2_track_missed_sofs(hsotg);

	/* Determine whether any periodic QHs should be executed */
	qh_entry = hsotg->periodic_sched_inactive.next;
	while (qh_entry != &hsotg->periodic_sched_inactive) {
		qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
		/* Advance first: list_move() below would unlink qh_entry */
		qh_entry = qh_entry->next;
		if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number))
			/*
			 * Move QH to the ready list to be executed next
			 * (micro)frame
			 */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_ready);
	}
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);

	/* Clear interrupt */
	DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_SOF);
}
156 
/*
 * Handles the Rx FIFO Level Interrupt, which indicates that there is
 * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
 * memory if the DWC_otg controller is operating in Slave mode.
 */
static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
{
	u32 grxsts, chnum, bcnt, dpid, pktsts;
	struct dwc2_host_chan *chan;

	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");

	/*
	 * NOTE(review): GRXSTSP looks like the "pop" form of the Rx status
	 * register, i.e. this read consumes the status entry -- confirm
	 * against the DWC_otg databook before reordering anything here.
	 */
	grxsts = DWC2_READ_4(hsotg, GRXSTSP);
	chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
	chan = hsotg->hc_ptr_array[chnum];
	if (!chan) {
		dev_err(hsotg->dev, "Unable to get corresponding channel\n");
		return;
	}

	bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
	dpid = (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT;
	pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;

	/* Packet Status */
	if (dbg_perio()) {
		dev_vdbg(hsotg->dev, "    Ch num = %d\n", chnum);
		dev_vdbg(hsotg->dev, "    Count = %d\n", bcnt);
		dev_vdbg(hsotg->dev, "    DPID = %d, chan.dpid = %d\n", dpid,
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, "    PStatus = %d\n", pktsts);
	}

	switch (pktsts) {
	case GRXSTS_PKTSTS_HCHIN:
		/* Read the data into the host buffer */
		if (bcnt > 0) {
			dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);

			/* Update the HC fields for the next packet received */
			chan->xfer_count += bcnt;
			chan->xfer_buf += bcnt;
		}
		break;
	case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
	case GRXSTS_PKTSTS_DATATOGGLEERR:
	case GRXSTS_PKTSTS_HCHHALTED:
		/* Handled in interrupt, just ignore data */
		break;
	default:
		dev_err(hsotg->dev,
			"RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
		break;
	}
}
213 
214 /*
215  * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
216  * data packets may be written to the FIFO for OUT transfers. More requests
217  * may be written to the non-periodic request queue for IN transfers. This
218  * interrupt is enabled only in Slave mode.
219  */
220 static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
221 {
222 	dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
223 	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
224 }
225 
226 /*
227  * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
228  * packets may be written to the FIFO for OUT transfers. More requests may be
229  * written to the periodic request queue for IN transfers. This interrupt is
230  * enabled only in Slave mode.
231  */
232 static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
233 {
234 	if (dbg_perio())
235 		dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
236 	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
237 }
238 
/*
 * Handles the port transitioning to the enabled state (HPRT0.PrtEna).
 * Recalculates HFIR.FrInterval for the negotiated speed and, when FS/LS
 * low-power PHY clock support is configured, reprograms the PHY clock
 * selection in GUSBCFG/HCFG. A PHY clock change requires another port
 * reset, which is scheduled on the OTG workqueue; otherwise the
 * port-reset-change flag is set and the root hub is notified.
 *
 * @hprt0:        value of HPRT0 read by the caller
 * @hprt0_modify: caller's HPRT0 write-back value; HPRT0_RST is OR-ed in
 *                here when a new reset is needed
 */
static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
			      u32 *hprt0_modify)
{
	struct dwc2_core_params *params = hsotg->core_params;
	int do_reset = 0;
	u32 usbcfg;
	u32 prtspd;
	u32 hcfg;
	u32 fslspclksel;
	u32 hfir;

	dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Every time when port enables calculate HFIR.FrInterval */
	hfir = DWC2_READ_4(hsotg, HFIR);
	hfir &= ~HFIR_FRINT_MASK;
	hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
		HFIR_FRINT_MASK;
	DWC2_WRITE_4(hsotg, HFIR, hfir);

	/* Check if we need to adjust the PHY clock speed for low power */
	if (!params->host_support_fs_ls_low_power) {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;

		/* Notify the root hub of the status change */
		dwc2_root_intr(hsotg->hsotg_sc);
		return;
	}

	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
	prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;

	if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
		/* Low power */
		if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
			/* Set PHY low power clock select for FS/LS devices */
			usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
			do_reset = 1;
		}

		hcfg = DWC2_READ_4(hsotg, HCFG);
		fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
			      HCFG_FSLSPCLKSEL_SHIFT;

		if (prtspd == HPRT0_SPD_LOW_SPEED &&
		    params->host_ls_low_power_phy_clk ==
		    DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
			/* 6 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 6 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				DWC2_WRITE_4(hsotg, HCFG, hcfg);
				do_reset = 1;
			}
		} else {
			/* 48 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 48 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				DWC2_WRITE_4(hsotg, HCFG, hcfg);
				do_reset = 1;
			}
		}
	} else {
		/* Not low power */
		if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
			usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
			do_reset = 1;
		}
	}

	if (do_reset) {
		/* PHY clock changed: request a new reset after 60 ms */
		*hprt0_modify |= HPRT0_RST;
		queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
				   msecs_to_jiffies(60));
	} else {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;
		dwc2_root_intr(hsotg->hsotg_sc);

	}
}
329 
/*
 * There are multiple conditions that can cause a port interrupt. This function
 * determines which interrupt conditions have occurred and handles them
 * appropriately.
 */
static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
{
	u32 hprt0;
	u32 hprt0_modify;

	dev_vdbg(hsotg->dev, "--Port Interrupt--\n");

	hprt0 = DWC2_READ_4(hsotg, HPRT0);
	hprt0_modify = hprt0;

	/*
	 * Clear appropriate bits in HPRT0 to clear the interrupt bit in
	 * GINTSTS
	 */
	hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
			  HPRT0_OVRCURRCHG);

	/*
	 * Port Connect Detected
	 * Set flag and clear if detected
	 */
	if (hprt0 & HPRT0_CONNDET) {
		dev_vdbg(hsotg->dev,
			 "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
			 hprt0);
		hsotg->flags.b.port_connect_status_change = 1;
		hsotg->flags.b.port_connect_status = 1;
		hprt0_modify |= HPRT0_CONNDET;

		/*
		 * The Hub driver asserts a reset when it sees port connect
		 * status change flag
		 */
	}

	/*
	 * Port Enable Changed
	 * Clear if detected - Set internal flag if disabled
	 */
	if (hprt0 & HPRT0_ENACHG) {
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
			 hprt0, !!(hprt0 & HPRT0_ENA));
		hprt0_modify |= HPRT0_ENACHG;
		if (hprt0 & HPRT0_ENA)
			/* May OR HPRT0_RST into hprt0_modify (PHY reclock) */
			dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
		else
			hsotg->flags.b.port_enable_change = 1;
	}

	/* Overcurrent Change Interrupt */
	if (hprt0 & HPRT0_OVRCURRCHG) {
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
			 hprt0);
		hsotg->flags.b.port_over_current_change = 1;
		hprt0_modify |= HPRT0_OVRCURRCHG;
	}

	/* Clear Port Interrupts */
	DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify);

	/* Notify the root hub only if a flag it polls actually changed */
	if (hsotg->flags.b.port_connect_status_change ||
	    hsotg->flags.b.port_enable_change ||
	    hsotg->flags.b.port_over_current_change)
		dwc2_root_intr(hsotg->hsotg_sc);
}
402 
403 /*
404  * Gets the actual length of a transfer after the transfer halts. halt_status
405  * holds the reason for the halt.
406  *
407  * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
408  * is set to 1 upon return if less than the requested number of bytes were
409  * transferred. short_read may also be NULL on entry, in which case it remains
410  * unchanged.
411  */
412 static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
413 				       struct dwc2_host_chan *chan, int chnum,
414 				       struct dwc2_qtd *qtd,
415 				       enum dwc2_halt_status halt_status,
416 				       int *short_read)
417 {
418 	u32 hctsiz, count, length;
419 
420 	hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
421 
422 	if (halt_status == DWC2_HC_XFER_COMPLETE) {
423 		if (chan->ep_is_in) {
424 			count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
425 				TSIZ_XFERSIZE_SHIFT;
426 			length = chan->xfer_len - count;
427 			if (short_read != NULL)
428 				*short_read = (count != 0);
429 		} else if (chan->qh->do_split) {
430 			length = qtd->ssplit_out_xfer_count;
431 		} else {
432 			length = chan->xfer_len;
433 		}
434 	} else {
435 		/*
436 		 * Must use the hctsiz.pktcnt field to determine how much data
437 		 * has been transferred. This field reflects the number of
438 		 * packets that have been transferred via the USB. This is
439 		 * always an integral number of packets if the transfer was
440 		 * halted before its normal completion. (Can't use the
441 		 * hctsiz.xfersize field because that reflects the number of
442 		 * bytes transferred via the AHB, not the USB).
443 		 */
444 		count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
445 		length = (chan->start_pkt_count - count) * chan->max_packet;
446 	}
447 
448 	return length;
449 }
450 
/**
 * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
 * Complete interrupt on the host channel. Updates the actual_length field
 * of the URB based on the number of bytes transferred via the host channel.
 * Sets the URB status if the data transfer is finished.
 *
 * Return: 1 if the data transfer specified by the URB is completely finished,
 * 0 otherwise
 */
static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 struct dwc2_hcd_urb *urb,
				 struct dwc2_qtd *qtd)
{
	u32 hctsiz;
	int xfer_done = 0;
	int short_read = 0;
	int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
						      DWC2_HC_XFER_COMPLETE,
						      &short_read);

	/* Never let actual_length run past the requested length */
	if (urb->actual_length + xfer_length > urb->length) {
		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
		xfer_length = urb->length - urb->actual_length;
	}

	/* Non DWORD-aligned buffer case handling */
	if (chan->align_buf && xfer_length && chan->ep_is_in) {
		/*
		 * IN data landed in the aligned bounce buffer
		 * (qh->dw_align_buf); sync and copy it back into the
		 * caller's buffer at the current offset.
		 */
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_POSTREAD);
		memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
		       xfer_length);
		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_PREREAD);
	}

	dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
		 urb->actual_length, xfer_length);
	urb->actual_length += xfer_length;

	/*
	 * A bulk OUT that exactly fills the request with a multiple of the
	 * max packet size still owes a zero-length packet when
	 * URB_SEND_ZERO_PACKET is set, so the transfer is not done yet.
	 */
	if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
	    (urb->flags & URB_SEND_ZERO_PACKET) &&
	    urb->actual_length >= urb->length &&
	    !(urb->length % chan->max_packet)) {
		xfer_done = 0;
	} else if (short_read || urb->actual_length >= urb->length) {
		/* A short read completes the URB early with success */
		xfer_done = 1;
		urb->status = 0;
	}

	hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
	dev_vdbg(hsotg->dev, "  chan->xfer_len %d\n", chan->xfer_len);
	dev_vdbg(hsotg->dev, "  hctsiz.xfersize %d\n",
		 (hctsiz & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n", urb->length);
	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n", urb->actual_length);
	dev_vdbg(hsotg->dev, "  short_read %d, xfer_done %d\n", short_read,
		 xfer_done);

	return xfer_done;
}
513 
514 /*
515  * Save the starting data toggle for the next transfer. The data toggle is
516  * saved in the QH for non-control transfers and it's saved in the QTD for
517  * control transfers.
518  */
519 void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
520 			       struct dwc2_host_chan *chan, int chnum,
521 			       struct dwc2_qtd *qtd)
522 {
523 	u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
524 	u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
525 
526 	if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
527 		if (pid == TSIZ_SC_MC_PID_DATA0)
528 			chan->qh->data_toggle = DWC2_HC_PID_DATA0;
529 		else
530 			chan->qh->data_toggle = DWC2_HC_PID_DATA1;
531 	} else {
532 		if (pid == TSIZ_SC_MC_PID_DATA0)
533 			qtd->data_toggle = DWC2_HC_PID_DATA0;
534 		else
535 			qtd->data_toggle = DWC2_HC_PID_DATA1;
536 	}
537 }
538 
/**
 * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
 * the transfer is stopped for any reason. The fields of the current entry in
 * the frame descriptor array are set based on the transfer state and the input
 * halt_status. Completes the Isochronous URB if all the URB frames have been
 * completed.
 *
 * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
 * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
 */
static enum dwc2_halt_status dwc2_update_isoc_urb_state(
		struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		int chnum, struct dwc2_qtd *qtd,
		enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_hcd_urb *urb = qtd->urb;

	if (!urb)
		return DWC2_HC_XFER_NO_HALT_STATUS;

	/* Descriptor for the frame currently in flight */
	frame_desc = &urb->iso_descs[qtd->isoc_frame_index];

	switch (halt_status) {
	case DWC2_HC_XFER_COMPLETE:
		frame_desc->status = 0;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		if (chan->align_buf && frame_desc->actual_length &&
		    chan->ep_is_in) {
			/* Copy IN data back from the aligned bounce buffer */
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_POSTREAD);
			memcpy(urb->buf + frame_desc->offset +
			       qtd->isoc_split_offset, chan->qh->dw_align_buf,
			       frame_desc->actual_length);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_PREREAD);
		}
		break;
	case DWC2_HC_XFER_FRAME_OVERRUN:
		urb->error_count++;
		if (chan->ep_is_in)
			frame_desc->status = -ENOSR;
		else
			frame_desc->status = -ECOMM;
		frame_desc->actual_length = 0;
		break;
	case DWC2_HC_XFER_BABBLE_ERR:
		urb->error_count++;
		frame_desc->status = -EOVERFLOW;
		/* Don't need to update actual_length in this case */
		break;
	case DWC2_HC_XFER_XACT_ERR:
		urb->error_count++;
		frame_desc->status = -EPROTO;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		if (chan->align_buf && frame_desc->actual_length &&
		    chan->ep_is_in) {
			/* Partial data may still have arrived; copy it back */
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_POSTREAD);
			memcpy(urb->buf + frame_desc->offset +
			       qtd->isoc_split_offset, chan->qh->dw_align_buf,
			       frame_desc->actual_length);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_PREREAD);
		}

		/* Skip whole frame */
		if (chan->qh->do_split &&
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			/* Abandon the split so the next frame starts clean */
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
		}

		break;
	default:
		dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
			halt_status);
		break;
	}

	if (++qtd->isoc_frame_index == urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers. The individual
		 * frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		halt_status = DWC2_HC_XFER_URB_COMPLETE;
	} else {
		halt_status = DWC2_HC_XFER_COMPLETE;
	}

	return halt_status;
}
643 
644 /*
645  * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
646  * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
647  * still linked to the QH, the QH is added to the end of the inactive
648  * non-periodic schedule. For periodic QHs, removes the QH from the periodic
649  * schedule if no more QTDs are linked to the QH.
650  */
651 static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
652 			       int free_qtd)
653 {
654 	int continue_split = 0;
655 	struct dwc2_qtd *qtd;
656 
657 	if (dbg_qh(qh))
658 		dev_vdbg(hsotg->dev, "  %s(%p,%p,%d)\n", __func__,
659 			 hsotg, qh, free_qtd);
660 
661 	if (list_empty(&qh->qtd_list)) {
662 		dev_dbg(hsotg->dev, "## QTD list empty ##\n");
663 		goto no_qtd;
664 	}
665 
666 	qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
667 
668 	if (qtd->complete_split)
669 		continue_split = 1;
670 	else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
671 		 qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
672 		continue_split = 1;
673 
674 	if (free_qtd) {
675 		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
676 		continue_split = 0;
677 	}
678 
679 no_qtd:
680 	if (qh->channel)
681 		qh->channel->align_buf = 0;
682 	qh->channel = NULL;
683 	dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
684 }
685 
/**
 * dwc2_release_channel() - Releases a host channel for use by other transfers
 *
 * @hsotg:       The HCD state structure
 * @chan:        The host channel to release
 * @qtd:         The QTD associated with the host channel. This QTD may be
 *               freed if the transfer is complete or an error has occurred.
 * @halt_status: Reason the channel is being released. This status
 *               determines the actions taken by this function.
 *
 * Also attempts to select and queue more transactions since at least one host
 * channel is available.
 */
static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan,
				 struct dwc2_qtd *qtd,
				 enum dwc2_halt_status halt_status)
{
	enum dwc2_transaction_type tr_type;
	u32 haintmsk;
	int free_qtd = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "  %s: channel %d, halt_status %d\n",
			 __func__, chan->hc_num, halt_status);

	/* Decide whether the current QTD is finished and should be freed */
	switch (halt_status) {
	case DWC2_HC_XFER_URB_COMPLETE:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_AHB_ERR:
	case DWC2_HC_XFER_STALL:
	case DWC2_HC_XFER_BABBLE_ERR:
		/* Fatal per-transfer errors: discard the QTD */
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_XACT_ERR:
		/* Give up only after three consecutive transaction errors */
		if (qtd && qtd->error_count >= 3) {
			dev_vdbg(hsotg->dev,
				 "  Complete URB with transaction error\n");
			free_qtd = 1;
			dwc2_host_complete(hsotg, qtd, -EPROTO);
		}
		break;
	case DWC2_HC_XFER_URB_DEQUEUE:
		/*
		 * The QTD has already been removed and the QH has been
		 * deactivated. Don't want to do anything except release the
		 * host channel and try to queue more transfers.
		 */
		goto cleanup;
	case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
		/*
		 * NOTE(review): unlike the XACT_ERR case above, qtd is not
		 * NULL-checked before dwc2_host_complete() here -- confirm
		 * callers never pass a NULL qtd with this status.
		 */
		dev_vdbg(hsotg->dev, "  Complete URB with I/O error\n");
		free_qtd = 1;
		dwc2_host_complete(hsotg, qtd, -EIO);
		break;
	case DWC2_HC_XFER_NO_HALT_STATUS:
	default:
		break;
	}

	dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);

cleanup:
	/*
	 * Release the host channel for use by other transfers. The cleanup
	 * function clears the channel interrupt enables and conditions, so
	 * there's no need to clear the Channel Halted interrupt separately.
	 */
	if (!list_empty(&chan->hc_list_entry))
		list_del(&chan->hc_list_entry);
	dwc2_hc_cleanup(hsotg, chan);
	list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);

	/* Return the channel reservation to the appropriate pool */
	if (hsotg->core_params->uframe_sched > 0) {
		hsotg->available_host_channels++;
	} else {
		switch (chan->ep_type) {
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			hsotg->non_periodic_channels--;
			break;
		default:
			/*
			 * Don't release reservations for periodic channels
			 * here. That's done when a periodic transfer is
			 * descheduled (i.e. when the QH is removed from the
			 * periodic schedule).
			 */
			break;
		}
	}

	/* Mask this channel's interrupt now that it is free */
	haintmsk = DWC2_READ_4(hsotg, HAINTMSK);
	haintmsk &= ~(1 << chan->hc_num);
	DWC2_WRITE_4(hsotg, HAINTMSK, haintmsk);

	/* Try to queue more transfers now that there's a free channel */
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);
}
787 
/*
 * Halts a host channel. If the channel cannot be halted immediately because
 * the request queue is full, this function ensures that the FIFO empty
 * interrupt for the appropriate queue is enabled so that the halt request can
 * be queued when there is space in the request queue.
 *
 * This function may also be called in DMA mode. In that case, the channel is
 * simply released since the core always halts the channel automatically in
 * DMA mode.
 */
static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
			      enum dwc2_halt_status halt_status)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (hsotg->core_params->dma_enable > 0) {
		/* DMA mode: the core halts the channel itself */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
		return;
	}

	/* Slave mode processing */
	dwc2_hc_halt(hsotg, chan, halt_status);

	if (chan->halt_on_queue) {
		u32 gintmsk;

		dev_vdbg(hsotg->dev, "Halt on queue\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			/*
			 * Make sure the Non-periodic Tx FIFO empty interrupt
			 * is enabled so that the non-periodic schedule will
			 * be processed
			 */
			gintmsk = DWC2_READ_4(hsotg, GINTMSK);
			gintmsk |= GINTSTS_NPTXFEMP;
			DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
		} else {
			dev_vdbg(hsotg->dev, "isoc/intr\n");
			/*
			 * Move the QH from the periodic queued schedule to
			 * the periodic assigned schedule. This allows the
			 * halt to be queued when the periodic schedule is
			 * processed.
			 */
			list_move(&chan->qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);

			/*
			 * Make sure the Periodic Tx FIFO Empty interrupt is
			 * enabled so that the periodic schedule will be
			 * processed
			 */
			gintmsk = DWC2_READ_4(hsotg, GINTMSK);
			gintmsk |= GINTSTS_PTXFEMP;
			DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
		}
	}
}
852 
853 /*
854  * Performs common cleanup for non-periodic transfers after a Transfer
855  * Complete interrupt. This function should be called after any endpoint type
856  * specific handling is finished to release the host channel.
857  */
858 static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
859 					    struct dwc2_host_chan *chan,
860 					    int chnum, struct dwc2_qtd *qtd,
861 					    enum dwc2_halt_status halt_status)
862 {
863 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
864 
865 	qtd->error_count = 0;
866 
867 	if (chan->hcint & HCINTMSK_NYET) {
868 		/*
869 		 * Got a NYET on the last transaction of the transfer. This
870 		 * means that the endpoint should be in the PING state at the
871 		 * beginning of the next transfer.
872 		 */
873 		dev_vdbg(hsotg->dev, "got NYET\n");
874 		chan->qh->ping_state = 1;
875 	}
876 
877 	/*
878 	 * Always halt and release the host channel to make it available for
879 	 * more transfers. There may still be more phases for a control
880 	 * transfer or more data packets for a bulk transfer at this point,
881 	 * but the host channel is still halted. A channel will be reassigned
882 	 * to the transfer when the non-periodic schedule is processed after
883 	 * the channel is released. This allows transactions to be queued
884 	 * properly via dwc2_hcd_queue_transactions, which also enables the
885 	 * Tx FIFO Empty interrupt if necessary.
886 	 */
887 	if (chan->ep_is_in) {
888 		/*
889 		 * IN transfers in Slave mode require an explicit disable to
890 		 * halt the channel. (In DMA mode, this call simply releases
891 		 * the channel.)
892 		 */
893 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
894 	} else {
895 		/*
896 		 * The channel is automatically disabled by the core for OUT
897 		 * transfers in Slave mode
898 		 */
899 		dwc2_release_channel(hsotg, chan, qtd, halt_status);
900 	}
901 }
902 
903 /*
904  * Performs common cleanup for periodic transfers after a Transfer Complete
905  * interrupt. This function should be called after any endpoint type specific
906  * handling is finished to release the host channel.
907  */
908 static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
909 					struct dwc2_host_chan *chan, int chnum,
910 					struct dwc2_qtd *qtd,
911 					enum dwc2_halt_status halt_status)
912 {
913 	u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
914 
915 	qtd->error_count = 0;
916 
917 	if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
918 		/* Core halts channel in these cases */
919 		dwc2_release_channel(hsotg, chan, qtd, halt_status);
920 	else
921 		/* Flush any outstanding requests from the Tx queue */
922 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
923 }
924 
/*
 * Handles the data accumulation for a completed CSPLIT of an isochronous
 * split IN transfer. Adds the bytes received in this complete split to the
 * current isoc frame descriptor and decides whether the frame, and possibly
 * the whole URB, is finished.
 *
 * Returns 1 if the host channel was released, 0 otherwise (caller must then
 * continue its own completion handling).
 */
static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u32 len;

	if (!qtd->urb)
		return 0;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
					  DWC2_HC_XFER_COMPLETE, NULL);
	if (!len) {
		/* Nothing received: restart this split from the beginning */
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
		return 0;
	}

	frame_desc->actual_length += len;

	if (chan->align_buf && len) {
		/*
		 * Non DWORD-aligned buffer: data landed in the bounce buffer,
		 * so sync and copy it back into the URB buffer at the current
		 * split offset within this frame.
		 */
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		usb_syncmem(qtd->urb->usbdma, 0, qtd->urb->length,
			    BUS_DMASYNC_POSTREAD);
		memcpy(qtd->urb->buf + frame_desc->offset +
		       qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
		usb_syncmem(qtd->urb->usbdma, 0, qtd->urb->length,
			    BUS_DMASYNC_PREREAD);
	}

	qtd->isoc_split_offset += len;

	if (frame_desc->actual_length >= frame_desc->length) {
		/* Current isoc frame is complete; move to the next one */
		frame_desc->status = 0;
		qtd->isoc_frame_index++;
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
	}

	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
		/* All frames done: complete the URB */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_URB_COMPLETE);
	} else {
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_NO_HALT_STATUS);
	}

	return 1;	/* Indicates that channel released */
}
976 
977 /*
978  * Handles a host channel Transfer Complete interrupt. This handler may be
979  * called in either DMA mode or Slave mode.
980  */
981 static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
982 				  struct dwc2_host_chan *chan, int chnum,
983 				  struct dwc2_qtd *qtd)
984 {
985 	struct dwc2_hcd_urb *urb = qtd->urb;
986 	int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
987 	enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
988 	int urb_xfer_done;
989 
990 	if (dbg_hc(chan))
991 		dev_vdbg(hsotg->dev,
992 			 "--Host Channel %d Interrupt: Transfer Complete--\n",
993 			 chnum);
994 
995 	if (hsotg->core_params->dma_desc_enable > 0) {
996 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
997 		if (pipe_type == USB_ENDPOINT_XFER_ISOC)
998 			/* Do not disable the interrupt, just clear it */
999 			return;
1000 		goto handle_xfercomp_done;
1001 	}
1002 
1003 	/* Handle xfer complete on CSPLIT */
1004 	if (chan->qh->do_split) {
1005 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
1006 		    hsotg->core_params->dma_enable > 0) {
1007 			if (qtd->complete_split &&
1008 			    dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
1009 							qtd))
1010 				goto handle_xfercomp_done;
1011 		} else {
1012 			qtd->complete_split = 0;
1013 		}
1014 	}
1015 
1016 	if (!urb)
1017 		goto handle_xfercomp_done;
1018 
1019 	/* Update the QTD and URB states */
1020 	switch (pipe_type) {
1021 	case USB_ENDPOINT_XFER_CONTROL:
1022 		switch (qtd->control_phase) {
1023 		case DWC2_CONTROL_SETUP:
1024 			if (urb->length > 0)
1025 				qtd->control_phase = DWC2_CONTROL_DATA;
1026 			else
1027 				qtd->control_phase = DWC2_CONTROL_STATUS;
1028 			dev_vdbg(hsotg->dev,
1029 				 "  Control setup transaction done\n");
1030 			halt_status = DWC2_HC_XFER_COMPLETE;
1031 			break;
1032 		case DWC2_CONTROL_DATA:
1033 			urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
1034 							      chnum, urb, qtd);
1035 			if (urb_xfer_done) {
1036 				qtd->control_phase = DWC2_CONTROL_STATUS;
1037 				dev_vdbg(hsotg->dev,
1038 					 "  Control data transfer done\n");
1039 			} else {
1040 				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1041 							  qtd);
1042 			}
1043 			halt_status = DWC2_HC_XFER_COMPLETE;
1044 			break;
1045 		case DWC2_CONTROL_STATUS:
1046 			dev_vdbg(hsotg->dev, "  Control transfer complete\n");
1047 			if (urb->status == -EINPROGRESS)
1048 				urb->status = 0;
1049 			dwc2_host_complete(hsotg, qtd, urb->status);
1050 			halt_status = DWC2_HC_XFER_URB_COMPLETE;
1051 			break;
1052 		}
1053 
1054 		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1055 						halt_status);
1056 		break;
1057 	case USB_ENDPOINT_XFER_BULK:
1058 		dev_vdbg(hsotg->dev, "  Bulk transfer complete\n");
1059 		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1060 						      qtd);
1061 		if (urb_xfer_done) {
1062 			dwc2_host_complete(hsotg, qtd, urb->status);
1063 			halt_status = DWC2_HC_XFER_URB_COMPLETE;
1064 		} else {
1065 			halt_status = DWC2_HC_XFER_COMPLETE;
1066 		}
1067 
1068 		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1069 		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1070 						halt_status);
1071 		break;
1072 	case USB_ENDPOINT_XFER_INT:
1073 		dev_vdbg(hsotg->dev, "  Interrupt transfer complete\n");
1074 		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1075 						      qtd);
1076 
1077 		/*
1078 		 * Interrupt URB is done on the first transfer complete
1079 		 * interrupt
1080 		 */
1081 		if (urb_xfer_done) {
1082 			dwc2_host_complete(hsotg, qtd, urb->status);
1083 			halt_status = DWC2_HC_XFER_URB_COMPLETE;
1084 		} else {
1085 			halt_status = DWC2_HC_XFER_COMPLETE;
1086 		}
1087 
1088 		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1089 		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1090 					    halt_status);
1091 		break;
1092 	case USB_ENDPOINT_XFER_ISOC:
1093 		if (dbg_perio())
1094 			dev_vdbg(hsotg->dev, "  Isochronous transfer complete\n");
1095 		if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
1096 			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1097 					chnum, qtd, DWC2_HC_XFER_COMPLETE);
1098 		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1099 					    halt_status);
1100 		break;
1101 	}
1102 
1103 handle_xfercomp_done:
1104 	disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
1105 }
1106 
1107 /*
1108  * Handles a host channel STALL interrupt. This handler may be called in
1109  * either DMA mode or Slave mode.
1110  */
1111 static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
1112 			       struct dwc2_host_chan *chan, int chnum,
1113 			       struct dwc2_qtd *qtd)
1114 {
1115 	struct dwc2_hcd_urb *urb = qtd->urb;
1116 	int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1117 
1118 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
1119 		chnum);
1120 
1121 	if (hsotg->core_params->dma_desc_enable > 0) {
1122 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1123 					    DWC2_HC_XFER_STALL);
1124 		goto handle_stall_done;
1125 	}
1126 
1127 	if (!urb)
1128 		goto handle_stall_halt;
1129 
1130 	if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
1131 		dwc2_host_complete(hsotg, qtd, -EPIPE);
1132 
1133 	if (pipe_type == USB_ENDPOINT_XFER_BULK ||
1134 	    pipe_type == USB_ENDPOINT_XFER_INT) {
1135 		dwc2_host_complete(hsotg, qtd, -EPIPE);
1136 		/*
1137 		 * USB protocol requires resetting the data toggle for bulk
1138 		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1139 		 * setup command is issued to the endpoint. Anticipate the
1140 		 * CLEAR_FEATURE command since a STALL has occurred and reset
1141 		 * the data toggle now.
1142 		 */
1143 		chan->qh->data_toggle = 0;
1144 	}
1145 
1146 handle_stall_halt:
1147 	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
1148 
1149 handle_stall_done:
1150 	disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
1151 }
1152 
1153 /*
1154  * Updates the state of the URB when a transfer has been stopped due to an
1155  * abnormal condition before the transfer completes. Modifies the
1156  * actual_length field of the URB to reflect the number of bytes that have
1157  * actually been transferred via the host channel.
1158  */
1159 static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
1160 				      struct dwc2_host_chan *chan, int chnum,
1161 				      struct dwc2_hcd_urb *urb,
1162 				      struct dwc2_qtd *qtd,
1163 				      enum dwc2_halt_status halt_status)
1164 {
1165 	u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
1166 						      qtd, halt_status, NULL);
1167 	u32 hctsiz;
1168 
1169 	if (urb->actual_length + xfer_length > urb->length) {
1170 		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
1171 		xfer_length = urb->length - urb->actual_length;
1172 	}
1173 
1174 	/* Non DWORD-aligned buffer case handling */
1175 	if (chan->align_buf && xfer_length && chan->ep_is_in) {
1176 		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
1177 		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_POSTREAD);
1178 		memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
1179 		       xfer_length);
1180 		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_PREREAD);
1181 	}
1182 
1183 	urb->actual_length += xfer_length;
1184 
1185 	hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
1186 	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
1187 		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
1188 	dev_vdbg(hsotg->dev, "  chan->start_pkt_count %d\n",
1189 		 chan->start_pkt_count);
1190 	dev_vdbg(hsotg->dev, "  hctsiz.pktcnt %d\n",
1191 		 (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
1192 	dev_vdbg(hsotg->dev, "  chan->max_packet %d\n", chan->max_packet);
1193 	dev_vdbg(hsotg->dev, "  bytes_transferred %d\n",
1194 		 xfer_length);
1195 	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n",
1196 		 urb->actual_length);
1197 	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n",
1198 		 urb->length);
1199 }
1200 
1201 /*
1202  * Handles a host channel NAK interrupt. This handler may be called in either
1203  * DMA mode or Slave mode.
1204  */
1205 static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1206 			     struct dwc2_host_chan *chan, int chnum,
1207 			     struct dwc2_qtd *qtd)
1208 {
1209 	if (dbg_hc(chan))
1210 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
1211 			 chnum);
1212 
1213 	/*
1214 	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
1215 	 * interrupt. Re-start the SSPLIT transfer.
1216 	 */
1217 	if (chan->do_split) {
1218 		if (chan->complete_split)
1219 			qtd->error_count = 0;
1220 		qtd->complete_split = 0;
1221 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1222 		goto handle_nak_done;
1223 	}
1224 
1225 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1226 	case USB_ENDPOINT_XFER_CONTROL:
1227 	case USB_ENDPOINT_XFER_BULK:
1228 		if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
1229 			/*
1230 			 * NAK interrupts are enabled on bulk/control IN
1231 			 * transfers in DMA mode for the sole purpose of
1232 			 * resetting the error count after a transaction error
1233 			 * occurs. The core will continue transferring data.
1234 			 */
1235 			qtd->error_count = 0;
1236 			break;
1237 		}
1238 
1239 		/*
1240 		 * NAK interrupts normally occur during OUT transfers in DMA
1241 		 * or Slave mode. For IN transfers, more requests will be
1242 		 * queued as request queue space is available.
1243 		 */
1244 		qtd->error_count = 0;
1245 
1246 		if (!chan->qh->ping_state) {
1247 			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1248 						  qtd, DWC2_HC_XFER_NAK);
1249 			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1250 
1251 			if (chan->speed == USB_SPEED_HIGH)
1252 				chan->qh->ping_state = 1;
1253 		}
1254 
1255 		/*
1256 		 * Halt the channel so the transfer can be re-started from
1257 		 * the appropriate point or the PING protocol will
1258 		 * start/continue
1259 		 */
1260 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1261 		break;
1262 	case USB_ENDPOINT_XFER_INT:
1263 		qtd->error_count = 0;
1264 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1265 		break;
1266 	case USB_ENDPOINT_XFER_ISOC:
1267 		/* Should never get called for isochronous transfers */
1268 		dev_err(hsotg->dev, "NACK interrupt for ISOC transfer\n");
1269 		break;
1270 	}
1271 
1272 handle_nak_done:
1273 	disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
1274 }
1275 
1276 /*
1277  * Handles a host channel ACK interrupt. This interrupt is enabled when
1278  * performing the PING protocol in Slave mode, when errors occur during
1279  * either Slave mode or DMA mode, and during Start Split transactions.
1280  */
1281 static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
1282 			     struct dwc2_host_chan *chan, int chnum,
1283 			     struct dwc2_qtd *qtd)
1284 {
1285 	struct dwc2_hcd_iso_packet_desc *frame_desc;
1286 
1287 	if (dbg_hc(chan))
1288 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
1289 			 chnum);
1290 
1291 	if (chan->do_split) {
1292 		/* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
1293 		if (!chan->ep_is_in &&
1294 		    chan->data_pid_start != DWC2_HC_PID_SETUP)
1295 			qtd->ssplit_out_xfer_count = chan->xfer_len;
1296 
1297 		if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
1298 			qtd->complete_split = 1;
1299 			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1300 		} else {
1301 			/* ISOC OUT */
1302 			switch (chan->xact_pos) {
1303 			case DWC2_HCSPLT_XACTPOS_ALL:
1304 				break;
1305 			case DWC2_HCSPLT_XACTPOS_END:
1306 				qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
1307 				qtd->isoc_split_offset = 0;
1308 				break;
1309 			case DWC2_HCSPLT_XACTPOS_BEGIN:
1310 			case DWC2_HCSPLT_XACTPOS_MID:
1311 				/*
1312 				 * For BEGIN or MID, calculate the length for
1313 				 * the next microframe to determine the correct
1314 				 * SSPLIT token, either MID or END
1315 				 */
1316 				frame_desc = &qtd->urb->iso_descs[
1317 						qtd->isoc_frame_index];
1318 				qtd->isoc_split_offset += 188;
1319 
1320 				if (frame_desc->length - qtd->isoc_split_offset
1321 							<= 188)
1322 					qtd->isoc_split_pos =
1323 							DWC2_HCSPLT_XACTPOS_END;
1324 				else
1325 					qtd->isoc_split_pos =
1326 							DWC2_HCSPLT_XACTPOS_MID;
1327 				break;
1328 			}
1329 		}
1330 	} else {
1331 		qtd->error_count = 0;
1332 
1333 		if (chan->qh->ping_state) {
1334 			chan->qh->ping_state = 0;
1335 			/*
1336 			 * Halt the channel so the transfer can be re-started
1337 			 * from the appropriate point. This only happens in
1338 			 * Slave mode. In DMA mode, the ping_state is cleared
1339 			 * when the transfer is started because the core
1340 			 * automatically executes the PING, then the transfer.
1341 			 */
1342 			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1343 		}
1344 	}
1345 
1346 	/*
1347 	 * If the ACK occurred when _not_ in the PING state, let the channel
1348 	 * continue transferring data after clearing the error count
1349 	 */
1350 	disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
1351 }
1352 
1353 /*
1354  * Handles a host channel NYET interrupt. This interrupt should only occur on
1355  * Bulk and Control OUT endpoints and for complete split transactions. If a
1356  * NYET occurs at the same time as a Transfer Complete interrupt, it is
1357  * handled in the xfercomp interrupt handler, not here. This handler may be
1358  * called in either DMA mode or Slave mode.
1359  */
1360 static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
1361 			      struct dwc2_host_chan *chan, int chnum,
1362 			      struct dwc2_qtd *qtd)
1363 {
1364 	if (dbg_hc(chan))
1365 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
1366 			 chnum);
1367 
1368 	/*
1369 	 * NYET on CSPLIT
1370 	 * re-do the CSPLIT immediately on non-periodic
1371 	 */
1372 	if (chan->do_split && chan->complete_split) {
1373 		if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
1374 		    hsotg->core_params->dma_enable > 0) {
1375 			qtd->complete_split = 0;
1376 			qtd->isoc_split_offset = 0;
1377 			qtd->isoc_frame_index++;
1378 			if (qtd->urb &&
1379 			    qtd->isoc_frame_index == qtd->urb->packet_count) {
1380 				dwc2_host_complete(hsotg, qtd, 0);
1381 				dwc2_release_channel(hsotg, chan, qtd,
1382 						     DWC2_HC_XFER_URB_COMPLETE);
1383 			} else {
1384 				dwc2_release_channel(hsotg, chan, qtd,
1385 						DWC2_HC_XFER_NO_HALT_STATUS);
1386 			}
1387 			goto handle_nyet_done;
1388 		}
1389 
1390 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1391 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1392 			int frnum = dwc2_hcd_get_frame_number(hsotg);
1393 
1394 			if (dwc2_full_frame_num(frnum) !=
1395 			    dwc2_full_frame_num(chan->qh->sched_frame)) {
1396 				/*
1397 				 * No longer in the same full speed frame.
1398 				 * Treat this as a transaction error.
1399 				 */
1400 #if 0
1401 				/*
1402 				 * Todo: Fix system performance so this can
1403 				 * be treated as an error. Right now complete
1404 				 * splits cannot be scheduled precisely enough
1405 				 * due to other system activity, so this error
1406 				 * occurs regularly in Slave mode.
1407 				 */
1408 				qtd->error_count++;
1409 #endif
1410 				qtd->complete_split = 0;
1411 				dwc2_halt_channel(hsotg, chan, qtd,
1412 						  DWC2_HC_XFER_XACT_ERR);
1413 				/* Todo: add support for isoc release */
1414 				goto handle_nyet_done;
1415 			}
1416 		}
1417 
1418 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1419 		goto handle_nyet_done;
1420 	}
1421 
1422 	chan->qh->ping_state = 1;
1423 	qtd->error_count = 0;
1424 
1425 	dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
1426 				  DWC2_HC_XFER_NYET);
1427 	dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1428 
1429 	/*
1430 	 * Halt the channel and re-start the transfer so the PING protocol
1431 	 * will start
1432 	 */
1433 	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1434 
1435 handle_nyet_done:
1436 	disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
1437 }
1438 
1439 /*
1440  * Handles a host channel babble interrupt. This handler may be called in
1441  * either DMA mode or Slave mode.
1442  */
1443 static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1444 				struct dwc2_host_chan *chan, int chnum,
1445 				struct dwc2_qtd *qtd)
1446 {
1447 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1448 		chnum);
1449 
1450 // 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1451 
1452 	if (hsotg->core_params->dma_desc_enable > 0) {
1453 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1454 					    DWC2_HC_XFER_BABBLE_ERR);
1455 		goto disable_int;
1456 	}
1457 
1458 	if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1459 		dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
1460 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1461 	} else {
1462 		enum dwc2_halt_status halt_status;
1463 
1464 		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1465 						qtd, DWC2_HC_XFER_BABBLE_ERR);
1466 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1467 	}
1468 
1469 disable_int:
1470 	disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1471 }
1472 
1473 /*
1474  * Handles a host channel AHB error interrupt. This handler is only called in
1475  * DMA mode.
1476  */
1477 static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
1478 				struct dwc2_host_chan *chan, int chnum,
1479 				struct dwc2_qtd *qtd)
1480 {
1481 	struct dwc2_hcd_urb *urb = qtd->urb;
1482 	const char *pipetype, *speed;
1483 	u32 hcchar;
1484 	u32 hcsplt;
1485 	u32 hctsiz;
1486 	u32 hc_dma;
1487 
1488 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
1489 		chnum);
1490 
1491 	if (!urb)
1492 		goto handle_ahberr_halt;
1493 
1494 // 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1495 
1496 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
1497 	hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum));
1498 	hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
1499 	hc_dma = DWC2_READ_4(hsotg, HCDMA(chnum));
1500 
1501 	dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
1502 	dev_err(hsotg->dev, "  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
1503 	dev_err(hsotg->dev, "  hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
1504 	dev_err(hsotg->dev, "  Device address: %d\n",
1505 		dwc2_hcd_get_dev_addr(&urb->pipe_info));
1506 	dev_err(hsotg->dev, "  Endpoint: %d, %s\n",
1507 		dwc2_hcd_get_ep_num(&urb->pipe_info),
1508 		dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
1509 
1510 	switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
1511 	case USB_ENDPOINT_XFER_CONTROL:
1512 		pipetype = "CONTROL";
1513 		break;
1514 	case USB_ENDPOINT_XFER_BULK:
1515 		pipetype = "BULK";
1516 		break;
1517 	case USB_ENDPOINT_XFER_INT:
1518 		pipetype = "INTERRUPT";
1519 		break;
1520 	case USB_ENDPOINT_XFER_ISOC:
1521 		pipetype = "ISOCHRONOUS";
1522 		break;
1523 	default:
1524 		pipetype = "UNKNOWN";
1525 		break;
1526 	}
1527 
1528 	dev_err(hsotg->dev, "  Endpoint type: %s\n", pipetype);
1529 
1530 	switch (chan->speed) {
1531 	case USB_SPEED_HIGH:
1532 		speed = "HIGH";
1533 		break;
1534 	case USB_SPEED_FULL:
1535 		speed = "FULL";
1536 		break;
1537 	case USB_SPEED_LOW:
1538 		speed = "LOW";
1539 		break;
1540 	default:
1541 		speed = "UNKNOWN";
1542 		break;
1543 	}
1544 
1545 	dev_err(hsotg->dev, "  Speed: %s\n", speed);
1546 
1547 	dev_err(hsotg->dev, "  Max packet size: %d\n",
1548 		dwc2_hcd_get_mps(&urb->pipe_info));
1549 	dev_err(hsotg->dev, "  Data buffer length: %d\n", urb->length);
1550 	dev_err(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n",
1551 		urb->buf, (unsigned long)urb->dma);
1552 	dev_err(hsotg->dev, "  Setup buffer: %p, Setup DMA: %08lx\n",
1553 		urb->setup_packet, (unsigned long)urb->setup_dma);
1554 	dev_err(hsotg->dev, "  Interval: %d\n", urb->interval);
1555 
1556 	/* Core halts the channel for Descriptor DMA mode */
1557 	if (hsotg->core_params->dma_desc_enable > 0) {
1558 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1559 					    DWC2_HC_XFER_AHB_ERR);
1560 		goto handle_ahberr_done;
1561 	}
1562 
1563 	dwc2_host_complete(hsotg, qtd, -EIO);
1564 
1565 handle_ahberr_halt:
1566 	/*
1567 	 * Force a channel halt. Don't call dwc2_halt_channel because that won't
1568 	 * write to the HCCHARn register in DMA mode to force the halt.
1569 	 */
1570 	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
1571 
1572 handle_ahberr_done:
1573 	disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
1574 }
1575 
1576 /*
1577  * Handles a host channel transaction error interrupt. This handler may be
1578  * called in either DMA mode or Slave mode.
1579  */
1580 static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
1581 				 struct dwc2_host_chan *chan, int chnum,
1582 				 struct dwc2_qtd *qtd)
1583 {
1584 	dev_dbg(hsotg->dev,
1585 		"--Host Channel %d Interrupt: Transaction Error--\n", chnum);
1586 
1587 // 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1588 
1589 	if (hsotg->core_params->dma_desc_enable > 0) {
1590 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1591 					    DWC2_HC_XFER_XACT_ERR);
1592 		goto handle_xacterr_done;
1593 	}
1594 
1595 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1596 	case USB_ENDPOINT_XFER_CONTROL:
1597 	case USB_ENDPOINT_XFER_BULK:
1598 		qtd->error_count++;
1599 		if (!chan->qh->ping_state) {
1600 
1601 			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1602 						  qtd, DWC2_HC_XFER_XACT_ERR);
1603 			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1604 			if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
1605 				chan->qh->ping_state = 1;
1606 		}
1607 
1608 		/*
1609 		 * Halt the channel so the transfer can be re-started from
1610 		 * the appropriate point or the PING protocol will start
1611 		 */
1612 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1613 		break;
1614 	case USB_ENDPOINT_XFER_INT:
1615 		qtd->error_count++;
1616 		if (chan->do_split && chan->complete_split)
1617 			qtd->complete_split = 0;
1618 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1619 		break;
1620 	case USB_ENDPOINT_XFER_ISOC:
1621 		{
1622 			enum dwc2_halt_status halt_status;
1623 
1624 			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1625 					chnum, qtd, DWC2_HC_XFER_XACT_ERR);
1626 			dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1627 		}
1628 		break;
1629 	}
1630 
1631 handle_xacterr_done:
1632 	disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
1633 }
1634 
1635 /*
1636  * Handles a host channel frame overrun interrupt. This handler may be called
1637  * in either DMA mode or Slave mode.
1638  */
1639 static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
1640 				  struct dwc2_host_chan *chan, int chnum,
1641 				  struct dwc2_qtd *qtd)
1642 {
1643 	enum dwc2_halt_status halt_status;
1644 
1645 	if (dbg_hc(chan))
1646 		dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
1647 			chnum);
1648 
1649 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1650 
1651 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1652 	case USB_ENDPOINT_XFER_CONTROL:
1653 	case USB_ENDPOINT_XFER_BULK:
1654 		break;
1655 	case USB_ENDPOINT_XFER_INT:
1656 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1657 		break;
1658 	case USB_ENDPOINT_XFER_ISOC:
1659 		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1660 					qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1661 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1662 		break;
1663 	}
1664 
1665 	disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
1666 }
1667 
1668 /*
1669  * Handles a host channel data toggle error interrupt. This handler may be
1670  * called in either DMA mode or Slave mode.
1671  */
1672 static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
1673 				    struct dwc2_host_chan *chan, int chnum,
1674 				    struct dwc2_qtd *qtd)
1675 {
1676 	dev_dbg(hsotg->dev,
1677 		"--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
1678 
1679 	if (chan->ep_is_in)
1680 		qtd->error_count = 0;
1681 	else
1682 		dev_err(hsotg->dev,
1683 			"Data Toggle Error on OUT transfer, channel %d\n",
1684 			chnum);
1685 
1686 // 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1687 	disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
1688 }
1689 
1690 /*
1691  * For debug only. It checks that a valid halt status is set and that
1692  * HCCHARn.chdis is clear. If there's a problem, corrective action is
1693  * taken and a warning is issued.
1694  *
1695  * Return: true if halt status is ok, false otherwise
1696  */
1697 static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
1698 				struct dwc2_host_chan *chan, int chnum,
1699 				struct dwc2_qtd *qtd)
1700 {
1701 #ifdef DEBUG
1702 	u32 hcchar;
1703 	u32 hctsiz;
1704 	u32 hcintmsk;
1705 	u32 hcsplt;
1706 
1707 	if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
1708 		/*
1709 		 * This code is here only as a check. This condition should
1710 		 * never happen. Ignore the halt if it does occur.
1711 		 */
1712 		hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
1713 		hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
1714 		hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
1715 		hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum));
1716 		dev_dbg(hsotg->dev,
1717 			"%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
1718 			 __func__);
1719 		dev_dbg(hsotg->dev,
1720 			"channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
1721 			chnum, hcchar, hctsiz);
1722 		dev_dbg(hsotg->dev,
1723 			"hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
1724 			chan->hcint, hcintmsk, hcsplt);
1725 		if (qtd)
1726 			dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
1727 				qtd->complete_split);
1728 		dev_warn(hsotg->dev,
1729 			 "%s: no halt status, channel %d, ignoring interrupt\n",
1730 			 __func__, chnum);
1731 		return false;
1732 	}
1733 
1734 	/*
1735 	 * This code is here only as a check. hcchar.chdis should never be set
1736 	 * when the halt interrupt occurs. Halt the channel again if it does
1737 	 * occur.
1738 	 */
1739 	hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum));
1740 	if (hcchar & HCCHAR_CHDIS) {
1741 		dev_warn(hsotg->dev,
1742 			 "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
1743 			 __func__, hcchar);
1744 		chan->halt_pending = 0;
1745 		dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
1746 		return false;
1747 	}
1748 #endif
1749 
1750 	return true;
1751 }
1752 
1753 /*
1754  * Handles a host Channel Halted interrupt in DMA mode. This handler
1755  * determines the reason the channel halted and proceeds accordingly.
1756  */
1757 static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
1758 				    struct dwc2_host_chan *chan, int chnum,
1759 				    struct dwc2_qtd *qtd)
1760 {
1761 	u32 hcintmsk;
1762 	int out_nak_enh = 0;
1763 
1764 	if (dbg_hc(chan))
1765 		dev_vdbg(hsotg->dev,
1766 			 "--Host Channel %d Interrupt: DMA Channel Halted--\n",
1767 			 chnum);
1768 
1769 	/*
1770 	 * For core with OUT NAK enhancement, the flow for high-speed
1771 	 * CONTROL/BULK OUT is handled a little differently
1772 	 */
1773 	if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
1774 		if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
1775 		    (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1776 		     chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
1777 			out_nak_enh = 1;
1778 		}
1779 	}
1780 
1781 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1782 	    (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
1783 	     hsotg->core_params->dma_desc_enable <= 0)) {
1784 		if (hsotg->core_params->dma_desc_enable > 0)
1785 			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1786 						    chan->halt_status);
1787 		else
1788 			/*
1789 			 * Just release the channel. A dequeue can happen on a
1790 			 * transfer timeout. In the case of an AHB Error, the
1791 			 * channel was forced to halt because there's no way to
1792 			 * gracefully recover.
1793 			 */
1794 			dwc2_release_channel(hsotg, chan, qtd,
1795 					     chan->halt_status);
1796 		return;
1797 	}
1798 
1799 	hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
1800 
1801 	if (chan->hcint & HCINTMSK_XFERCOMPL) {
1802 		/*
1803 		 * Todo: This is here because of a possible hardware bug. Spec
1804 		 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
1805 		 * interrupt w/ACK bit set should occur, but I only see the
1806 		 * XFERCOMP bit, even with it masked out. This is a workaround
1807 		 * for that behavior. Should fix this when hardware is fixed.
1808 		 */
1809 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
1810 			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1811 		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
1812 	} else if (chan->hcint & HCINTMSK_STALL) {
1813 		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
1814 	} else if ((chan->hcint & HCINTMSK_XACTERR) &&
1815 		   hsotg->core_params->dma_desc_enable <= 0) {
1816 		if (out_nak_enh) {
1817 			if (chan->hcint &
1818 			    (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
1819 				dev_vdbg(hsotg->dev,
1820 					 "XactErr with NYET/NAK/ACK\n");
1821 				qtd->error_count = 0;
1822 			} else {
1823 				dev_vdbg(hsotg->dev,
1824 					 "XactErr without NYET/NAK/ACK\n");
1825 			}
1826 		}
1827 
1828 		/*
1829 		 * Must handle xacterr before nak or ack. Could get a xacterr
1830 		 * at the same time as either of these on a BULK/CONTROL OUT
1831 		 * that started with a PING. The xacterr takes precedence.
1832 		 */
1833 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1834 	} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
1835 		   hsotg->core_params->dma_desc_enable > 0) {
1836 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1837 	} else if ((chan->hcint & HCINTMSK_AHBERR) &&
1838 		   hsotg->core_params->dma_desc_enable > 0) {
1839 		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
1840 	} else if (chan->hcint & HCINTMSK_BBLERR) {
1841 		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
1842 	} else if (chan->hcint & HCINTMSK_FRMOVRUN) {
1843 		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
1844 	} else if (!out_nak_enh) {
1845 		if (chan->hcint & HCINTMSK_NYET) {
1846 			/*
1847 			 * Must handle nyet before nak or ack. Could get a nyet
1848 			 * at the same time as either of those on a BULK/CONTROL
1849 			 * OUT that started with a PING. The nyet takes
1850 			 * precedence.
1851 			 */
1852 			dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
1853 		} else if ((chan->hcint & HCINTMSK_NAK) &&
1854 			   !(hcintmsk & HCINTMSK_NAK)) {
1855 			/*
1856 			 * If nak is not masked, it's because a non-split IN
1857 			 * transfer is in an error state. In that case, the nak
1858 			 * is handled by the nak interrupt handler, not here.
1859 			 * Handle nak here for BULK/CONTROL OUT transfers, which
1860 			 * halt on a NAK to allow rewinding the buffer pointer.
1861 			 */
1862 			dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
1863 		} else if ((chan->hcint & HCINTMSK_ACK) &&
1864 			   !(hcintmsk & HCINTMSK_ACK)) {
1865 			/*
1866 			 * If ack is not masked, it's because a non-split IN
1867 			 * transfer is in an error state. In that case, the ack
1868 			 * is handled by the ack interrupt handler, not here.
1869 			 * Handle ack here for split transfers. Start splits
1870 			 * halt on ACK.
1871 			 */
1872 			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1873 		} else {
1874 			if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1875 			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1876 				/*
1877 				 * A periodic transfer halted with no other
1878 				 * channel interrupts set. Assume it was halted
1879 				 * by the core because it could not be completed
1880 				 * in its scheduled (micro)frame.
1881 				 */
1882 				dev_dbg(hsotg->dev,
1883 					"%s: Halt channel %d (assume incomplete periodic transfer)\n",
1884 					__func__, chnum);
1885 				dwc2_halt_channel(hsotg, chan, qtd,
1886 					DWC2_HC_XFER_PERIODIC_INCOMPLETE);
1887 			} else {
1888 				dev_err(hsotg->dev,
1889 					"%s: Channel %d - ChHltd set, but reason is unknown\n",
1890 					__func__, chnum);
1891 				dev_err(hsotg->dev,
1892 					"hcint 0x%08x, intsts 0x%08x\n",
1893 					chan->hcint,
1894 					DWC2_READ_4(hsotg, GINTSTS));
1895 			}
1896 		}
1897 	} else {
1898 		dev_info(hsotg->dev,
1899 			 "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
1900 			 chan->hcint);
1901 	}
1902 }
1903 
1904 /*
1905  * Handles a host channel Channel Halted interrupt
1906  *
1907  * In slave mode, this handler is called only when the driver specifically
1908  * requests a halt. This occurs during handling other host channel interrupts
1909  * (e.g. nak, xacterr, stall, nyet, etc.).
1910  *
1911  * In DMA mode, this is the interrupt that occurs when the core has finished
1912  * processing a transfer on a channel. Other host channel interrupts (except
1913  * ahberr) are disabled in DMA mode.
1914  */
1915 static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
1916 				struct dwc2_host_chan *chan, int chnum,
1917 				struct dwc2_qtd *qtd)
1918 {
1919 	if (dbg_hc(chan))
1920 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
1921 			 chnum);
1922 
1923 	if (hsotg->core_params->dma_enable > 0) {
1924 		dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
1925 	} else {
1926 		if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
1927 			return;
1928 		dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
1929 	}
1930 }
1931 
/*
 * Handles interrupt for a specific Host Channel
 *
 * Reads and acknowledges HCINT(chnum), then dispatches each unmasked
 * condition bit to its dedicated handler. Note the ordering constraints
 * below: the register must be acked before the handlers run, and the raw
 * (unmasked) status is preserved in chan->hcint for the handlers' use.
 */
static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan;
	u32 hcint, hcintmsk;

	chan = hsotg->hc_ptr_array[chnum];

	/*
	 * Read the status before the NULL-channel check so the interrupt
	 * can still be acknowledged (written back) even when no channel
	 * structure exists for this slot.
	 */
	hcint = DWC2_READ_4(hsotg, HCINT(chnum));
	hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
	if (!chan) {
		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
		/* Ack so the interrupt does not fire again */
		DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
		return;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
			 chnum);
		dev_vdbg(hsotg->dev,
			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			 hcint, hcintmsk, hcint & hcintmsk);
	}

	/* Ack all pending bits; keep the raw status for the sub-handlers */
	DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
	chan->hcint = hcint;
	/* From here on, the local copy holds only the unmasked bits */
	hcint &= hcintmsk;

	/*
	 * If the channel was halted due to a dequeue, the qtd list might
	 * be empty or at least the first entry will not be the active qtd.
	 * In this case, take a shortcut and just release the channel.
	 */
	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		/*
		 * If the channel was halted, this should be the only
		 * interrupt unmasked
		 */
		WARN_ON(hcint != HCINTMSK_CHHLTD);
		if (hsotg->core_params->dma_desc_enable > 0)
			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
						    chan->halt_status);
		else
			dwc2_release_channel(hsotg, chan, NULL,
					     chan->halt_status);
		return;
	}

	if (list_empty(&chan->qh->qtd_list)) {
		/*
		 * TODO: Will this ever happen with the
		 * DWC2_HC_XFER_URB_DEQUEUE handling above?
		 */
		dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
			chnum);
		dev_dbg(hsotg->dev,
			"  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			chan->hcint, hcintmsk, hcint);
		/* No transfer to attribute this to; stop watching for halts */
		chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
		disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
		chan->hcint = 0;
		return;
	}

	/* The active transfer is the head of the QH's qtd list */
	qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
			       qtd_list_entry);

	if (hsotg->core_params->dma_enable <= 0) {
		/*
		 * Slave mode: when CHHLTD arrives together with other status
		 * bits, drop CHHLTD here — the handler for the accompanying
		 * condition is expected to deal with the halt itself.
		 */
		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
			hcint &= ~HCINTMSK_CHHLTD;
	}

	if (hcint & HCINTMSK_XFERCOMPL) {
		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
		/*
		 * If NYET occurred at same time as Xfer Complete, the NYET is
		 * handled by the Xfer Complete interrupt handler. Don't want
		 * to call the NYET interrupt handler in this case.
		 */
		hcint &= ~HCINTMSK_NYET;
	}
	if (hcint & HCINTMSK_CHHLTD)
		dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_AHBERR)
		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_STALL)
		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_NAK)
		dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_ACK)
		dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_NYET)
		dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_XACTERR)
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_BBLERR)
		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_FRMOVRUN)
		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_DATATGLERR)
		dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);

	/* Raw status fully consumed for this interrupt */
	chan->hcint = 0;
}
2037 
2038 /*
2039  * This interrupt indicates that one or more host channels has a pending
2040  * interrupt. There are multiple conditions that can cause each host channel
2041  * interrupt. This function determines which conditions have occurred for each
2042  * host channel interrupt and handles them appropriately.
2043  */
2044 static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2045 {
2046 	u32 haint;
2047 	int i;
2048 
2049 	haint = DWC2_READ_4(hsotg, HAINT);
2050 	if (dbg_perio()) {
2051 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
2052 
2053 		dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2054 	}
2055 
2056 	for (i = 0; i < hsotg->core_params->host_channels; i++) {
2057 		if (haint & (1 << i))
2058 			dwc2_hc_n_intr(hsotg, i);
2059 	}
2060 }
2061 
2062 /* This function handles interrupts for the HCD */
2063 irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
2064 {
2065 	u32 gintsts, dbg_gintsts;
2066 	irqreturn_t retval = IRQ_NONE;
2067 
2068 	if (dwc2_check_core_status(hsotg) < 0) {
2069 		dev_warn(hsotg->dev, "Controller is disconnected\n");
2070 		return retval;
2071 	}
2072 
2073 	KASSERT(mutex_owned(&hsotg->lock));
2074 
2075 	/* Check if HOST Mode */
2076 	if (dwc2_is_host_mode(hsotg)) {
2077 		gintsts = dwc2_read_core_intr(hsotg);
2078 		if (!gintsts) {
2079 			return retval;
2080 		}
2081 
2082 		retval = IRQ_HANDLED;
2083 
2084 		dbg_gintsts = gintsts;
2085 #ifndef DEBUG_SOF
2086 		dbg_gintsts &= ~GINTSTS_SOF;
2087 #endif
2088 		if (!dbg_perio())
2089 			dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
2090 					 GINTSTS_PTXFEMP);
2091 
2092 		/* Only print if there are any non-suppressed interrupts left */
2093 		if (dbg_gintsts)
2094 			dev_vdbg(hsotg->dev,
2095 				 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
2096 				 gintsts);
2097 
2098 		if (gintsts & GINTSTS_SOF)
2099 			dwc2_sof_intr(hsotg);
2100 		if (gintsts & GINTSTS_RXFLVL)
2101 			dwc2_rx_fifo_level_intr(hsotg);
2102 		if (gintsts & GINTSTS_NPTXFEMP)
2103 			dwc2_np_tx_fifo_empty_intr(hsotg);
2104 		if (gintsts & GINTSTS_PRTINT)
2105 			dwc2_port_intr(hsotg);
2106 		if (gintsts & GINTSTS_HCHINT)
2107 			dwc2_hc_intr(hsotg);
2108 		if (gintsts & GINTSTS_PTXFEMP)
2109 			dwc2_perio_tx_fifo_empty_intr(hsotg);
2110 
2111 		if (dbg_gintsts) {
2112 			dev_vdbg(hsotg->dev,
2113 				 "DWC OTG HCD Finished Servicing Interrupts\n");
2114 			dev_vdbg(hsotg->dev,
2115 				 "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
2116 				 DWC2_READ_4(hsotg, GINTSTS),
2117 				 DWC2_READ_4(hsotg, GINTMSK));
2118 		}
2119 	}
2120 
2121 	return retval;
2122 }
2123