1 /* $OpenBSD: dwc2_hcdintr.c,v 1.6 2015/06/28 11:48:18 jmatthew Exp $ */ 2 /* $NetBSD: dwc2_hcdintr.c,v 1.11 2014/11/24 10:14:14 skrll Exp $ */ 3 4 /* 5 * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling 6 * 7 * Copyright (C) 2004-2013 Synopsys, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions, and the following disclaimer, 14 * without modification. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. The names of the above-listed copyright holders may not be used 19 * to endorse or promote products derived from this software without 20 * specific prior written permission. 21 * 22 * ALTERNATIVELY, this software may be distributed under the terms of the 23 * GNU General Public License ("GPL") as published by the Free Software 24 * Foundation; either version 2 of the License, or (at your option) any 25 * later version. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 28 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 29 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the interrupt handlers for Host mode
 */
#if 0
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2_hcdintr.c,v 1.11 2014/11/24 10:14:14 skrll Exp $");
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/pool.h>

#include <machine/bus.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <dev/usb/dwc2/dwc2.h>
#include <dev/usb/dwc2/dwc2var.h>

#include <dev/usb/dwc2/dwc2_core.h>
#include <dev/usb/dwc2/dwc2_hcd.h>

/*
 * This function is for debug only.
 *
 * Records (frame, previous-frame) pairs whenever a SOF interrupt arrives
 * with a frame number that is not last_frame_num + 1 (mod HFNUM_MAX_FRNUM),
 * i.e. when one or more SOFs were missed.  Once the recording array is full
 * the collected pairs are dumped once via dev_info().  Compiled out unless
 * CONFIG_USB_DWC2_TRACK_MISSED_SOFS is defined.
 */
STATIC void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
	u16 curr_frame_number = hsotg->frame_number;

	if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
		/* Record only discontinuities in the SOF frame sequence */
		if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) !=
		    curr_frame_number) {
			hsotg->frame_num_array[hsotg->frame_num_idx] =
				curr_frame_number;
			hsotg->last_frame_num_array[hsotg->frame_num_idx] =
				hsotg->last_frame_num;
			hsotg->frame_num_idx++;
		}
	} else if (!hsotg->dumped_frame_num_array) {
		int i;

		/* Array is full: dump it exactly once */
		dev_info(hsotg->dev, "Frame Last Frame\n");
		dev_info(hsotg->dev, "----- ----------\n");
		for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
			dev_info(hsotg->dev, "0x%04x 0x%04x\n",
				 hsotg->frame_num_array[i],
				 hsotg->last_frame_num_array[i]);
		}
		hsotg->dumped_frame_num_array = 1;
	}
	hsotg->last_frame_num = curr_frame_number;
#endif
}

/*
 * Marks (and immediately unmarks) the TT buffer of a FS/LS qh behind a
 * high-speed hub after a failed split transaction.
 *
 * NOTE(review): upstream Linux sets tt_buffer_dirty = 1, issues
 * usb_hub_clear_tt_buffer(), and clears the flag only when that call fails.
 * Here the hub call appears to be stubbed out of the port (see the
 * commented-out usb_urb), leaving a set-then-clear sequence with no lasting
 * effect -- confirm this is intentional for this port.
 */
STATIC void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd)
{
//	struct urb *usb_urb;

	/* Only FS/LS devices behind a TT are affected */
	if (!chan->qh)
		return;

	if (chan->qh->dev_speed == USB_SPEED_HIGH)
		return;

	if (!qtd->urb)
		return;


	if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
		chan->qh->tt_buffer_dirty = 1;
		chan->qh->tt_buffer_dirty = 0;
	}
}

/*
 * Handles the start-of-frame interrupt in host mode. Non-periodic
 * transactions may be queued to the DWC_otg controller for the current
 * (micro)frame. Periodic transactions may be queued to the controller
 * for the next (micro)frame.
 */
STATIC void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
{
	struct dwc2_qh *qh, *qhn;
	enum dwc2_transaction_type tr_type;

#ifdef DEBUG_SOF
	dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
#endif

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	dwc2_track_missed_sofs(hsotg);

	/* Determine whether any periodic QHs should be executed */
	qh = TAILQ_FIRST(&hsotg->periodic_sched_inactive);
	while (qh != NULL) {
		/* Fetch the successor first: qh may be moved to another list */
		qhn = TAILQ_NEXT(qh, qh_list_entry);
		if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number)) {
			/*
			 * Move QH to the ready list to be executed next
			 * (micro)frame
			 */
			TAILQ_REMOVE(&hsotg->periodic_sched_inactive, qh, qh_list_entry);
			TAILQ_INSERT_TAIL(&hsotg->periodic_sched_ready, qh, qh_list_entry);
		}
		qh = qhn;
	}
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);

	/* Clear interrupt */
	DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_SOF);
}

/*
 * Handles the Rx FIFO Level Interrupt, which
 * indicates that there is
 * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
 * memory if the DWC_otg controller is operating in Slave mode.
 */
STATIC void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
{
	u32 grxsts, chnum, bcnt, pktsts;
	struct dwc2_host_chan *chan;

	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");

	/* Reading GRXSTSP pops one status entry off the Rx status queue */
	grxsts = DWC2_READ_4(hsotg, GRXSTSP);
	chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
	chan = hsotg->hc_ptr_array[chnum];
	if (!chan) {
		dev_err(hsotg->dev, "Unable to get corresponding channel\n");
		return;
	}

	bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
	pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;

	/* Packet Status */
	if (dbg_perio()) {
		dev_vdbg(hsotg->dev, " Ch num = %d\n", chnum);
		dev_vdbg(hsotg->dev, " Count = %d\n", bcnt);
		dev_vdbg(hsotg->dev, " DPID = %d, chan.dpid = %d\n",
			 (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT,
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, " PStatus = %d\n", pktsts);
	}

	switch (pktsts) {
	case GRXSTS_PKTSTS_HCHIN:
		/* Read the data into the host buffer */
		if (bcnt > 0) {
			dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);

			/* Update the HC fields for the next packet received */
			chan->xfer_count += bcnt;
			chan->xfer_buf += bcnt;
		}
		break;
	case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
	case GRXSTS_PKTSTS_DATATOGGLEERR:
	case GRXSTS_PKTSTS_HCHHALTED:
		/* Handled in interrupt, just ignore data */
		break;
	default:
		dev_err(hsotg->dev,
			"RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
		break;
	}
}

/*
 * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
 * data packets may be written to the FIFO for OUT transfers. More requests
 * may be written to the non-periodic request queue for IN transfers.
 * This
 * interrupt is enabled only in Slave mode.
 */
STATIC void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
}

/*
 * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
 * packets may be written to the FIFO for OUT transfers. More requests may be
 * written to the periodic request queue for IN transfers. This interrupt is
 * enabled only in Slave mode.
 */
STATIC void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
}

/*
 * Completes port-enable handling: recomputes HFIR.FrInterval for the new
 * port speed and, when FS/LS low-power clocking is supported, reprograms the
 * PHY clock selection (GUSBCFG/HCFG).  Any clock change requires a new port
 * reset, which is requested by setting HPRT0_RST in *hprt0_modify and
 * scheduling the deferred reset work; otherwise the reset-change flag is set
 * and the root hub is notified.
 */
STATIC void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
			      u32 *hprt0_modify)
{
	struct dwc2_core_params *params = hsotg->core_params;
	int do_reset = 0;
	u32 usbcfg;
	u32 prtspd;
	u32 hcfg;
	u32 fslspclksel;
	u32 hfir;

	dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Every time when port enables calculate HFIR.FrInterval */
	hfir = DWC2_READ_4(hsotg, HFIR);
	hfir &= ~HFIR_FRINT_MASK;
	hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
		HFIR_FRINT_MASK;
	DWC2_WRITE_4(hsotg, HFIR, hfir);

	/* Check if we need to adjust the PHY clock speed for low power */
	if (!params->host_support_fs_ls_low_power) {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;

		dwc2_root_intr(hsotg->hsotg_sc);
		return;
	}

	usbcfg = DWC2_READ_4(hsotg, GUSBCFG);
	prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;

	if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
		/* Low power */
		if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
			/* Set PHY low power clock select for FS/LS devices */
			usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
			do_reset = 1;
		}

		hcfg = DWC2_READ_4(hsotg, HCFG);
		fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
			      HCFG_FSLSPCLKSEL_SHIFT;

		if (prtspd == HPRT0_SPD_LOW_SPEED &&
		    params->host_ls_low_power_phy_clk ==
		    DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
			/* 6 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 6 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				DWC2_WRITE_4(hsotg, HCFG, hcfg);
				do_reset = 1;
			}
		} else {
			/* 48 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 48 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				DWC2_WRITE_4(hsotg, HCFG, hcfg);
				do_reset = 1;
			}
		}
	} else {
		/* Not low power */
		if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
			usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
			DWC2_WRITE_4(hsotg, GUSBCFG, usbcfg);
			do_reset = 1;
		}
	}

	if (do_reset) {
		/* Caller writes *hprt0_modify back to HPRT0; the deferred
		 * work completes the reset after the 60 ms delay */
		*hprt0_modify |= HPRT0_RST;
		queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
				   msecs_to_jiffies(60));
	} else {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;
		dwc2_root_intr(hsotg->hsotg_sc);

	}
}

/*
 * There are multiple conditions that can cause a port interrupt. This function
 * determines which interrupt conditions have occurred and handles them
 * appropriately.
 */
STATIC void dwc2_port_intr(struct dwc2_hsotg *hsotg)
{
	u32 hprt0;
	u32 hprt0_modify;

	dev_vdbg(hsotg->dev, "--Port Interrupt--\n");

	hprt0 = DWC2_READ_4(hsotg, HPRT0);
	hprt0_modify = hprt0;

	/*
	 * Clear appropriate bits in HPRT0 to clear the interrupt bit in
	 * GINTSTS.  The change/detect bits are cleared by writing 1, so they
	 * are masked out here and only OR-ed back in for conditions that were
	 * actually observed.  HPRT0_ENA is masked as well -- NOTE(review):
	 * presumably because writing it back would disable the port; confirm
	 * against the controller databook.
	 */
	hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
			  HPRT0_OVRCURRCHG);

	/*
	 * Port Connect Detected
	 * Set flag and clear if detected
	 */
	if (hprt0 & HPRT0_CONNDET) {
		dev_vdbg(hsotg->dev,
			 "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
			 hprt0);
		hsotg->flags.b.port_connect_status_change = 1;
		hsotg->flags.b.port_connect_status = 1;
		hprt0_modify |= HPRT0_CONNDET;

		/*
		 * The Hub driver asserts a reset when it sees port connect
		 * status change flag
		 */
	}

	/*
	 * Port Enable Changed
	 * Clear if detected - Set internal flag if disabled
	 */
	if (hprt0 & HPRT0_ENACHG) {
		dev_vdbg(hsotg->dev,
			 " --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
			 hprt0, !!(hprt0 & HPRT0_ENA));
		hprt0_modify |= HPRT0_ENACHG;
		if (hprt0 & HPRT0_ENA)
			/* May add HPRT0_RST to hprt0_modify (PHY clock change) */
			dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
		else
			hsotg->flags.b.port_enable_change = 1;
	}

	/* Overcurrent Change Interrupt */
	if (hprt0 & HPRT0_OVRCURRCHG) {
		dev_vdbg(hsotg->dev,
			 " --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
			 hprt0);
		hsotg->flags.b.port_over_current_change = 1;
		hprt0_modify |= HPRT0_OVRCURRCHG;
	}

	/* Clear Port Interrupts */
	DWC2_WRITE_4(hsotg, HPRT0, hprt0_modify);

	/* Wake the root hub only if some status actually changed */
	if (hsotg->flags.b.port_connect_status_change ||
	    hsotg->flags.b.port_enable_change ||
	    hsotg->flags.b.port_over_current_change)
		dwc2_root_intr(hsotg->hsotg_sc);
}

/*
 * Gets the actual length of a transfer after the transfer halts. halt_status
 * holds the reason for the halt.
 *
 * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
 * is set to 1 upon return if less than the requested number of bytes were
 * transferred. short_read may also be NULL on entry, in which case it remains
 * unchanged.
 */
STATIC u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd,
				       enum dwc2_halt_status halt_status,
				       int *short_read)
{
	u32 hctsiz, count, length;

	hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->ep_is_in) {
			/* For IN, xfersize counts down: remaining = count */
			count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
				TSIZ_XFERSIZE_SHIFT;
			length = chan->xfer_len - count;
			if (short_read != NULL)
				*short_read = (count != 0);
		} else if (chan->qh->do_split) {
			/* Split OUT: the SSPLIT phase tracked the byte count */
			length = qtd->ssplit_out_xfer_count;
		} else {
			length = chan->xfer_len;
		}
	} else {
		/*
		 * Must use the hctsiz.pktcnt field to determine how much data
		 * has been transferred. This field reflects the number of
		 * packets that have been transferred via the USB. This is
		 * always an integral number of packets if the transfer was
		 * halted before its normal completion. (Can't use the
		 * hctsiz.xfersize field because that reflects the number of
		 * bytes transferred via the AHB, not the USB).
		 */
		count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
		length = (chan->start_pkt_count - count) * chan->max_packet;
	}

	return length;
}

/**
 * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
 * Complete interrupt on the host channel. Updates the actual_length field
 * of the URB based on the number of bytes transferred via the host channel.
 * Sets the URB status if the data transfer is finished.
 *
 * Return: 1 if the data transfer specified by the URB is completely finished,
 * 0 otherwise
 */
STATIC int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 struct dwc2_hcd_urb *urb,
				 struct dwc2_qtd *qtd)
{
	int xfer_done = 0;
	int short_read = 0;
	int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
						      DWC2_HC_XFER_COMPLETE,
						      &short_read);

	/* Never let actual_length exceed the requested length */
	if (urb->actual_length + xfer_length > urb->length) {
		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
		xfer_length = urb->length - urb->actual_length;
	}

	/* Non DWORD-aligned buffer case handling: data arrived in the
	 * bounce buffer, copy it back into the caller's buffer */
	if (chan->align_buf && xfer_length && chan->ep_is_in) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_POSTREAD);
		memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
		       xfer_length);
		usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_PREREAD);
	}

	dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
		 urb->actual_length, xfer_length);
	urb->actual_length += xfer_length;

	/*
	 * A bulk OUT that filled the requested length exactly, with
	 * URB_SEND_ZERO_PACKET set and a multiple of max_packet, still owes
	 * the zero-length terminating packet -- not done yet.
	 */
	if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
	    (urb->flags & URB_SEND_ZERO_PACKET) &&
	    urb->actual_length >= urb->length &&
	    !(urb->length % chan->max_packet)) {
		xfer_done = 0;
	} else if (short_read || urb->actual_length >= urb->length) {
		xfer_done = 1;
		urb->status = 0;
	}

	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
	dev_vdbg(hsotg->dev, " chan->xfer_len %d\n", chan->xfer_len);
	dev_vdbg(hsotg->dev, " hctsiz.xfersize %d\n",
		 (DWC2_READ_4(hsotg, HCTSIZ(chnum)) & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
	dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n", urb->length);
	dev_vdbg(hsotg->dev, " urb->actual_length %d\n", urb->actual_length);
	dev_vdbg(hsotg->dev, " short_read %d, xfer_done %d\n", short_read,
		 xfer_done);

	return xfer_done;
}

/*
 * Save the starting data toggle for the next transfer. The data toggle is
 * saved in the QH for non-control transfers and it's saved in the QTD for
 * control transfers.
 */
void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
			       struct dwc2_host_chan *chan, int chnum,
			       struct dwc2_qtd *qtd)
{
	/* The controller reports the next PID in HCTSIZ */
	u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));
	u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;

	if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		if (pid == TSIZ_SC_MC_PID_DATA0)
			chan->qh->data_toggle = DWC2_HC_PID_DATA0;
		else
			chan->qh->data_toggle = DWC2_HC_PID_DATA1;
	} else {
		if (pid == TSIZ_SC_MC_PID_DATA0)
			qtd->data_toggle = DWC2_HC_PID_DATA0;
		else
			qtd->data_toggle = DWC2_HC_PID_DATA1;
	}
}

/**
 * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
 * the transfer is stopped for any reason. The fields of the current entry in
 * the frame descriptor array are set based on the transfer state and the input
 * halt_status. Completes the Isochronous URB if all the URB frames have been
 * completed.
 *
 * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
 * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
 */
STATIC enum dwc2_halt_status dwc2_update_isoc_urb_state(
		struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		int chnum, struct dwc2_qtd *qtd,
		enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_hcd_urb *urb = qtd->urb;

	if (!urb)
		return DWC2_HC_XFER_NO_HALT_STATUS;

	frame_desc = &urb->iso_descs[qtd->isoc_frame_index];

	switch (halt_status) {
	case DWC2_HC_XFER_COMPLETE:
		frame_desc->status = 0;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling: copy received data
		 * out of the bounce buffer into the URB buffer */
		if (chan->align_buf && frame_desc->actual_length &&
		    chan->ep_is_in) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_POSTREAD);
			memcpy(urb->buf + frame_desc->offset +
			       qtd->isoc_split_offset, chan->qh->dw_align_buf,
			       frame_desc->actual_length);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_PREREAD);
		}
		break;
	case DWC2_HC_XFER_FRAME_OVERRUN:
		urb->error_count++;
		if (chan->ep_is_in)
			frame_desc->status = -ENOSR;
		else
			frame_desc->status = -ECOMM;
		frame_desc->actual_length = 0;
		break;
	case DWC2_HC_XFER_BABBLE_ERR:
		urb->error_count++;
		frame_desc->status = -EOVERFLOW;
		/* Don't need to update actual_length in this case */
		break;
	case DWC2_HC_XFER_XACT_ERR:
		urb->error_count++;
		frame_desc->status = -EPROTO;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		if (chan->align_buf && frame_desc->actual_length &&
		    chan->ep_is_in) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_POSTREAD);
			memcpy(urb->buf + frame_desc->offset +
			       qtd->isoc_split_offset, chan->qh->dw_align_buf,
			       frame_desc->actual_length);
			usb_syncmem(urb->usbdma, 0, urb->length,
				    BUS_DMASYNC_PREREAD);
		}

		/* Skip whole frame */
		if (chan->qh->do_split &&
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
		}

		break;
	default:
		dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
			halt_status);
		break;
	}

	if (++qtd->isoc_frame_index == urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers. The individual
		 * frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		halt_status = DWC2_HC_XFER_URB_COMPLETE;
	} else {
		halt_status = DWC2_HC_XFER_COMPLETE;
	}

	return halt_status;
}

/*
 * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
 * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
 * still linked to the QH, the QH is added to the end of the inactive
 * non-periodic schedule. For periodic QHs, removes the QH from the periodic
 * schedule if no more QTDs are linked to the QH.
 */
STATIC void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			       int free_qtd)
{
	int continue_split = 0;
	struct dwc2_qtd *qtd;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, " %s(%p,%p,%d)\n", __func__,
			 hsotg, qh, free_qtd);

	if (TAILQ_EMPTY(&qh->qtd_list)) {
		dev_dbg(hsotg->dev, "## QTD list empty ##\n");
		goto no_qtd;
	}

	qtd = TAILQ_FIRST(&qh->qtd_list);

	/* A split transaction in progress must keep its schedule slot */
	if (qtd->complete_split)
		continue_split = 1;
	else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
		 qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
		continue_split = 1;

	if (free_qtd) {
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		continue_split = 0;
	}

no_qtd:
	/* Drop any stale bounce-buffer association before releasing */
	if (qh->channel)
		qh->channel->align_buf = 0;
	dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
	qh->channel = NULL;
}

/**
 * dwc2_release_channel() - Releases a host channel for use by other transfers
 *
 * @hsotg:       The HCD state structure
 * @chan:        The host channel to release
 * @qtd:         The QTD associated with the host channel. This QTD may be
 *               freed if the transfer is complete or an error has occurred.
 * @halt_status: Reason the channel is being released. This status
 *               determines the actions taken by this function.
 *
 * Also attempts to select and queue more transactions since at least one host
 * channel is available.
 */
STATIC void dwc2_release_channel(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan,
				 struct dwc2_qtd *qtd,
				 enum dwc2_halt_status halt_status)
{
	enum dwc2_transaction_type tr_type;
	u32 haintmsk;
	int free_qtd = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, " %s: channel %d, halt_status %d\n",
			 __func__, chan->hc_num, halt_status);

	switch (halt_status) {
	case DWC2_HC_XFER_URB_COMPLETE:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_AHB_ERR:
	case DWC2_HC_XFER_STALL:
	case DWC2_HC_XFER_BABBLE_ERR:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_XACT_ERR:
		/* Give up after three consecutive transaction errors */
		if (qtd && qtd->error_count >= 3) {
			dev_vdbg(hsotg->dev,
				 " Complete URB with transaction error\n");
			free_qtd = 1;
			dwc2_host_complete(hsotg, qtd, -EPROTO);
		}
		break;
	case DWC2_HC_XFER_URB_DEQUEUE:
		/*
		 * The QTD has already been removed and the QH has been
		 * deactivated. Don't want to do anything except release the
		 * host channel and try to queue more transfers.
		 */
		goto cleanup;
	case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
		/*
		 * NOTE(review): qtd is not NULL-checked here, unlike the
		 * XACT_ERR case above -- presumably this path is only reached
		 * with a valid qtd; confirm against callers.
		 */
		dev_vdbg(hsotg->dev, " Complete URB with I/O error\n");
		free_qtd = 1;
		dwc2_host_complete(hsotg, qtd, -EIO);
		break;
	case DWC2_HC_XFER_NO_HALT_STATUS:
	default:
		break;
	}

	dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);

cleanup:
	/*
	 * Release the host channel for use by other transfers. The cleanup
	 * function clears the channel interrupt enables and conditions, so
	 * there's no need to clear the Channel Halted interrupt separately.
	 */
	if (chan->in_freelist != 0)
		LIST_REMOVE(chan, hc_list_entry);
	dwc2_hc_cleanup(hsotg, chan);
	LIST_INSERT_HEAD(&hsotg->free_hc_list, chan, hc_list_entry);
	chan->in_freelist = 1;

	if (hsotg->core_params->uframe_sched > 0) {
		hsotg->available_host_channels++;
	} else {
		switch (chan->ep_type) {
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			hsotg->non_periodic_channels--;
			break;
		default:
			/*
			 * Don't release reservations for periodic channels
			 * here. That's done when a periodic transfer is
			 * descheduled (i.e. when the QH is removed from the
			 * periodic schedule).
			 */
			break;
		}
	}

	/* Stop listening for interrupts from this channel */
	haintmsk = DWC2_READ_4(hsotg, HAINTMSK);
	haintmsk &= ~(1 << chan->hc_num);
	DWC2_WRITE_4(hsotg, HAINTMSK, haintmsk);

	/* Try to queue more transfers now that there's a free channel */
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);
}

/*
 * Halts a host channel. If the channel cannot be halted immediately because
 * the request queue is full, this function ensures that the FIFO empty
 * interrupt for the appropriate queue is enabled so that the halt request can
 * be queued when there is space in the request queue.
 *
 * This function may also be called in DMA mode. In that case, the channel is
 * simply released since the core always halts the channel automatically in
 * DMA mode.
 */
STATIC void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
			      enum dwc2_halt_status halt_status)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (hsotg->core_params->dma_enable > 0) {
		/* In DMA mode the core halts the channel itself */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
		return;
	}

	/* Slave mode processing */
	dwc2_hc_halt(hsotg, chan, halt_status);

	if (chan->halt_on_queue) {
		u32 gintmsk;

		dev_vdbg(hsotg->dev, "Halt on queue\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			/*
			 * Make sure the Non-periodic Tx FIFO empty interrupt
			 * is enabled so that the non-periodic schedule will
			 * be processed
			 */
			gintmsk = DWC2_READ_4(hsotg, GINTMSK);
			gintmsk |= GINTSTS_NPTXFEMP;
			DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
		} else {
			dev_vdbg(hsotg->dev, "isoc/intr\n");
			/*
			 * Move the QH from the periodic queued schedule to
			 * the periodic assigned schedule. This allows the
			 * halt to be queued when the periodic schedule is
			 * processed.
			 */
			TAILQ_REMOVE(&hsotg->periodic_sched_queued, chan->qh, qh_list_entry);
			TAILQ_INSERT_TAIL(&hsotg->periodic_sched_assigned, chan->qh, qh_list_entry);

			/*
			 * Make sure the Periodic Tx FIFO Empty interrupt is
			 * enabled so that the periodic schedule will be
			 * processed
			 */
			gintmsk = DWC2_READ_4(hsotg, GINTMSK);
			gintmsk |= GINTSTS_PTXFEMP;
			DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);
		}
	}
}

/*
 * Performs common cleanup for non-periodic transfers after a Transfer
 * Complete interrupt. This function should be called after any endpoint type
 * specific handling is finished to release the host channel.
 */
STATIC void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
					    struct dwc2_host_chan *chan,
					    int chnum, struct dwc2_qtd *qtd,
					    enum dwc2_halt_status halt_status)
{
	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* A completed transfer resets the consecutive-error counter */
	qtd->error_count = 0;

	if (chan->hcint & HCINTMSK_NYET) {
		/*
		 * Got a NYET on the last transaction of the transfer. This
		 * means that the endpoint should be in the PING state at the
		 * beginning of the next transfer.
		 */
		dev_vdbg(hsotg->dev, "got NYET\n");
		chan->qh->ping_state = 1;
	}

	/*
	 * Always halt and release the host channel to make it available for
	 * more transfers. There may still be more phases for a control
	 * transfer or more data packets for a bulk transfer at this point,
	 * but the host channel is still halted. A channel will be reassigned
	 * to the transfer when the non-periodic schedule is processed after
	 * the channel is released. This allows transactions to be queued
	 * properly via dwc2_hcd_queue_transactions, which also enables the
	 * Tx FIFO Empty interrupt if necessary.
	 */
	if (chan->ep_is_in) {
		/*
		 * IN transfers in Slave mode require an explicit disable to
		 * halt the channel. (In DMA mode, this call simply releases
		 * the channel.)
		 */
		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
	} else {
		/*
		 * The channel is automatically disabled by the core for OUT
		 * transfers in Slave mode
		 */
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
	}
}

/*
 * Performs common cleanup for periodic transfers after a Transfer Complete
 * interrupt. This function should be called after any endpoint type specific
 * handling is finished to release the host channel.
 */
STATIC void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan, int chnum,
					struct dwc2_qtd *qtd,
					enum dwc2_halt_status halt_status)
{
	u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum));

	qtd->error_count = 0;

	if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
		/* Core halts channel in these cases */
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
	else
		/* Flush any outstanding requests from the Tx queue */
		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
}

/*
 * Handles one complete-split IN chunk of an isochronous split transfer:
 * accumulates the received bytes into the current frame descriptor, advances
 * to the next ISO frame when the descriptor is full, and completes the URB
 * when all frames are done.  Always releases the channel.
 *
 * Returns 1 to indicate the channel was released, 0 when there was nothing
 * to do (no URB, or a zero-length read restarted the split).
 */
STATIC int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u32 len;

	if (!qtd->urb)
		return 0;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
					  DWC2_HC_XFER_COMPLETE, NULL);
	if (!len) {
		/* Nothing received: restart the split from the beginning */
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
		return 0;
	}

	frame_desc->actual_length += len;

	/* Non DWORD-aligned buffer: copy this chunk out of the bounce buf */
	if (chan->align_buf) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		usb_syncmem(qtd->urb->usbdma, 0, qtd->urb->length,
			    BUS_DMASYNC_POSTREAD);
		memcpy(qtd->urb->buf + frame_desc->offset +
		       qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
		usb_syncmem(qtd->urb->usbdma, 0, qtd->urb->length,
			    BUS_DMASYNC_PREREAD);
	}

	qtd->isoc_split_offset += len;

	if (frame_desc->actual_length >= frame_desc->length) {
		/* This ISO frame is done; move on to the next one */
		frame_desc->status = 0;
		qtd->isoc_frame_index++;
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
	}

	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_URB_COMPLETE);
	} else {
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_NO_HALT_STATUS);
	}

	return 1;	/* Indicates that channel released */
}

/*
 * Handles a host channel Transfer Complete interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 */
STATIC void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
				  struct dwc2_host_chan *chan, int chnum,
				  struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
	int pipe_type;
	int urb_xfer_done;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev,
			 "--Host Channel %d Interrupt: Transfer Complete--\n",
			 chnum);

	if (!urb)
		goto handle_xfercomp_done;

	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);

	if (hsotg->core_params->dma_desc_enable > 0) {
		/* Descriptor DMA has its own completion path */
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
		if (pipe_type == USB_ENDPOINT_XFER_ISOC)
			/* Do not disable the interrupt, just clear it */
			return;
		goto handle_xfercomp_done;
	}

	/* Handle xfer complete on CSPLIT */
	if (chan->qh->do_split) {
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			if (qtd->complete_split &&
			    dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
							qtd))
				goto handle_xfercomp_done;
		} else {
			qtd->complete_split = 0;
		}
	}

	/* Update the QTD and URB states */
	switch (pipe_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* Control transfers step through SETUP -> DATA -> STATUS */
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 " Control setup transaction done\n");
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_DATA:
			urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
							      chnum, urb, qtd);
			if (urb_xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 " Control data transfer done\n");
			} else {
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_STATUS:
			dev_vdbg(hsotg->dev, " Control transfer complete\n");
			if (urb->status == -EINPROGRESS)
				urb->status = 0;
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
			break;
		}

		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
						halt_status);
		break;
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, " Bulk transfer complete\n");
		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
						      qtd);
		if (urb_xfer_done) {
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
		} else {
			halt_status = DWC2_HC_XFER_COMPLETE;
		}

		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
						halt_status);
		break;
	case USB_ENDPOINT_XFER_INT:
		dev_vdbg(hsotg->dev, " Interrupt transfer complete\n");
		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
						      qtd);

		/*
		 * Interrupt URB is done on the first transfer complete
		 * interrupt
		 */
		if (urb_xfer_done) {
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
		} else {
			halt_status = DWC2_HC_XFER_COMPLETE;
		}

		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
					    halt_status);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, " Isochronous transfer complete\n");
		if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
					chnum, qtd, DWC2_HC_XFER_COMPLETE);
		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
					    halt_status);
		break;
	}

handle_xfercomp_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
}

/*
 * Handles a host channel STALL interrupt. This handler may be called in
 * either DMA mode or Slave mode.
 */
STATIC void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
			       struct dwc2_host_chan *chan, int chnum,
			       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	int pipe_type;

	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
		chnum);

	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
					    DWC2_HC_XFER_STALL);
		goto handle_stall_done;
	}

	if (!urb)
		goto handle_stall_halt;

	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);

	if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
		dwc2_host_complete(hsotg, qtd, -EPIPE);

	if (pipe_type == USB_ENDPOINT_XFER_BULK ||
	    pipe_type == USB_ENDPOINT_XFER_INT) {
		dwc2_host_complete(hsotg, qtd, -EPIPE);
		/*
		 * USB protocol requires resetting the data toggle for bulk
		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
		 * setup command is issued to the endpoint. Anticipate the
		 * CLEAR_FEATURE command since a STALL has occurred and reset
		 * the data toggle now.
		 */
		chan->qh->data_toggle = 0;
	}

handle_stall_halt:
	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);

handle_stall_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
}

/*
 * Updates the state of the URB when a transfer has been stopped due to an
 * abnormal condition before the transfer completes. Modifies the
 * actual_length field of the URB to reflect the number of bytes that have
 * actually been transferred via the host channel.
1164 */ 1165 STATIC void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg, 1166 struct dwc2_host_chan *chan, int chnum, 1167 struct dwc2_hcd_urb *urb, 1168 struct dwc2_qtd *qtd, 1169 enum dwc2_halt_status halt_status) 1170 { 1171 u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, 1172 qtd, halt_status, NULL); 1173 1174 if (urb->actual_length + xfer_length > urb->length) { 1175 dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__); 1176 xfer_length = urb->length - urb->actual_length; 1177 } 1178 1179 /* Non DWORD-aligned buffer case handling */ 1180 if (chan->align_buf && xfer_length && chan->ep_is_in) { 1181 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__); 1182 usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_POSTREAD); 1183 memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf, 1184 xfer_length); 1185 usb_syncmem(urb->usbdma, 0, urb->length, BUS_DMASYNC_PREREAD); 1186 } 1187 1188 urb->actual_length += xfer_length; 1189 1190 dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n", 1191 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum); 1192 dev_vdbg(hsotg->dev, " chan->start_pkt_count %d\n", 1193 chan->start_pkt_count); 1194 dev_vdbg(hsotg->dev, " hctsiz.pktcnt %d\n", 1195 (DWC2_READ_4(hsotg, HCTSIZ(chnum)) & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT); 1196 dev_vdbg(hsotg->dev, " chan->max_packet %d\n", chan->max_packet); 1197 dev_vdbg(hsotg->dev, " bytes_transferred %d\n", 1198 xfer_length); 1199 dev_vdbg(hsotg->dev, " urb->actual_length %d\n", 1200 urb->actual_length); 1201 dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n", 1202 urb->length); 1203 } 1204 1205 /* 1206 * Handles a host channel NAK interrupt. This handler may be called in either 1207 * DMA mode or Slave mode. 
1208 */ 1209 STATIC void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg, 1210 struct dwc2_host_chan *chan, int chnum, 1211 struct dwc2_qtd *qtd) 1212 { 1213 if (dbg_hc(chan)) 1214 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n", 1215 chnum); 1216 1217 /* 1218 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and 1219 * interrupt. Re-start the SSPLIT transfer. 1220 */ 1221 if (chan->do_split) { 1222 /* 1223 * When we get control/bulk NAKs then remember this so we holdoff on 1224 * this qh until the beginning of the next frame 1225 */ 1226 switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) { 1227 case USB_ENDPOINT_XFER_CONTROL: 1228 case USB_ENDPOINT_XFER_BULK: 1229 chan->qh->nak_frame = dwc2_hcd_get_frame_number(hsotg); 1230 break; 1231 } 1232 1233 if (chan->complete_split) 1234 qtd->error_count = 0; 1235 qtd->complete_split = 0; 1236 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK); 1237 goto handle_nak_done; 1238 } 1239 1240 switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) { 1241 case USB_ENDPOINT_XFER_CONTROL: 1242 case USB_ENDPOINT_XFER_BULK: 1243 if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) { 1244 /* 1245 * NAK interrupts are enabled on bulk/control IN 1246 * transfers in DMA mode for the sole purpose of 1247 * resetting the error count after a transaction error 1248 * occurs. The core will continue transferring data. 1249 */ 1250 qtd->error_count = 0; 1251 break; 1252 } 1253 1254 /* 1255 * NAK interrupts normally occur during OUT transfers in DMA 1256 * or Slave mode. For IN transfers, more requests will be 1257 * queued as request queue space is available. 
1258 */ 1259 qtd->error_count = 0; 1260 1261 if (!chan->qh->ping_state) { 1262 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, 1263 qtd, DWC2_HC_XFER_NAK); 1264 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); 1265 1266 if (chan->speed == USB_SPEED_HIGH) 1267 chan->qh->ping_state = 1; 1268 } 1269 1270 /* 1271 * Halt the channel so the transfer can be re-started from 1272 * the appropriate point or the PING protocol will 1273 * start/continue 1274 */ 1275 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK); 1276 break; 1277 case USB_ENDPOINT_XFER_INT: 1278 qtd->error_count = 0; 1279 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK); 1280 break; 1281 case USB_ENDPOINT_XFER_ISOC: 1282 /* Should never get called for isochronous transfers */ 1283 dev_err(hsotg->dev, "NACK interrupt for ISOC transfer\n"); 1284 break; 1285 } 1286 1287 handle_nak_done: 1288 disable_hc_int(hsotg, chnum, HCINTMSK_NAK); 1289 } 1290 1291 /* 1292 * Handles a host channel ACK interrupt. This interrupt is enabled when 1293 * performing the PING protocol in Slave mode, when errors occur during 1294 * either Slave mode or DMA mode, and during Start Split transactions. 1295 */ 1296 STATIC void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg, 1297 struct dwc2_host_chan *chan, int chnum, 1298 struct dwc2_qtd *qtd) 1299 { 1300 struct dwc2_hcd_iso_packet_desc *frame_desc; 1301 1302 if (dbg_hc(chan)) 1303 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n", 1304 chnum); 1305 1306 if (chan->do_split) { 1307 /* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. 
*/ 1308 if (!chan->ep_is_in && 1309 chan->data_pid_start != DWC2_HC_PID_SETUP) 1310 qtd->ssplit_out_xfer_count = chan->xfer_len; 1311 1312 if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) { 1313 qtd->complete_split = 1; 1314 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK); 1315 } else { 1316 /* ISOC OUT */ 1317 switch (chan->xact_pos) { 1318 case DWC2_HCSPLT_XACTPOS_ALL: 1319 break; 1320 case DWC2_HCSPLT_XACTPOS_END: 1321 qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL; 1322 qtd->isoc_split_offset = 0; 1323 break; 1324 case DWC2_HCSPLT_XACTPOS_BEGIN: 1325 case DWC2_HCSPLT_XACTPOS_MID: 1326 /* 1327 * For BEGIN or MID, calculate the length for 1328 * the next microframe to determine the correct 1329 * SSPLIT token, either MID or END 1330 */ 1331 frame_desc = &qtd->urb->iso_descs[ 1332 qtd->isoc_frame_index]; 1333 qtd->isoc_split_offset += 188; 1334 1335 if (frame_desc->length - qtd->isoc_split_offset 1336 <= 188) 1337 qtd->isoc_split_pos = 1338 DWC2_HCSPLT_XACTPOS_END; 1339 else 1340 qtd->isoc_split_pos = 1341 DWC2_HCSPLT_XACTPOS_MID; 1342 break; 1343 } 1344 } 1345 } else { 1346 qtd->error_count = 0; 1347 1348 if (chan->qh->ping_state) { 1349 chan->qh->ping_state = 0; 1350 /* 1351 * Halt the channel so the transfer can be re-started 1352 * from the appropriate point. This only happens in 1353 * Slave mode. In DMA mode, the ping_state is cleared 1354 * when the transfer is started because the core 1355 * automatically executes the PING, then the transfer. 1356 */ 1357 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK); 1358 } 1359 } 1360 1361 /* 1362 * If the ACK occurred when _not_ in the PING state, let the channel 1363 * continue transferring data after clearing the error count 1364 */ 1365 disable_hc_int(hsotg, chnum, HCINTMSK_ACK); 1366 } 1367 1368 /* 1369 * Handles a host channel NYET interrupt. This interrupt should only occur on 1370 * Bulk and Control OUT endpoints and for complete split transactions. 
If a 1371 * NYET occurs at the same time as a Transfer Complete interrupt, it is 1372 * handled in the xfercomp interrupt handler, not here. This handler may be 1373 * called in either DMA mode or Slave mode. 1374 */ 1375 STATIC void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg, 1376 struct dwc2_host_chan *chan, int chnum, 1377 struct dwc2_qtd *qtd) 1378 { 1379 if (dbg_hc(chan)) 1380 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n", 1381 chnum); 1382 1383 /* 1384 * NYET on CSPLIT 1385 * re-do the CSPLIT immediately on non-periodic 1386 */ 1387 if (chan->do_split && chan->complete_split) { 1388 if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC && 1389 hsotg->core_params->dma_enable > 0) { 1390 qtd->complete_split = 0; 1391 qtd->isoc_split_offset = 0; 1392 qtd->isoc_frame_index++; 1393 if (qtd->urb && 1394 qtd->isoc_frame_index == qtd->urb->packet_count) { 1395 dwc2_host_complete(hsotg, qtd, 0); 1396 dwc2_release_channel(hsotg, chan, qtd, 1397 DWC2_HC_XFER_URB_COMPLETE); 1398 } else { 1399 dwc2_release_channel(hsotg, chan, qtd, 1400 DWC2_HC_XFER_NO_HALT_STATUS); 1401 } 1402 goto handle_nyet_done; 1403 } 1404 1405 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1406 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 1407 int frnum = dwc2_hcd_get_frame_number(hsotg); 1408 1409 if (dwc2_full_frame_num(frnum) != 1410 dwc2_full_frame_num(chan->qh->sched_frame)) { 1411 /* 1412 * No longer in the same full speed frame. 1413 * Treat this as a transaction error. 1414 */ 1415 #if 0 1416 /* 1417 * Todo: Fix system performance so this can 1418 * be treated as an error. Right now complete 1419 * splits cannot be scheduled precisely enough 1420 * due to other system activity, so this error 1421 * occurs regularly in Slave mode. 
1422 */ 1423 qtd->error_count++; 1424 #endif 1425 qtd->complete_split = 0; 1426 dwc2_halt_channel(hsotg, chan, qtd, 1427 DWC2_HC_XFER_XACT_ERR); 1428 /* Todo: add support for isoc release */ 1429 goto handle_nyet_done; 1430 } 1431 } 1432 1433 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET); 1434 goto handle_nyet_done; 1435 } 1436 1437 chan->qh->ping_state = 1; 1438 qtd->error_count = 0; 1439 1440 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd, 1441 DWC2_HC_XFER_NYET); 1442 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); 1443 1444 /* 1445 * Halt the channel and re-start the transfer so the PING protocol 1446 * will start 1447 */ 1448 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET); 1449 1450 handle_nyet_done: 1451 disable_hc_int(hsotg, chnum, HCINTMSK_NYET); 1452 } 1453 1454 /* 1455 * Handles a host channel babble interrupt. This handler may be called in 1456 * either DMA mode or Slave mode. 1457 */ 1458 STATIC void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg, 1459 struct dwc2_host_chan *chan, int chnum, 1460 struct dwc2_qtd *qtd) 1461 { 1462 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n", 1463 chnum); 1464 1465 // dwc2_hc_handle_tt_clear(hsotg, chan, qtd); 1466 1467 if (hsotg->core_params->dma_desc_enable > 0) { 1468 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, 1469 DWC2_HC_XFER_BABBLE_ERR); 1470 goto disable_int; 1471 } 1472 1473 if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) { 1474 dwc2_host_complete(hsotg, qtd, -EOVERFLOW); 1475 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR); 1476 } else { 1477 enum dwc2_halt_status halt_status; 1478 1479 halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum, 1480 qtd, DWC2_HC_XFER_BABBLE_ERR); 1481 dwc2_halt_channel(hsotg, chan, qtd, halt_status); 1482 } 1483 1484 disable_int: 1485 disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR); 1486 } 1487 1488 /* 1489 * Handles a host channel AHB error interrupt. This handler is only called in 1490 * DMA mode. 
1491 */ 1492 STATIC void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg, 1493 struct dwc2_host_chan *chan, int chnum, 1494 struct dwc2_qtd *qtd) 1495 { 1496 struct dwc2_hcd_urb *urb = qtd->urb; 1497 1498 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n", 1499 chnum); 1500 1501 if (!urb) 1502 goto handle_ahberr_halt; 1503 1504 // dwc2_hc_handle_tt_clear(hsotg, chan, qtd); 1505 1506 #ifdef DWC2_DEBUG 1507 const char *pipetype, *speed; 1508 1509 u32 hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum)); 1510 u32 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum)); 1511 u32 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum)); 1512 u32 hc_dma = DWC2_READ_4(hsotg, HCDMA(chnum)); 1513 1514 dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum); 1515 dev_err(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt); 1516 dev_err(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma); 1517 dev_err(hsotg->dev, " Device address: %d\n", 1518 dwc2_hcd_get_dev_addr(&urb->pipe_info)); 1519 dev_err(hsotg->dev, " Endpoint: %d, %s\n", 1520 dwc2_hcd_get_ep_num(&urb->pipe_info), 1521 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 
"IN" : "OUT"); 1522 1523 switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) { 1524 case USB_ENDPOINT_XFER_CONTROL: 1525 pipetype = "CONTROL"; 1526 break; 1527 case USB_ENDPOINT_XFER_BULK: 1528 pipetype = "BULK"; 1529 break; 1530 case USB_ENDPOINT_XFER_INT: 1531 pipetype = "INTERRUPT"; 1532 break; 1533 case USB_ENDPOINT_XFER_ISOC: 1534 pipetype = "ISOCHRONOUS"; 1535 break; 1536 default: 1537 pipetype = "UNKNOWN"; 1538 break; 1539 } 1540 1541 dev_err(hsotg->dev, " Endpoint type: %s\n", pipetype); 1542 1543 switch (chan->speed) { 1544 case USB_SPEED_HIGH: 1545 speed = "HIGH"; 1546 break; 1547 case USB_SPEED_FULL: 1548 speed = "FULL"; 1549 break; 1550 case USB_SPEED_LOW: 1551 speed = "LOW"; 1552 break; 1553 default: 1554 speed = "UNKNOWN"; 1555 break; 1556 } 1557 1558 dev_err(hsotg->dev, " Speed: %s\n", speed); 1559 1560 dev_err(hsotg->dev, " Max packet size: %d\n", 1561 dwc2_hcd_get_mps(&urb->pipe_info)); 1562 dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length); 1563 dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n", 1564 urb->buf, (unsigned long)urb->dma); 1565 dev_err(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n", 1566 urb->setup_packet, (unsigned long)urb->setup_dma); 1567 dev_err(hsotg->dev, " Interval: %d\n", urb->interval); 1568 #endif 1569 1570 /* Core halts the channel for Descriptor DMA mode */ 1571 if (hsotg->core_params->dma_desc_enable > 0) { 1572 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, 1573 DWC2_HC_XFER_AHB_ERR); 1574 goto handle_ahberr_done; 1575 } 1576 1577 dwc2_host_complete(hsotg, qtd, -EIO); 1578 1579 handle_ahberr_halt: 1580 /* 1581 * Force a channel halt. Don't call dwc2_halt_channel because that won't 1582 * write to the HCCHARn register in DMA mode to force the halt. 1583 */ 1584 dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR); 1585 1586 handle_ahberr_done: 1587 disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR); 1588 } 1589 1590 /* 1591 * Handles a host channel transaction error interrupt. 
This handler may be 1592 * called in either DMA mode or Slave mode. 1593 */ 1594 STATIC void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg, 1595 struct dwc2_host_chan *chan, int chnum, 1596 struct dwc2_qtd *qtd) 1597 { 1598 dev_dbg(hsotg->dev, 1599 "--Host Channel %d Interrupt: Transaction Error--\n", chnum); 1600 1601 // dwc2_hc_handle_tt_clear(hsotg, chan, qtd); 1602 1603 if (hsotg->core_params->dma_desc_enable > 0) { 1604 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, 1605 DWC2_HC_XFER_XACT_ERR); 1606 goto handle_xacterr_done; 1607 } 1608 1609 switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) { 1610 case USB_ENDPOINT_XFER_CONTROL: 1611 case USB_ENDPOINT_XFER_BULK: 1612 qtd->error_count++; 1613 if (!chan->qh->ping_state) { 1614 1615 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, 1616 qtd, DWC2_HC_XFER_XACT_ERR); 1617 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); 1618 if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH) 1619 chan->qh->ping_state = 1; 1620 } 1621 1622 /* 1623 * Halt the channel so the transfer can be re-started from 1624 * the appropriate point or the PING protocol will start 1625 */ 1626 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR); 1627 break; 1628 case USB_ENDPOINT_XFER_INT: 1629 qtd->error_count++; 1630 if (chan->do_split && chan->complete_split) 1631 qtd->complete_split = 0; 1632 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR); 1633 break; 1634 case USB_ENDPOINT_XFER_ISOC: 1635 { 1636 enum dwc2_halt_status halt_status; 1637 1638 halt_status = dwc2_update_isoc_urb_state(hsotg, chan, 1639 chnum, qtd, DWC2_HC_XFER_XACT_ERR); 1640 dwc2_halt_channel(hsotg, chan, qtd, halt_status); 1641 } 1642 break; 1643 } 1644 1645 handle_xacterr_done: 1646 disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR); 1647 } 1648 1649 /* 1650 * Handles a host channel frame overrun interrupt. This handler may be called 1651 * in either DMA mode or Slave mode. 
1652 */ 1653 STATIC void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg, 1654 struct dwc2_host_chan *chan, int chnum, 1655 struct dwc2_qtd *qtd) 1656 { 1657 enum dwc2_halt_status halt_status; 1658 1659 if (dbg_hc(chan)) 1660 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n", 1661 chnum); 1662 1663 dwc2_hc_handle_tt_clear(hsotg, chan, qtd); 1664 1665 switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) { 1666 case USB_ENDPOINT_XFER_CONTROL: 1667 case USB_ENDPOINT_XFER_BULK: 1668 break; 1669 case USB_ENDPOINT_XFER_INT: 1670 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN); 1671 break; 1672 case USB_ENDPOINT_XFER_ISOC: 1673 halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum, 1674 qtd, DWC2_HC_XFER_FRAME_OVERRUN); 1675 dwc2_halt_channel(hsotg, chan, qtd, halt_status); 1676 break; 1677 } 1678 1679 disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN); 1680 } 1681 1682 /* 1683 * Handles a host channel data toggle error interrupt. This handler may be 1684 * called in either DMA mode or Slave mode. 1685 */ 1686 STATIC void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg, 1687 struct dwc2_host_chan *chan, int chnum, 1688 struct dwc2_qtd *qtd) 1689 { 1690 dev_dbg(hsotg->dev, 1691 "--Host Channel %d Interrupt: Data Toggle Error--\n", chnum); 1692 1693 if (chan->ep_is_in) 1694 qtd->error_count = 0; 1695 else 1696 dev_err(hsotg->dev, 1697 "Data Toggle Error on OUT transfer, channel %d\n", 1698 chnum); 1699 1700 // dwc2_hc_handle_tt_clear(hsotg, chan, qtd); 1701 disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR); 1702 } 1703 1704 /* 1705 * For debug only. It checks that a valid halt status is set and that 1706 * HCCHARn.chdis is clear. If there's a problem, corrective action is 1707 * taken and a warning is issued. 
1708 * 1709 * Return: true if halt status is ok, false otherwise 1710 */ 1711 STATIC bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg, 1712 struct dwc2_host_chan *chan, int chnum, 1713 struct dwc2_qtd *qtd) 1714 { 1715 #ifdef DWC2_DEBUG 1716 u32 hcchar; 1717 u32 hctsiz; 1718 u32 hcintmsk; 1719 u32 hcsplt; 1720 1721 if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) { 1722 /* 1723 * This code is here only as a check. This condition should 1724 * never happen. Ignore the halt if it does occur. 1725 */ 1726 hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum)); 1727 hctsiz = DWC2_READ_4(hsotg, HCTSIZ(chnum)); 1728 hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum)); 1729 hcsplt = DWC2_READ_4(hsotg, HCSPLT(chnum)); 1730 dev_dbg(hsotg->dev, 1731 "%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n", 1732 __func__); 1733 dev_dbg(hsotg->dev, 1734 "channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n", 1735 chnum, hcchar, hctsiz); 1736 dev_dbg(hsotg->dev, 1737 "hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n", 1738 chan->hcint, hcintmsk, hcsplt); 1739 if (qtd) 1740 dev_dbg(hsotg->dev, "qtd->complete_split %d\n", 1741 qtd->complete_split); 1742 dev_warn(hsotg->dev, 1743 "%s: no halt status, channel %d, ignoring interrupt\n", 1744 __func__, chnum); 1745 return false; 1746 } 1747 1748 /* 1749 * This code is here only as a check. hcchar.chdis should never be set 1750 * when the halt interrupt occurs. Halt the channel again if it does 1751 * occur. 1752 */ 1753 hcchar = DWC2_READ_4(hsotg, HCCHAR(chnum)); 1754 if (hcchar & HCCHAR_CHDIS) { 1755 dev_warn(hsotg->dev, 1756 "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n", 1757 __func__, hcchar); 1758 chan->halt_pending = 0; 1759 dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status); 1760 return false; 1761 } 1762 #endif 1763 1764 return true; 1765 } 1766 1767 /* 1768 * Handles a host Channel Halted interrupt in DMA mode. This handler 1769 * determines the reason the channel halted and proceeds accordingly. 
1770 */ 1771 STATIC void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg, 1772 struct dwc2_host_chan *chan, int chnum, 1773 struct dwc2_qtd *qtd) 1774 { 1775 u32 hcintmsk; 1776 int out_nak_enh = 0; 1777 1778 if (dbg_hc(chan)) 1779 dev_vdbg(hsotg->dev, 1780 "--Host Channel %d Interrupt: DMA Channel Halted--\n", 1781 chnum); 1782 1783 /* 1784 * For core with OUT NAK enhancement, the flow for high-speed 1785 * CONTROL/BULK OUT is handled a little differently 1786 */ 1787 if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) { 1788 if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in && 1789 (chan->ep_type == USB_ENDPOINT_XFER_CONTROL || 1790 chan->ep_type == USB_ENDPOINT_XFER_BULK)) { 1791 out_nak_enh = 1; 1792 } 1793 } 1794 1795 if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE || 1796 (chan->halt_status == DWC2_HC_XFER_AHB_ERR && 1797 hsotg->core_params->dma_desc_enable <= 0)) { 1798 if (hsotg->core_params->dma_desc_enable > 0) 1799 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, 1800 chan->halt_status); 1801 else 1802 /* 1803 * Just release the channel. A dequeue can happen on a 1804 * transfer timeout. In the case of an AHB Error, the 1805 * channel was forced to halt because there's no way to 1806 * gracefully recover. 1807 */ 1808 dwc2_release_channel(hsotg, chan, qtd, 1809 chan->halt_status); 1810 return; 1811 } 1812 1813 hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum)); 1814 1815 if (chan->hcint & HCINTMSK_XFERCOMPL) { 1816 /* 1817 * Todo: This is here because of a possible hardware bug. Spec 1818 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT 1819 * interrupt w/ACK bit set should occur, but I only see the 1820 * XFERCOMP bit, even with it masked out. This is a workaround 1821 * for that behavior. Should fix this when hardware is fixed. 
1822 */ 1823 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in) 1824 dwc2_hc_ack_intr(hsotg, chan, chnum, qtd); 1825 dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd); 1826 } else if (chan->hcint & HCINTMSK_STALL) { 1827 dwc2_hc_stall_intr(hsotg, chan, chnum, qtd); 1828 } else if ((chan->hcint & HCINTMSK_XACTERR) && 1829 hsotg->core_params->dma_desc_enable <= 0) { 1830 if (out_nak_enh) { 1831 if (chan->hcint & 1832 (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) { 1833 dev_vdbg(hsotg->dev, 1834 "XactErr with NYET/NAK/ACK\n"); 1835 qtd->error_count = 0; 1836 } else { 1837 dev_vdbg(hsotg->dev, 1838 "XactErr without NYET/NAK/ACK\n"); 1839 } 1840 } 1841 1842 /* 1843 * Must handle xacterr before nak or ack. Could get a xacterr 1844 * at the same time as either of these on a BULK/CONTROL OUT 1845 * that started with a PING. The xacterr takes precedence. 1846 */ 1847 dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd); 1848 } else if ((chan->hcint & HCINTMSK_XCS_XACT) && 1849 hsotg->core_params->dma_desc_enable > 0) { 1850 dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd); 1851 } else if ((chan->hcint & HCINTMSK_AHBERR) && 1852 hsotg->core_params->dma_desc_enable > 0) { 1853 dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd); 1854 } else if (chan->hcint & HCINTMSK_BBLERR) { 1855 dwc2_hc_babble_intr(hsotg, chan, chnum, qtd); 1856 } else if (chan->hcint & HCINTMSK_FRMOVRUN) { 1857 dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd); 1858 } else if (!out_nak_enh) { 1859 if (chan->hcint & HCINTMSK_NYET) { 1860 /* 1861 * Must handle nyet before nak or ack. Could get a nyet 1862 * at the same time as either of those on a BULK/CONTROL 1863 * OUT that started with a PING. The nyet takes 1864 * precedence. 1865 */ 1866 dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd); 1867 } else if ((chan->hcint & HCINTMSK_NAK) && 1868 !(hcintmsk & HCINTMSK_NAK)) { 1869 /* 1870 * If nak is not masked, it's because a non-split IN 1871 * transfer is in an error state. 
In that case, the nak 1872 * is handled by the nak interrupt handler, not here. 1873 * Handle nak here for BULK/CONTROL OUT transfers, which 1874 * halt on a NAK to allow rewinding the buffer pointer. 1875 */ 1876 dwc2_hc_nak_intr(hsotg, chan, chnum, qtd); 1877 } else if ((chan->hcint & HCINTMSK_ACK) && 1878 !(hcintmsk & HCINTMSK_ACK)) { 1879 /* 1880 * If ack is not masked, it's because a non-split IN 1881 * transfer is in an error state. In that case, the ack 1882 * is handled by the ack interrupt handler, not here. 1883 * Handle ack here for split transfers. Start splits 1884 * halt on ACK. 1885 */ 1886 dwc2_hc_ack_intr(hsotg, chan, chnum, qtd); 1887 } else { 1888 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 1889 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 1890 /* 1891 * A periodic transfer halted with no other 1892 * channel interrupts set. Assume it was halted 1893 * by the core because it could not be completed 1894 * in its scheduled (micro)frame. 1895 */ 1896 dev_dbg(hsotg->dev, 1897 "%s: Halt channel %d (assume incomplete periodic transfer)\n", 1898 __func__, chnum); 1899 dwc2_halt_channel(hsotg, chan, qtd, 1900 DWC2_HC_XFER_PERIODIC_INCOMPLETE); 1901 } else { 1902 dev_err(hsotg->dev, 1903 "%s: Channel %d - ChHltd set, but reason is unknown\n", 1904 __func__, chnum); 1905 dev_err(hsotg->dev, 1906 "hcint 0x%08x, intsts 0x%08x\n", 1907 chan->hcint, 1908 DWC2_READ_4(hsotg, GINTSTS)); 1909 goto error; 1910 } 1911 } 1912 } else { 1913 dev_info(hsotg->dev, 1914 "NYET/NAK/ACK/other in non-error case, 0x%08x\n", 1915 chan->hcint); 1916 error: 1917 /* use the 3-strikes rule */ 1918 qtd->error_count++; 1919 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, 1920 qtd, DWC2_HC_XFER_XACT_ERR); 1921 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); 1922 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR); 1923 } 1924 } 1925 1926 /* 1927 * Handles a host channel Channel Halted interrupt 1928 * 1929 * In slave mode, this handler is called only when the driver 
specifically
 * requests a halt. This occurs during handling other host channel interrupts
 * (e.g. nak, xacterr, stall, nyet, etc.).
 *
 * In DMA mode, this is the interrupt that occurs when the core has finished
 * processing a transfer on a channel. Other host channel interrupts (except
 * ahberr) are disabled in DMA mode.
 */
STATIC void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan, int chnum,
				struct dwc2_qtd *qtd)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
			 chnum);

	if (hsotg->core_params->dma_enable > 0) {
		/* DMA mode: halt marks end-of-transfer processing */
		dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
	} else {
		/*
		 * Slave mode: the halt was requested by the driver itself;
		 * sanity-check the recorded halt status before releasing.
		 */
		if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
			return;
		dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
	}
}

/*
 * Handles interrupt for a specific Host Channel
 *
 * Reads and acknowledges HCINT(chnum), then dispatches each unmasked
 * status bit to its dedicated sub-handler. chan->hcint keeps the raw
 * (unmasked) status so sub-handlers can inspect bits that were masked.
 */
STATIC void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan;
	u32 hcint, hcintmsk;

	chan = hsotg->hc_ptr_array[chnum];

	/* Latch the channel's interrupt status and its mask */
	hcint = DWC2_READ_4(hsotg, HCINT(chnum));
	hcintmsk = DWC2_READ_4(hsotg, HCINTMSK(chnum));
	if (!chan) {
		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
		/* Still acknowledge the status so the interrupt clears */
		DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
		return;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
			 chnum);
		dev_vdbg(hsotg->dev,
			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			 hcint, hcintmsk, hcint & hcintmsk);
	}

	/*
	 * Write the raw status back to HCINT to acknowledge it
	 * (write-1-to-clear), then work only on the unmasked bits.
	 */
	DWC2_WRITE_4(hsotg, HCINT(chnum), hcint);
	chan->hcint = hcint;
	hcint &= hcintmsk;

	/*
	 * If the channel was halted due to a dequeue, the qtd list might
	 * be empty or at least the first entry will not be the active qtd.
	 * In this case, take a shortcut and just release the channel.
	 */
	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		/*
		 * If the channel was halted, this should be the only
		 * interrupt unmasked
		 */
		WARN_ON(hcint != HCINTMSK_CHHLTD);
		if (hsotg->core_params->dma_desc_enable > 0)
			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
						    chan->halt_status);
		else
			dwc2_release_channel(hsotg, chan, NULL,
					     chan->halt_status);
		return;
	}

	if (TAILQ_EMPTY(&chan->qh->qtd_list)) {
		/*
		 * TODO: Will this ever happen with the
		 * DWC2_HC_XFER_URB_DEQUEUE handling above?
		 */
		dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
			chnum);
		dev_dbg(hsotg->dev,
			"  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			chan->hcint, hcintmsk, hcint);
		/* Nothing to complete; stop further halt interrupts */
		chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
		disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
		chan->hcint = 0;
		return;
	}

	/* The head qtd is the transfer currently active on this channel */
	qtd = TAILQ_FIRST(&chan->qh->qtd_list);

	if (hsotg->core_params->dma_enable <= 0) {
		/*
		 * Slave mode: if CHHLTD arrived together with another status
		 * bit, let the other handler run first — it will request the
		 * halt itself. Only a lone CHHLTD is dispatched directly.
		 */
		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
			hcint &= ~HCINTMSK_CHHLTD;
	}

	if (hcint & HCINTMSK_XFERCOMPL) {
		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
		/*
		 * If NYET occurred at same time as Xfer Complete, the NYET is
		 * handled by the Xfer Complete interrupt handler. Don't want
		 * to call the NYET interrupt handler in this case.
		 */
		hcint &= ~HCINTMSK_NYET;
	}
	if (hcint & HCINTMSK_CHHLTD)
		dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_AHBERR)
		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_STALL)
		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_NAK)
		dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_ACK)
		dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_NYET)
		dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_XACTERR)
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_BBLERR)
		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_FRMOVRUN)
		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
	if (hcint & HCINTMSK_DATATGLERR)
		dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);

	chan->hcint = 0;
}

/*
 * This interrupt indicates that one or more host channels has a pending
 * interrupt. There are multiple conditions that can cause each host channel
 * interrupt. This function determines which conditions have occurred for each
 * host channel interrupt and handles them appropriately.
2064 */ 2065 STATIC void dwc2_hc_intr(struct dwc2_hsotg *hsotg) 2066 { 2067 u32 haint; 2068 int i; 2069 2070 haint = DWC2_READ_4(hsotg, HAINT); 2071 if (dbg_perio()) { 2072 dev_vdbg(hsotg->dev, "%s()\n", __func__); 2073 2074 dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint); 2075 } 2076 2077 for (i = 0; i < hsotg->core_params->host_channels; i++) { 2078 if (haint & (1 << i)) 2079 dwc2_hc_n_intr(hsotg, i); 2080 } 2081 } 2082 2083 /* This function handles interrupts for the HCD */ 2084 irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg) 2085 { 2086 u32 gintsts, dbg_gintsts; 2087 irqreturn_t retval = IRQ_NONE; 2088 2089 if (!dwc2_is_controller_alive(hsotg)) { 2090 dev_warn(hsotg->dev, "Controller is dead\n"); 2091 return retval; 2092 } 2093 2094 KASSERT(mtx_owned(&hsotg->lock)); 2095 2096 /* Check if HOST Mode */ 2097 if (dwc2_is_host_mode(hsotg)) { 2098 gintsts = dwc2_read_core_intr(hsotg); 2099 if (!gintsts) { 2100 return retval; 2101 } 2102 2103 retval = IRQ_HANDLED; 2104 2105 dbg_gintsts = gintsts; 2106 #ifndef DEBUG_SOF 2107 dbg_gintsts &= ~GINTSTS_SOF; 2108 #endif 2109 if (!dbg_perio()) 2110 dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL | 2111 GINTSTS_PTXFEMP); 2112 2113 /* Only print if there are any non-suppressed interrupts left */ 2114 if (dbg_gintsts) 2115 dev_vdbg(hsotg->dev, 2116 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", 2117 gintsts); 2118 2119 if (gintsts & GINTSTS_SOF) 2120 dwc2_sof_intr(hsotg); 2121 if (gintsts & GINTSTS_RXFLVL) 2122 dwc2_rx_fifo_level_intr(hsotg); 2123 if (gintsts & GINTSTS_NPTXFEMP) 2124 dwc2_np_tx_fifo_empty_intr(hsotg); 2125 if (gintsts & GINTSTS_PRTINT) 2126 dwc2_port_intr(hsotg); 2127 if (gintsts & GINTSTS_HCHINT) 2128 dwc2_hc_intr(hsotg); 2129 if (gintsts & GINTSTS_PTXFEMP) 2130 dwc2_perio_tx_fifo_empty_intr(hsotg); 2131 2132 if (dbg_gintsts) { 2133 dev_vdbg(hsotg->dev, 2134 "DWC OTG HCD Finished Servicing Interrupts\n"); 2135 dev_vdbg(hsotg->dev, 2136 "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n", 2137 
DWC2_READ_4(hsotg, GINTSTS), 2138 DWC2_READ_4(hsotg, GINTMSK)); 2139 } 2140 } 2141 2142 return retval; 2143 } 2144