/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
	ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These locks are tricky since they are common to all ports, but
	 * swfw_sync retries last long enough (1s) to make it almost certain
	 * that, if the lock cannot be taken, it is due to an improper lock
	 * of the semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	int err;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts_simple;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->sub_system_id = pci_dev->id.subsystem_device_id;
	ngbe_map_device_id(hw);
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
				      NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* disable interrupt */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
		     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
}
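/*
 * Illustrative sketch (not part of the driver): once probe has copied the
 * permanent MAC into dev->data->mac_addrs[0] above, an application can read
 * it back through the standard ethdev API. The port_id 0 is an assumed
 * example value.
 *
 *	struct rte_ether_addr mac;
 *	char buf[RTE_ETHER_ADDR_FMT_SIZE];
 *
 *	rte_eth_macaddr_get(0, &mac);
 *	rte_ether_format_addr(buf, sizeof(buf), &mac);
 *	printf("port 0 MAC: %s\n", buf);
 */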
static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
				  sizeof(struct ngbe_adapter),
				  eth_dev_pci_specific_init, pci_dev,
				  eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation preconditions, we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	ngbe_stop_hw(hw);

	/* reinitialize adapter; this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	allowed_speeds = 0;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	hw->phy.init_hw(hw);
	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				     "LSC won't enable because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
	    (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* GPIO0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}
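/*
 * Illustrative sketch (not part of the driver): the link_speeds validation in
 * ngbe_dev_start() above accepts either RTE_ETH_LINK_SPEED_AUTONEG (0) or a
 * subset of the advertised 10M/100M/1G full-duplex bits. For example, to
 * limit negotiation to 100 Mb/s on an assumed port 0:
 *
 *	struct rte_eth_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_100M;
 *	rte_eth_dev_configure(0, 1, 1, &conf);
 *	rte_eth_dev_start(0);
 */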
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
	    (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* GPIO0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	ngbe_stop_hw(hw);

	ngbe_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
						   ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				     "intr callback unregister failed: %d",
				     ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = eth_ngbe_dev_uninit(dev);
	if (ret != 0)
		return ret;

	ret = eth_ngbe_dev_init(dev, NULL);

	return ret;
}

static int
ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
			       RTE_ETH_LINK_SPEED_10M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}
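/*
 * Illustrative sketch (not part of the driver): an application can use the
 * limits reported above to validate its ring sizes before queue setup. The
 * port and ring size values are assumed examples.
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t nb_rxd = 512;
 *
 *	rte_eth_dev_info_get(0, &info);
 *	if (nb_rxd > info.rx_desc_lim.nb_max ||
 *	    nb_rxd < info.rx_desc_lim.nb_min ||
 *	    nb_rxd % info.rx_desc_lim.nb_align != 0)
 *		nb_rxd = info.default_rxportconf.ring_size;
 */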
/* return 0 means link status changed, -1 means not changed */
int
ngbe_dev_link_update_share(struct rte_eth_dev *dev,
			   int wait_to_complete)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_eth_link link;
	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
	u32 lan_speed = 0;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	bool link_up;
	int err;
	int wait = 1;

	memset(&link, 0, sizeof(link));
	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ~RTE_ETH_LINK_SPEED_AUTONEG);

	hw->mac.get_link_status = true;

	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
	if (err != 0) {
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (!link_up)
		return rte_eth_linkstatus_set(dev, &link);

	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case NGBE_LINK_SPEED_UNKNOWN:
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		break;

	case NGBE_LINK_SPEED_10M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_10M;
		lan_speed = 0;
		break;

	case NGBE_LINK_SPEED_100M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		lan_speed = 1;
		break;

	case NGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_1G;
		lan_speed = 2;
		break;
	}

	if (hw->is_pf) {
		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
				  NGBE_LINK_SPEED_100M_FULL |
				  NGBE_LINK_SPEED_10M_FULL)) {
			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
			      NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
		}
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return ngbe_dev_link_update_share(dev, wait_to_complete);
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 * @param on
 *   Enable or Disable.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	ngbe_dev_link_status_print(dev);
	if (on != 0) {
		intr->mask_misc |= NGBE_ICRMISC_PHY;
		intr->mask_misc |= NGBE_ICRMISC_GPIO;
	} else {
		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
	}

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	u64 mask;

	mask = NGBE_ICR_MASK;
	mask &= (1ULL << NGBE_MISC_VEC_ID);
	intr->mask |= mask;
	intr->mask_misc |= NGBE_ICRMISC_GPIO;

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	u64 mask;

	mask = NGBE_ICR_MASK;
	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
	intr->mask |= mask;

	return 0;
}
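/*
 * Illustrative sketch (not part of the driver): with intr_conf.rxq enabled,
 * an application typically arms the per-queue interrupt and sleeps in epoll
 * until traffic arrives. Port 0, queue 0, and the pkts/BURST receive buffer
 * are assumed example values.
 *
 *	rte_eth_dev_rx_intr_ctl_q(0, 0, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	while (rte_eth_rx_burst(0, 0, pkts, BURST) == 0) {
 *		struct rte_epoll_event ev;
 *
 *		rte_eth_dev_rx_intr_enable(0, 0);
 *		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *		rte_eth_dev_rx_intr_disable(0, 0);
 *	}
 */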
/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;

	return 0;
}

/*
 * It reads ICR and sets flag for the link_update.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	/* clear all cause mask */
	ngbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & NGBE_ICRMISC_PHY)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & NGBE_ICRMISC_VFMBX)
		intr->flags |= NGBE_FLAG_MAILBOX;

	if (eicr & NGBE_ICRMISC_LNKSEC)
		intr->flags |= NGBE_FLAG_MACSEC;

	if (eicr & NGBE_ICRMISC_GPIO)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 */
static void
ngbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status == RTE_ETH_LINK_UP) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			     (int)(dev->data->port_id),
			     (unsigned int)link.link_speed,
			     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
			     "full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, "Port %d: Link Down",
			     (int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		     pci_dev->addr.domain,
		     pci_dev->addr.bus,
		     pci_dev->addr.devid,
		     pci_dev->addr.function);
}

/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	int64_t timeout;

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for predicting later */
		rte_eth_linkstatus_get(dev, &link);

		ngbe_dev_link_update(dev, 0);

		/* likely to up */
		if (link.link_status != RTE_ETH_LINK_UP)
			/* handle it 1 sec later, wait for it to be stable */
			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, wait for it to be stable */
			timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;

		ngbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      ngbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~NGBE_ICRMISC_PHY;

			intr->mask_orig = intr->mask;
			/* only disable all misc interrupts */
			intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	ngbe_enable_intr(dev);

	return 0;
}

/**
 * Interrupt handler which shall be registered as an alarm callback for
 * delayed handling of a specific interrupt, to wait for a stable NIC state.
 * As the NIC interrupt state is not stable for ngbe right after the link
 * goes down, it needs to wait 4 seconds to get a stable status.
 *
 * @param param
 *   The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t eicr;

	ngbe_disable_intr(hw);

	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		ngbe_dev_link_update(dev, 0);
		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
		ngbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					     NULL);
	}

	if (intr->flags & NGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					     NULL);
		intr->flags &= ~NGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;
	intr->mask = intr->mask_orig;
	intr->mask_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	ngbe_enable_intr(dev);
}
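/*
 * Illustrative sketch (not part of the driver): the LSC events forwarded via
 * rte_eth_dev_callback_process() above are delivered to any callback an
 * application registered; the handler name below is an assumed example.
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type type,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		struct rte_eth_link link;
 *
 *		RTE_SET_USED(type);
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		rte_eth_link_get_nowait(port_id, &link);
 *		printf("port %u link %s\n", port_id,
 *		       link.link_status ? "up" : "down");
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(0, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */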
 */
static void
ngbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ngbe_dev_interrupt_get_status(dev);
	ngbe_dev_interrupt_action(dev);
}

/**
 * Set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *   pointer to ngbe_hw struct
 * @direction
 *   0 for Rx, 1 for Tx, -1 for other causes
 * @queue
 *   queue to map the corresponding interrupt to
 * @msix_vector
 *   the vector to map to the corresponding queue
 */
void
ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
		  uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= NGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, NGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, NGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workaround for ICR lost */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
	}
}
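/*
 * Worked example (derived from the arithmetic above; values assumed): mapping
 * Rx queue 3 (direction = 0) to MSI-X vector 5 gives
 *
 *	idx = 16 * (3 & 1) + 8 * 0 = 16
 *
 * so bits [23:16] of NGBE_IVAR(3 >> 1) = NGBE_IVAR(1) are cleared and then
 * set to 5. Each 32-bit IVAR register thus holds four 8-bit cause entries:
 * two queues times Rx/Tx.
 */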
 */
static void
ngbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
	uint32_t vec = NGBE_MISC_VEC_ID;
	uint32_t gpie;

	/*
	 * Won't configure the MSI-X register if no mapping is done
	 * between intr vector and event fd; but if MSI-X has been
	 * enabled already, we need to configure auto clean, auto mask
	 * and throttling.
	 */
	gpie = rd32(hw, NGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & NGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = NGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, NGBE_GPIE);
	gpie |= NGBE_GPIE_MSIX;
	wr32(hw, NGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
		     queue_id++) {
			/* by default, 1:1 mapping */
			ngbe_set_ivar_map(hw, 0, queue_id, vec);
			rte_intr_vec_list_index_set(intr_handle,
						    queue_id, vec);
			if (vec < base + rte_intr_nb_efd_get(intr_handle)
				  - 1)
				vec++;
		}

		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	}
	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
	     NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
	     | NGBE_ITR_WRDSA);
}

static const struct eth_dev_ops ngbe_eth_dev_ops = {
	.dev_configure = ngbe_dev_configure,
	.dev_infos_get = ngbe_dev_info_get,
	.dev_start = ngbe_dev_start,
	.dev_stop = ngbe_dev_stop,
	.dev_close = ngbe_dev_close,
	.dev_reset = ngbe_dev_reset,
	.link_update = ngbe_dev_link_update,
	.rx_queue_start = ngbe_dev_rx_queue_start,
	.rx_queue_stop = ngbe_dev_rx_queue_stop,
	.tx_queue_start = ngbe_dev_tx_queue_start,
	.tx_queue_stop = ngbe_dev_tx_queue_stop,
	.rx_queue_setup = ngbe_dev_rx_queue_setup,
	.rx_queue_release = ngbe_dev_rx_queue_release,
	.tx_queue_setup = ngbe_dev_tx_queue_setup,
	.tx_queue_release = ngbe_dev_tx_queue_release,
};

RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);

#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
#endif
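/*
 * Illustrative usage sketch (not part of the driver): a minimal application
 * flow exercising the ops registered above on an ngbe port. Port, queue,
 * ring-size and mempool parameters are assumed example values.
 *
 *	struct rte_eth_conf conf;
 *	struct rte_mempool *mp;
 *	struct rte_eth_link link;
 *
 *	mp = rte_pktmbuf_pool_create("mb", 8192, 256, 0,
 *				     RTE_MBUF_DEFAULT_BUF_SIZE, 0);
 *	memset(&conf, 0, sizeof(conf));        // autoneg, no offloads
 *	rte_eth_dev_configure(0, 1, 1, &conf); // -> ngbe_dev_configure()
 *	rte_eth_rx_queue_setup(0, 0, 256, 0, NULL, mp);
 *	rte_eth_tx_queue_setup(0, 0, 256, 0, NULL);
 *	rte_eth_dev_start(0);                  // -> ngbe_dev_start()
 *	rte_eth_link_get_nowait(0, &link);     // -> ngbe_dev_link_update()
 */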