/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_regs_group.h"

static const struct reg_info ngbe_regs_general[] = {
	{NGBE_RST, 1, 1, "NGBE_RST"},
	{NGBE_STAT, 1, 1, "NGBE_STAT"},
	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_nvm[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_fctl_others[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_tx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_wakeup[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_mac[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_diagnostic[] = {
	{0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *ngbe_regs_others[] = {
	ngbe_regs_general,
	ngbe_regs_nvm,
	ngbe_regs_interrupt,
	ngbe_regs_fctl_others,
	ngbe_regs_rxdma,
	ngbe_regs_rx,
	ngbe_regs_tx,
	ngbe_regs_wakeup,
	ngbe_regs_mac,
	ngbe_regs_diagnostic,
	NULL};

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);
static void ngbe_pbthresh_set(struct rte_eth_dev *dev);

#define NGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(tx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(tx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),
	HW_XSTAT(rx_dma_drop),
	HW_XSTAT(tx_dma_drop),
	HW_XSTAT(tx_secdrp_packets),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_cnt),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* PB Stats */
	HW_XSTAT(rx_up_dropped),
	HW_XSTAT(rdb_pkt_cnt),
	HW_XSTAT(rdb_repli_cnt),
	HW_XSTAT(rdb_drp_cnt),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC RxTx */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
			   sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
			   sizeof(rte_ngbe_qp_strings[0]))

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
	ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	u32 led_conf = 0;
	int err, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_queue_count = ngbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * Rx and Tx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* Tx queue function in primary, set by last queue initialized
		 * Tx queue may not be initialized by primary process
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default Tx function if we get here */
			PMD_INIT_LOG(NOTICE,
				     "No Tx queues configured yet. Using default Tx function.");
		}

		ngbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->back = pci_dev;
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) {
		hw->sub_system_id = pci_dev->id.subsystem_device_id;
	} else {
		u32 ssid;

		ssid = ngbe_flash_read_dword(hw, 0xFFFDC);
		if (ssid == 0x1) {
			PMD_INIT_LOG(ERR,
				"Read of internal subsystem device id failed");
			return -ENODEV;
		}
		hw->sub_system_id = (u16)ssid >> 8 | (u16)ssid << 8;
	}
	ngbe_map_device_id(hw);

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);
	ngbe_set_ncsi_status(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ngbe_fc_full;
	hw->fc.current_mode = ngbe_fc_full;
	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
	hw->fc.low_water = NGBE_FC_XON_LOTH;
	hw->fc.high_water = NGBE_FC_XOFF_HITH;
	hw->fc.send_xon = 1;

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->phy.led_oem_chk(hw, &led_conf);
	if (err == 0)
		hw->led_conf = led_conf;
	else
		hw->led_conf = 0xFFFF;

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	ngbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = ngbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
		     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct ngbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		ngbe_vlan_hw_strip_enable(dev, queue);
	else
		ngbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			!(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg |= NGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg &= ~NGBE_RXCFG_VLAN;
	}
	rxcfg &= ~NGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		ngbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
		ngbe_dev_rx_queue_start(dev, queue);
	}
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
		   enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, NGBE_PORTCTL);

	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				"Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_LSB_MASK,
				NGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16-bits is valid */
			wr32m(hw, NGBE_EXTAG,
				NGBE_EXTAG_VLAN_MASK,
				NGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_MSB_MASK,
				NGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
	vlnctrl |= NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < NGBE_VFTA_SIZE; i++)
		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
	struct ngbe_rx_queue *rxq;

	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		NGBE_SET_HWSTRIP(hwstrip, queue);
	else
		NGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl &= ~NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record those setting for HW strip per queue */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl |= NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record those setting for HW strip per queue */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_VLANEXT;
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct ngbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			ngbe_vlan_hw_strip_enable(dev, i);
		else
			ngbe_vlan_hw_strip_disable(dev, i);
	}
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct ngbe_rx_queue *rxq;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		ngbe_vlan_hw_strip_config(dev);

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ngbe_vlan_hw_filter_enable(dev);
		else
			ngbe_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			ngbe_vlan_hw_extend_enable(dev);
		else
			ngbe_vlan_hw_extend_disable(dev);
	}

	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
			ngbe_qinq_hw_strip_enable(dev);
		else
			ngbe_qinq_hw_strip_disable(dev);
	}

	return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ngbe_config_vlan_strip_on_all_queues(dev, mask);

	ngbe_vlan_offload_config(dev, mask);

	return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
	 * allocation Rx preconditions we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;
	adapter->rx_vec_allowed = true;

	return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO | NGBE_ICRMISC_HEAT;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;

	/* reinitialize adapter, this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	ngbe_set_pcie_master(hw, true);

	/* configure PF module if SRIOV enabled */
	ngbe_pf_host_configure(dev);

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbe_vlan_offload_config(dev, mask);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	hw->mac.setup_pba(hw);
	ngbe_pbthresh_set(dev);
	ngbe_configure_port(dev);

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	hw->lsc = dev->data->dev_conf.intr_conf.lsc;

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	allowed_speeds = 0;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	if (!hw->ncsi_enabled) {
		err = hw->phy.init_hw(hw);
		if (err != 0) {
			PMD_INIT_LOG(ERR, "PHY init failed");
			goto error;
		}
	}
	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				     "LSC won't enable because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if (hw->gpio_ctl) {
		/* gpio0 is used to power on/off control */
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		goto out;

	PMD_INIT_FUNC_TRACE();

	if (hw->gpio_ctl) {
		/* gpio0 is used to power on/off control */
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	ngbe_stop_hw(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	ngbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	ngbe_set_pcie_master(hw, true);

	adapter->rss_reta_updated = 0;

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

out:
	/* close phy to prevent reset in dev_close from restarting physical link */
	if (!(hw->wol_enabled || hw->ncsi_enabled))
		hw->phy.set_phy_power(hw, false);

	return 0;
}

/*
 * Set device link up: power on.
 */
static int
ngbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	if (!(hw->ncsi_enabled || hw->wol_enabled))
		hw->phy.set_phy_power(hw, true);

	return 0;
}

/*
 * Set device link down: power off.
 */
static int
ngbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	if (!(hw->ncsi_enabled || hw->wol_enabled))
		hw->phy.set_phy_power(hw, false);

	return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	ngbe_set_pcie_master(hw, false);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	/* uninitialize PF if max_vfs not zero */
	ngbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset PF port, it should notify all
	 * its VFs to make them align with it. The detailed notification
	 * mechanism is PMD specific. As to ngbe PF, it is rather complex.
	 * To avoid unexpected behavior in VF, currently reset of PF with
	 * SR-IOV activation is not supported. It might be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_ngbe_dev_uninit(dev);
	if (ret != 0)
		return ret;

	ret = eth_ngbe_dev_init(dev, NULL);

	return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
{ \
	uint32_t current_counter = rd32(hw, reg); \
	if (current_counter < last_counter) \
		current_counter += 0x100000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFLL; \
}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
{ \
	uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
	uint64_t current_counter_msb = rd32(hw, reg_msb); \
	uint64_t current_counter = (current_counter_msb << 32) | \
		current_counter_lsb; \
	if (current_counter < last_counter) \
		current_counter += 0x1000000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFFLL; \
}

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
			  struct ngbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
			hw->qp_last[i].rx_qp_bc_packets,
			hw_stats->qp[i].rx_qp_bc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
			hw->qp_last[i].tx_qp_mc_packets,
			hw_stats->qp[i].tx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
			hw->qp_last[i].tx_qp_bc_packets,
			hw_stats->qp[i].tx_qp_bc_packets);
	}

	/* PB Stats */
	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_dma_drop += rd32(hw, NGBE_DMATXDROP);
	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
	hw_stats->rx_oversize_cnt += rd32(hw, NGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, NGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, NGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, NGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, NGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, NGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, NGBE_LSECRX_BADPKT(i));
	}
	for (i = 0; i < 4; i++) {
		hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
		hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
	}
	hw_stats->rx_total_missed_packets =
			hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct ngbe_stat_mappings *stat_mappings =
			NGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	ngbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
	for (i = 0; i < NGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed = hw_stats->rx_total_missed_packets +
			 hw_stats->rx_dma_drop;
	stats->ierrors = hw_stats->rx_crc_errors +
			 hw_stats->rx_mac_short_packet_dropped +
			 hw_stats->rx_length_errors +
			 hw_stats->rx_undersize_errors +
			 hw_stats->rdb_drp_cnt +
			 hw_stats->rx_illegal_byte_errors +
			 hw_stats->rx_error_bytes +
			 hw_stats->rx_fragment_errors;

	/* Tx Errors */
	stats->oerrors = 0;
	return 0;
}

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	return NGBE_NB_HW_STATS +
	       NGBE_NB_QP_STATS * nb_queues;
}

static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		snprintf(name, size, "[hw]%s",
			rte_ngbe_stats_strings[id].name);
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		snprintf(name, size, "[q%u]%s", nb,
			rte_ngbe_qp_strings[st].name);
		return 0;
	}
	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

	return -(int)(id + 1);
}

static inline int
ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		*offset = rte_ngbe_stats_strings[id].offset;
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		*offset = rte_ngbe_qp_strings[st].offset +
			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
		return 0;
	}

	return -1;
}

static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
	unsigned int i, count;

	count = ngbe_xstats_calc_num(dev);
	if (xstats_names == NULL)
		return count;

	/* Note: limit >= cnt_stats checked upstream
	 * in rte_eth_xstats_names()
	 */
	limit = min(limit, count);

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(i, xstats_names[i].name,
			sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
	}

	return i;
}

static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
	const uint64_t *ids,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int limit)
{
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
				sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
	}

	return i;
}

static int
ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (xstats == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset = 0;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
		xstats[i].id = i;
	}

	return i;
}

static int
ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
		     unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (values == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			  uint64_t *values, unsigned int limit)
{
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_(dev, values, limit);

	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(ids[i], &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

static int
ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret;

	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);

	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;

	return 0;
}

static int
ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = NGBE_MAX_MTU + NGBE_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = NGBE_MAX_MTU;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
				RTE_ETH_LINK_SPEED_10M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}

const uint32_t *
ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
	    dev->rx_pkt_burst == ngbe_recv_pkts_vec ||
	    dev->rx_pkt_burst == ngbe_recv_scattered_pkts_vec ||
#endif
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
		return ngbe_get_supported_ptypes(no_of_elements);

	return NULL;
}

static void
ngbe_dev_overheat(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	s32 temp_state;

	temp_state = hw->mac.check_overtemp(hw);
	if (!temp_state)
		return;

	if (temp_state == NGBE_ERR_UNDERTEMP) {
		PMD_DRV_LOG(CRIT, "Network adapter has been started again, "
			"since the temperature has returned to the normal state.");
		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, NGBE_PBRXCTL_ENA);
		ngbe_dev_set_link_up(dev);
	} else if (temp_state == NGBE_ERR_OVERTEMP) {
1907 PMD_DRV_LOG(CRIT, "Network adapter has been stopped because it has over heated."); 1908 wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0); 1909 ngbe_dev_set_link_down(dev); 1910 } 1911 } 1912 1913 /* return 0 means link status changed, -1 means not changed */ 1914 int 1915 ngbe_dev_link_update_share(struct rte_eth_dev *dev, 1916 int wait_to_complete) 1917 { 1918 struct ngbe_hw *hw = ngbe_dev_hw(dev); 1919 struct rte_eth_link link; 1920 u32 link_speed = NGBE_LINK_SPEED_UNKNOWN; 1921 u32 lan_speed = 0; 1922 bool link_up; 1923 int err; 1924 int wait = 1; 1925 1926 memset(&link, 0, sizeof(link)); 1927 link.link_status = RTE_ETH_LINK_DOWN; 1928 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 1929 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 1930 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 1931 ~RTE_ETH_LINK_SPEED_AUTONEG); 1932 1933 hw->mac.get_link_status = true; 1934 1935 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 1936 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 1937 wait = 0; 1938 1939 err = hw->mac.check_link(hw, &link_speed, &link_up, wait); 1940 if (err != 0) { 1941 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 1942 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1943 return rte_eth_linkstatus_set(dev, &link); 1944 } 1945 1946 if (!link_up) 1947 return rte_eth_linkstatus_set(dev, &link); 1948 1949 link.link_status = RTE_ETH_LINK_UP; 1950 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1951 1952 switch (link_speed) { 1953 default: 1954 case NGBE_LINK_SPEED_UNKNOWN: 1955 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 1956 break; 1957 1958 case NGBE_LINK_SPEED_10M_FULL: 1959 link.link_speed = RTE_ETH_SPEED_NUM_10M; 1960 lan_speed = 0; 1961 break; 1962 1963 case NGBE_LINK_SPEED_100M_FULL: 1964 link.link_speed = RTE_ETH_SPEED_NUM_100M; 1965 lan_speed = 1; 1966 break; 1967 1968 case NGBE_LINK_SPEED_1GB_FULL: 1969 link.link_speed = RTE_ETH_SPEED_NUM_1G; 1970 lan_speed = 2; 1971 break; 1972 } 1973 1974 if (hw->is_pf) { 1975 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed); 1976 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL | 1977 NGBE_LINK_SPEED_100M_FULL | 1978 NGBE_LINK_SPEED_10M_FULL)) { 1979 wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK, 1980 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE); 1981 } 1982 wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_PROMISC, 1983 NGBE_MACRXFLT_PROMISC); 1984 } 1985 1986 return rte_eth_linkstatus_set(dev, &link); 1987 } 1988 1989 static int 1990 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 1991 { 1992 return ngbe_dev_link_update_share(dev, wait_to_complete); 1993 } 1994 1995 static int 1996 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 1997 { 1998 struct ngbe_hw *hw = ngbe_dev_hw(dev); 1999 uint32_t fctrl; 2000 2001 fctrl = rd32(hw, NGBE_PSRCTL); 2002 fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP); 2003 wr32(hw, NGBE_PSRCTL, fctrl); 2004 2005 return 0; 2006 } 2007 2008 static int 2009 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 2010 { 2011 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2012 uint32_t fctrl; 2013 2014 fctrl = rd32(hw, NGBE_PSRCTL); 2015 fctrl &= (~NGBE_PSRCTL_UCP); 2016 if (dev->data->all_multicast == 1) 2017 fctrl |= NGBE_PSRCTL_MCP; 2018 else 2019 fctrl &= (~NGBE_PSRCTL_MCP); 2020 wr32(hw, NGBE_PSRCTL, fctrl); 2021 2022 return 0; 2023 } 2024 2025 static int 2026 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 2027 { 2028 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2029 uint32_t fctrl; 2030 2031 fctrl = rd32(hw, NGBE_PSRCTL); 2032 fctrl |= NGBE_PSRCTL_MCP; 2033 wr32(hw, 
NGBE_PSRCTL, fctrl); 2034 2035 return 0; 2036 } 2037 2038 static int 2039 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 2040 { 2041 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2042 uint32_t fctrl; 2043 2044 if (dev->data->promiscuous == 1) 2045 return 0; /* must remain in all_multicast mode */ 2046 2047 fctrl = rd32(hw, NGBE_PSRCTL); 2048 fctrl &= (~NGBE_PSRCTL_MCP); 2049 wr32(hw, NGBE_PSRCTL, fctrl); 2050 2051 return 0; 2052 } 2053 2054 /** 2055 * It clears the interrupt causes and enables the interrupt. 2056 * It will be called once only during NIC initialized. 2057 * 2058 * @param dev 2059 * Pointer to struct rte_eth_dev. 2060 * @param on 2061 * Enable or Disable. 2062 * 2063 * @return 2064 * - On success, zero. 2065 * - On failure, a negative value. 2066 */ 2067 static int 2068 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 2069 { 2070 struct ngbe_interrupt *intr = ngbe_dev_intr(dev); 2071 2072 ngbe_dev_link_status_print(dev); 2073 if (on != 0) { 2074 intr->mask_misc |= NGBE_ICRMISC_PHY; 2075 intr->mask_misc |= NGBE_ICRMISC_GPIO; 2076 } else { 2077 intr->mask_misc &= ~NGBE_ICRMISC_PHY; 2078 intr->mask_misc &= ~NGBE_ICRMISC_GPIO; 2079 } 2080 2081 return 0; 2082 } 2083 2084 /** 2085 * It clears the interrupt causes and enables the interrupt. 2086 * It will be called once only during NIC initialized. 2087 * 2088 * @param dev 2089 * Pointer to struct rte_eth_dev. 2090 * 2091 * @return 2092 * - On success, zero. 2093 * - On failure, a negative value. 2094 */ 2095 static int 2096 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev) 2097 { 2098 struct ngbe_interrupt *intr = ngbe_dev_intr(dev); 2099 u64 mask; 2100 2101 mask = NGBE_ICR_MASK; 2102 mask &= (1ULL << NGBE_MISC_VEC_ID); 2103 intr->mask |= mask; 2104 intr->mask_misc |= NGBE_ICRMISC_GPIO; 2105 2106 return 0; 2107 } 2108 2109 /** 2110 * It clears the interrupt causes and enables the interrupt. 2111 * It will be called once only during NIC initialized. 2112 * 2113 * @param dev 2114 * Pointer to struct rte_eth_dev. 2115 * 2116 * @return 2117 * - On success, zero. 2118 * - On failure, a negative value. 2119 */ 2120 static int 2121 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) 2122 { 2123 struct ngbe_interrupt *intr = ngbe_dev_intr(dev); 2124 u64 mask; 2125 2126 mask = NGBE_ICR_MASK; 2127 mask &= ~((1ULL << NGBE_RX_VEC_START) - 1); 2128 intr->mask |= mask; 2129 2130 return 0; 2131 } 2132 2133 /** 2134 * It clears the interrupt causes and enables the interrupt. 2135 * It will be called once only during NIC initialized. 2136 * 2137 * @param dev 2138 * Pointer to struct rte_eth_dev. 2139 * 2140 * @return 2141 * - On success, zero. 2142 * - On failure, a negative value. 2143 */ 2144 static int 2145 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) 2146 { 2147 struct ngbe_interrupt *intr = ngbe_dev_intr(dev); 2148 2149 intr->mask_misc |= NGBE_ICRMISC_LNKSEC; 2150 2151 return 0; 2152 } 2153 2154 /* 2155 * It reads ICR and sets flag for the link_update. 2156 * 2157 * @param dev 2158 * Pointer to struct rte_eth_dev. 2159 * 2160 * @return 2161 * - On success, zero. 2162 * - On failure, a negative value. 
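 *
 * The misc cause word is read from the interrupt status block (ISB),
 * decoded into intr->flags and then cleared.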
2163 */ 2164 static int 2165 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev) 2166 { 2167 uint32_t eicr; 2168 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2169 struct ngbe_interrupt *intr = ngbe_dev_intr(dev); 2170 2171 /* read-on-clear nic registers here */ 2172 eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC]; 2173 PMD_DRV_LOG(DEBUG, "eicr %x", eicr); 2174 2175 intr->flags = 0; 2176 2177 /* set flag for async link update */ 2178 if (eicr & NGBE_ICRMISC_PHY) 2179 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE; 2180 2181 if (eicr & NGBE_ICRMISC_VFMBX) 2182 intr->flags |= NGBE_FLAG_MAILBOX; 2183 2184 if (eicr & NGBE_ICRMISC_LNKSEC) 2185 intr->flags |= NGBE_FLAG_MACSEC; 2186 2187 if (eicr & NGBE_ICRMISC_GPIO) 2188 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE; 2189 2190 if (eicr & NGBE_ICRMISC_HEAT) 2191 intr->flags |= NGBE_FLAG_OVERHEAT; 2192 2193 ((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0; 2194 2195 return 0; 2196 } 2197 2198 /** 2199 * It gets and then prints the link status. 2200 * 2201 * @param dev 2202 * Pointer to struct rte_eth_dev. 2203 * 2204 * @return 2205 * - On success, zero. 2206 * - On failure, a negative value. 2207 */ 2208 static void 2209 ngbe_dev_link_status_print(struct rte_eth_dev *dev) 2210 { 2211 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2212 struct rte_eth_link link; 2213 2214 rte_eth_linkstatus_get(dev, &link); 2215 2216 if (link.link_status == RTE_ETH_LINK_UP) { 2217 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", 2218 (int)(dev->data->port_id), 2219 (unsigned int)link.link_speed, 2220 link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ? 2221 "full-duplex" : "half-duplex"); 2222 } else { 2223 PMD_INIT_LOG(INFO, " Port %d: Link Down", 2224 (int)(dev->data->port_id)); 2225 } 2226 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 2227 pci_dev->addr.domain, 2228 pci_dev->addr.bus, 2229 pci_dev->addr.devid, 2230 pci_dev->addr.function); 2231 } 2232 2233 /* 2234 * It executes link_update after knowing an interrupt occurred. 2235 * 2236 * @param dev 2237 * Pointer to struct rte_eth_dev. 2238 * 2239 * @return 2240 * - On success, zero. 2241 * - On failure, a negative value. 2242 */ 2243 static int 2244 ngbe_dev_interrupt_action(struct rte_eth_dev *dev) 2245 { 2246 struct ngbe_interrupt *intr = ngbe_dev_intr(dev); 2247 2248 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); 2249 2250 if (intr->flags & NGBE_FLAG_MAILBOX) { 2251 ngbe_pf_mbx_process(dev); 2252 intr->flags &= ~NGBE_FLAG_MAILBOX; 2253 } 2254 2255 if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) { 2256 struct rte_eth_link link; 2257 2258 /*get the link status before link update, for predicting later*/ 2259 rte_eth_linkstatus_get(dev, &link); 2260 2261 ngbe_dev_link_update(dev, 0); 2262 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE; 2263 ngbe_dev_link_status_print(dev); 2264 if (dev->data->dev_link.link_speed != link.link_speed) 2265 rte_eth_dev_callback_process(dev, 2266 RTE_ETH_EVENT_INTR_LSC, NULL); 2267 } 2268 2269 if (intr->flags & NGBE_FLAG_OVERHEAT) { 2270 ngbe_dev_overheat(dev); 2271 intr->flags &= ~NGBE_FLAG_OVERHEAT; 2272 } 2273 2274 PMD_DRV_LOG(DEBUG, "enable intr immediately"); 2275 ngbe_enable_intr(dev); 2276 2277 return 0; 2278 } 2279 2280 /** 2281 * Interrupt handler triggered by NIC for handling 2282 * specific interrupt. 2283 * 2284 * @param param 2285 * The address of parameter (struct rte_eth_dev *) registered before. 
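 *
 * Reads the pending interrupt causes and runs the deferred actions
 * (mailbox processing, link update, overheat handling).
 * A sketch of how such a handler is typically registered during device
 * init (illustrative only, not copied from this file):
 *   rte_intr_callback_register(pci_dev->intr_handle,
 *                              ngbe_dev_interrupt_handler, eth_dev);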
2286 */ 2287 static void 2288 ngbe_dev_interrupt_handler(void *param) 2289 { 2290 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2291 2292 ngbe_dev_interrupt_get_status(dev); 2293 ngbe_dev_interrupt_action(dev); 2294 } 2295 2296 static int 2297 ngbe_dev_led_on(struct rte_eth_dev *dev) 2298 { 2299 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2300 return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP; 2301 } 2302 2303 static int 2304 ngbe_dev_led_off(struct rte_eth_dev *dev) 2305 { 2306 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2307 return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP; 2308 } 2309 2310 static int 2311 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2312 { 2313 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2314 uint32_t mflcn_reg; 2315 uint32_t fccfg_reg; 2316 int rx_pause; 2317 int tx_pause; 2318 2319 fc_conf->pause_time = hw->fc.pause_time; 2320 fc_conf->high_water = hw->fc.high_water; 2321 fc_conf->low_water = hw->fc.low_water; 2322 fc_conf->send_xon = hw->fc.send_xon; 2323 fc_conf->autoneg = !hw->fc.disable_fc_autoneg; 2324 2325 /* 2326 * Return rx_pause status according to actual setting of 2327 * RXFCCFG register. 2328 */ 2329 mflcn_reg = rd32(hw, NGBE_RXFCCFG); 2330 if (mflcn_reg & NGBE_RXFCCFG_FC) 2331 rx_pause = 1; 2332 else 2333 rx_pause = 0; 2334 2335 /* 2336 * Return tx_pause status according to actual setting of 2337 * TXFCCFG register. 2338 */ 2339 fccfg_reg = rd32(hw, NGBE_TXFCCFG); 2340 if (fccfg_reg & NGBE_TXFCCFG_FC) 2341 tx_pause = 1; 2342 else 2343 tx_pause = 0; 2344 2345 if (rx_pause && tx_pause) 2346 fc_conf->mode = RTE_ETH_FC_FULL; 2347 else if (rx_pause) 2348 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 2349 else if (tx_pause) 2350 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 2351 else 2352 fc_conf->mode = RTE_ETH_FC_NONE; 2353 2354 return 0; 2355 } 2356 2357 static int 2358 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2359 { 2360 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2361 int err; 2362 uint32_t rx_buf_size; 2363 uint32_t max_high_water; 2364 enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = { 2365 ngbe_fc_none, 2366 ngbe_fc_rx_pause, 2367 ngbe_fc_tx_pause, 2368 ngbe_fc_full 2369 }; 2370 2371 PMD_INIT_FUNC_TRACE(); 2372 2373 rx_buf_size = rd32(hw, NGBE_PBRXSIZE); 2374 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 2375 2376 /* 2377 * At least reserve one Ethernet frame for watermark 2378 * high_water/low_water in kilo bytes for ngbe 2379 */ 2380 max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10; 2381 if (fc_conf->high_water > max_high_water || 2382 fc_conf->high_water < fc_conf->low_water) { 2383 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 2384 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 2385 return -EINVAL; 2386 } 2387 2388 hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode]; 2389 hw->fc.pause_time = fc_conf->pause_time; 2390 hw->fc.high_water = fc_conf->high_water; 2391 hw->fc.low_water = fc_conf->low_water; 2392 hw->fc.send_xon = fc_conf->send_xon; 2393 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 2394 2395 err = hw->mac.fc_enable(hw); 2396 2397 /* Not negotiated is not an error case */ 2398 if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) { 2399 wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK, 2400 (fc_conf->mac_ctrl_frame_fwd 2401 ? 
NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP)); 2402 ngbe_flush(hw); 2403 2404 return 0; 2405 } 2406 2407 PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err); 2408 return -EIO; 2409 } 2410 2411 /* Additional bittime to account for NGBE framing */ 2412 #define NGBE_ETH_FRAMING 20 2413 2414 /* 2415 * ngbe_fc_hpbthresh_set - calculate high water mark for flow control 2416 * 2417 * @dv_id: device interface delay 2418 * @pb: packet buffer to calculate 2419 */ 2420 static s32 2421 ngbe_fc_hpbthresh_set(struct rte_eth_dev *dev) 2422 { 2423 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2424 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2425 u32 max_frame_size, tc, dv_id, rx_pb; 2426 s32 kb, marker; 2427 2428 /* Calculate max LAN frame size */ 2429 max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK); 2430 tc = max_frame_size + NGBE_ETH_FRAMING; 2431 2432 /* Calculate delay value for device */ 2433 dv_id = NGBE_DV(tc, tc); 2434 2435 /* Loopback switch introduces additional latency */ 2436 if (pci_dev->max_vfs) 2437 dv_id += NGBE_B2BT(tc); 2438 2439 /* Delay value is calculated in bit times convert to KB */ 2440 kb = NGBE_BT2KB(dv_id); 2441 rx_pb = rd32(hw, NGBE_PBRXSIZE) >> 10; 2442 2443 marker = rx_pb - kb; 2444 2445 /* It is possible that the packet buffer is not large enough 2446 * to provide required headroom. In this case throw an error 2447 * to user and do the best we can. 2448 */ 2449 if (marker < 0) { 2450 PMD_DRV_LOG(WARNING, "Packet Buffer can not provide enough headroom to support flow control."); 2451 marker = tc + 1; 2452 } 2453 2454 return marker; 2455 } 2456 2457 /* 2458 * ngbe_fc_lpbthresh_set - calculate low water mark for flow control 2459 * 2460 * @dv_id: device interface delay 2461 */ 2462 static s32 2463 ngbe_fc_lpbthresh_set(struct rte_eth_dev *dev) 2464 { 2465 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2466 u32 max_frame_size, tc, dv_id; 2467 s32 kb; 2468 2469 /* Calculate max LAN frame size */ 2470 max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK); 2471 tc = max_frame_size + NGBE_ETH_FRAMING; 2472 2473 /* Calculate delay value for device */ 2474 dv_id = NGBE_LOW_DV(tc); 2475 2476 /* Delay value is calculated in bit times convert to KB */ 2477 kb = NGBE_BT2KB(dv_id); 2478 2479 return kb; 2480 } 2481 2482 /* 2483 * ngbe_pbthresh_setup - calculate and setup high low water marks 2484 */ 2485 static void 2486 ngbe_pbthresh_set(struct rte_eth_dev *dev) 2487 { 2488 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2489 2490 hw->fc.high_water = ngbe_fc_hpbthresh_set(dev); 2491 hw->fc.low_water = ngbe_fc_lpbthresh_set(dev); 2492 2493 /* Low water marks must not be larger than high water marks */ 2494 if (hw->fc.low_water > hw->fc.high_water) 2495 hw->fc.low_water = 0; 2496 } 2497 2498 int 2499 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev, 2500 struct rte_eth_rss_reta_entry64 *reta_conf, 2501 uint16_t reta_size) 2502 { 2503 uint8_t i, j, mask; 2504 uint32_t reta; 2505 uint16_t idx, shift; 2506 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2507 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2508 2509 PMD_INIT_FUNC_TRACE(); 2510 2511 if (!hw->is_pf) { 2512 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " 2513 "NIC."); 2514 return -ENOTSUP; 2515 } 2516 2517 if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) { 2518 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 2519 "(%d) doesn't match the number hardware can supported " 2520 "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128); 2521 return -EINVAL; 2522 } 2523 2524 for (i = 0; i < reta_size; i += 4) { 2525 idx = i 
/ RTE_ETH_RETA_GROUP_SIZE; 2526 shift = i % RTE_ETH_RETA_GROUP_SIZE; 2527 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF); 2528 if (!mask) 2529 continue; 2530 2531 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2); 2532 for (j = 0; j < 4; j++) { 2533 if (RS8(mask, j, 0x1)) { 2534 reta &= ~(MS32(8 * j, 0xFF)); 2535 reta |= LS32(reta_conf[idx].reta[shift + j], 2536 8 * j, 0xFF); 2537 } 2538 } 2539 wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta); 2540 } 2541 adapter->rss_reta_updated = 1; 2542 2543 return 0; 2544 } 2545 2546 int 2547 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev, 2548 struct rte_eth_rss_reta_entry64 *reta_conf, 2549 uint16_t reta_size) 2550 { 2551 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2552 uint8_t i, j, mask; 2553 uint32_t reta; 2554 uint16_t idx, shift; 2555 2556 PMD_INIT_FUNC_TRACE(); 2557 2558 if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) { 2559 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 2560 "(%d) doesn't match the number hardware can supported " 2561 "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128); 2562 return -EINVAL; 2563 } 2564 2565 for (i = 0; i < reta_size; i += 4) { 2566 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2567 shift = i % RTE_ETH_RETA_GROUP_SIZE; 2568 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF); 2569 if (!mask) 2570 continue; 2571 2572 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2); 2573 for (j = 0; j < 4; j++) { 2574 if (RS8(mask, j, 0x1)) 2575 reta_conf[idx].reta[shift + j] = 2576 (uint16_t)RS32(reta, 8 * j, 0xFF); 2577 } 2578 } 2579 2580 return 0; 2581 } 2582 2583 static int 2584 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 2585 uint32_t index, uint32_t pool) 2586 { 2587 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2588 uint32_t enable_addr = 1; 2589 2590 return ngbe_set_rar(hw, index, mac_addr->addr_bytes, 2591 pool, enable_addr); 2592 } 2593 2594 static void 2595 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) 2596 { 2597 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2598 2599 ngbe_clear_rar(hw, index); 2600 } 2601 2602 static int 2603 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) 2604 { 2605 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2606 2607 ngbe_remove_rar(dev, 0); 2608 ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs); 2609 2610 return 0; 2611 } 2612 2613 static int 2614 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 2615 { 2616 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2617 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 2618 struct rte_eth_dev_data *dev_data = dev->data; 2619 2620 /* If device is started, refuse mtu that requires the support of 2621 * scattered packets when this feature has not been enabled before. 
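 * The new frame (plus a possible double VLAN tag) must still fit into
 * a single Rx buffer: min_rx_buf_size minus the mbuf headroom.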
2622 */ 2623 if (dev_data->dev_started && !dev_data->scattered_rx && 2624 (frame_size + 2 * RTE_VLAN_HLEN > 2625 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 2626 PMD_INIT_LOG(ERR, "Stop port first."); 2627 return -EINVAL; 2628 } 2629 2630 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK, 2631 NGBE_FRMSZ_MAX(frame_size)); 2632 2633 return 0; 2634 } 2635 2636 static uint32_t 2637 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr) 2638 { 2639 uint32_t vector = 0; 2640 2641 switch (hw->mac.mc_filter_type) { 2642 case 0: /* use bits [47:36] of the address */ 2643 vector = ((uc_addr->addr_bytes[4] >> 4) | 2644 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 2645 break; 2646 case 1: /* use bits [46:35] of the address */ 2647 vector = ((uc_addr->addr_bytes[4] >> 3) | 2648 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 2649 break; 2650 case 2: /* use bits [45:34] of the address */ 2651 vector = ((uc_addr->addr_bytes[4] >> 2) | 2652 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 2653 break; 2654 case 3: /* use bits [43:32] of the address */ 2655 vector = ((uc_addr->addr_bytes[4]) | 2656 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 2657 break; 2658 default: /* Invalid mc_filter_type */ 2659 break; 2660 } 2661 2662 /* vector can only be 12-bits or boundary will be exceeded */ 2663 vector &= 0xFFF; 2664 return vector; 2665 } 2666 2667 static int 2668 ngbe_uc_hash_table_set(struct rte_eth_dev *dev, 2669 struct rte_ether_addr *mac_addr, uint8_t on) 2670 { 2671 uint32_t vector; 2672 uint32_t uta_idx; 2673 uint32_t reg_val; 2674 uint32_t uta_mask; 2675 uint32_t psrctl; 2676 2677 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2678 struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev); 2679 2680 vector = ngbe_uta_vector(hw, mac_addr); 2681 uta_idx = (vector >> 5) & 0x7F; 2682 uta_mask = 0x1UL << (vector & 0x1F); 2683 2684 if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask)) 2685 return 0; 2686 2687 reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx)); 2688 if (on) { 2689 uta_info->uta_in_use++; 2690 reg_val |= uta_mask; 2691 uta_info->uta_shadow[uta_idx] |= uta_mask; 2692 } else { 2693 uta_info->uta_in_use--; 2694 reg_val &= ~uta_mask; 2695 uta_info->uta_shadow[uta_idx] &= ~uta_mask; 2696 } 2697 2698 wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val); 2699 2700 psrctl = rd32(hw, NGBE_PSRCTL); 2701 if (uta_info->uta_in_use > 0) 2702 psrctl |= NGBE_PSRCTL_UCHFENA; 2703 else 2704 psrctl &= ~NGBE_PSRCTL_UCHFENA; 2705 2706 psrctl &= ~NGBE_PSRCTL_ADHF12_MASK; 2707 psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type); 2708 wr32(hw, NGBE_PSRCTL, psrctl); 2709 2710 return 0; 2711 } 2712 2713 static int 2714 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 2715 { 2716 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2717 struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev); 2718 uint32_t psrctl; 2719 int i; 2720 2721 if (on) { 2722 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 2723 uta_info->uta_shadow[i] = ~0; 2724 wr32(hw, NGBE_UCADDRTBL(i), ~0); 2725 } 2726 } else { 2727 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 2728 uta_info->uta_shadow[i] = 0; 2729 wr32(hw, NGBE_UCADDRTBL(i), 0); 2730 } 2731 } 2732 2733 psrctl = rd32(hw, NGBE_PSRCTL); 2734 if (on) 2735 psrctl |= NGBE_PSRCTL_UCHFENA; 2736 else 2737 psrctl &= ~NGBE_PSRCTL_UCHFENA; 2738 2739 psrctl &= ~NGBE_PSRCTL_ADHF12_MASK; 2740 psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type); 2741 wr32(hw, NGBE_PSRCTL, psrctl); 2742 2743 return 0; 2744 } 2745 2746 static int 2747 ngbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 
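/* Unmask the interrupt of the given Rx queue and re-enable the device
 * interrupt so that the corresponding event fd can be signalled again.
 */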
2748 { 2749 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2750 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2751 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2752 uint32_t mask; 2753 2754 mask = rd32(hw, NGBE_IMC(0)); 2755 mask |= (1 << queue_id); 2756 wr32(hw, NGBE_IMC(0), mask); 2757 rte_intr_enable(intr_handle); 2758 2759 return 0; 2760 } 2761 2762 static int 2763 ngbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 2764 { 2765 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2766 uint32_t mask; 2767 2768 mask = rd32(hw, NGBE_IMS(0)); 2769 mask |= (1 << queue_id); 2770 wr32(hw, NGBE_IMS(0), mask); 2771 2772 return 0; 2773 } 2774 2775 /** 2776 * Set the IVAR registers, mapping interrupt causes to vectors 2777 * @param hw 2778 * pointer to ngbe_hw struct 2779 * @direction 2780 * 0 for Rx, 1 for Tx, -1 for other causes 2781 * @queue 2782 * queue to map the corresponding interrupt to 2783 * @msix_vector 2784 * the vector to map to the corresponding queue 2785 */ 2786 void 2787 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction, 2788 uint8_t queue, uint8_t msix_vector) 2789 { 2790 uint32_t tmp, idx; 2791 2792 if (direction == -1) { 2793 /* other causes */ 2794 msix_vector |= NGBE_IVARMISC_VLD; 2795 idx = 0; 2796 tmp = rd32(hw, NGBE_IVARMISC); 2797 tmp &= ~(0xFF << idx); 2798 tmp |= (msix_vector << idx); 2799 wr32(hw, NGBE_IVARMISC, tmp); 2800 } else { 2801 /* rx or tx causes */ 2802 msix_vector |= NGBE_IVAR_VLD; /* Workaround for ICR lost */ 2803 idx = ((16 * (queue & 1)) + (8 * direction)); 2804 tmp = rd32(hw, NGBE_IVAR(queue >> 1)); 2805 tmp &= ~(0xFF << idx); 2806 tmp |= (msix_vector << idx); 2807 wr32(hw, NGBE_IVAR(queue >> 1), tmp); 2808 } 2809 } 2810 2811 /** 2812 * Sets up the hardware to properly generate MSI-X interrupts 2813 * @hw 2814 * board private structure 2815 */ 2816 static void 2817 ngbe_configure_msix(struct rte_eth_dev *dev) 2818 { 2819 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2820 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2821 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2822 uint32_t queue_id, base = NGBE_MISC_VEC_ID; 2823 uint32_t vec = NGBE_MISC_VEC_ID; 2824 uint32_t gpie; 2825 2826 /* 2827 * Won't configure MSI-X register if no mapping is done 2828 * between intr vector and event fd 2829 * but if MSI-X has been enabled already, need to configure 2830 * auto clean, auto mask and throttling. 2831 */ 2832 gpie = rd32(hw, NGBE_GPIE); 2833 if (!rte_intr_dp_is_en(intr_handle) && 2834 !(gpie & NGBE_GPIE_MSIX)) 2835 return; 2836 2837 if (rte_intr_allow_others(intr_handle)) { 2838 base = NGBE_RX_VEC_START; 2839 vec = base; 2840 } 2841 2842 /* setup GPIE for MSI-X mode */ 2843 gpie = rd32(hw, NGBE_GPIE); 2844 gpie |= NGBE_GPIE_MSIX; 2845 wr32(hw, NGBE_GPIE, gpie); 2846 2847 /* Populate the IVAR table and set the ITR values to the 2848 * corresponding register. 
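 * Rx queues are mapped 1:1 to vectors while dedicated event fds are
 * available; any remaining queues share the last data-path vector.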
2849 */ 2850 if (rte_intr_dp_is_en(intr_handle)) { 2851 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 2852 queue_id++) { 2853 /* by default, 1:1 mapping */ 2854 ngbe_set_ivar_map(hw, 0, queue_id, vec); 2855 rte_intr_vec_list_index_set(intr_handle, 2856 queue_id, vec); 2857 if (vec < base + rte_intr_nb_efd_get(intr_handle) 2858 - 1) 2859 vec++; 2860 } 2861 2862 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID); 2863 } 2864 wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID), 2865 NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT) 2866 | NGBE_ITR_WRDSA); 2867 } 2868 2869 static u8 * 2870 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw, 2871 u8 **mc_addr_ptr, u32 *vmdq) 2872 { 2873 u8 *mc_addr; 2874 2875 *vmdq = 0; 2876 mc_addr = *mc_addr_ptr; 2877 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); 2878 return mc_addr; 2879 } 2880 2881 int 2882 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 2883 struct rte_ether_addr *mc_addr_set, 2884 uint32_t nb_mc_addr) 2885 { 2886 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2887 u8 *mc_addr_list; 2888 2889 mc_addr_list = (u8 *)mc_addr_set; 2890 return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 2891 ngbe_dev_addr_list_itr, TRUE); 2892 } 2893 2894 static uint64_t 2895 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 2896 { 2897 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2898 uint64_t systime_cycles; 2899 2900 systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL); 2901 systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32; 2902 2903 return systime_cycles; 2904 } 2905 2906 static uint64_t 2907 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 2908 { 2909 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2910 uint64_t rx_tstamp_cycles; 2911 2912 /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */ 2913 rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL); 2914 rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32; 2915 2916 return rx_tstamp_cycles; 2917 } 2918 2919 static uint64_t 2920 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 2921 { 2922 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2923 uint64_t tx_tstamp_cycles; 2924 2925 /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. 
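 * The caller checks the VLD bit in TSTXCTL before consuming this value.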
*/ 2926 tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL); 2927 tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32; 2928 2929 return tx_tstamp_cycles; 2930 } 2931 2932 static void 2933 ngbe_start_timecounters(struct rte_eth_dev *dev) 2934 { 2935 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2936 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2937 uint32_t incval = 0; 2938 uint32_t shift = 0; 2939 2940 incval = NGBE_INCVAL_1GB; 2941 shift = NGBE_INCVAL_SHIFT_1GB; 2942 2943 wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval)); 2944 2945 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 2946 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 2947 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 2948 2949 adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK; 2950 adapter->systime_tc.cc_shift = shift; 2951 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 2952 2953 adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK; 2954 adapter->rx_tstamp_tc.cc_shift = shift; 2955 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 2956 2957 adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK; 2958 adapter->tx_tstamp_tc.cc_shift = shift; 2959 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 2960 } 2961 2962 static int 2963 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 2964 { 2965 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2966 2967 adapter->systime_tc.nsec += delta; 2968 adapter->rx_tstamp_tc.nsec += delta; 2969 adapter->tx_tstamp_tc.nsec += delta; 2970 2971 return 0; 2972 } 2973 2974 static int 2975 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 2976 { 2977 uint64_t ns; 2978 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2979 2980 ns = rte_timespec_to_ns(ts); 2981 /* Set the timecounters to a new value. */ 2982 adapter->systime_tc.nsec = ns; 2983 adapter->rx_tstamp_tc.nsec = ns; 2984 adapter->tx_tstamp_tc.nsec = ns; 2985 2986 return 0; 2987 } 2988 2989 static int 2990 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 2991 { 2992 uint64_t ns, systime_cycles; 2993 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2994 2995 systime_cycles = ngbe_read_systime_cyclecounter(dev); 2996 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 2997 *ts = rte_ns_to_timespec(ns); 2998 2999 return 0; 3000 } 3001 3002 static int 3003 ngbe_timesync_enable(struct rte_eth_dev *dev) 3004 { 3005 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3006 uint32_t tsync_ctl; 3007 3008 /* Stop the timesync system time. */ 3009 wr32(hw, NGBE_TSTIMEINC, 0x0); 3010 /* Reset the timesync system time value. */ 3011 wr32(hw, NGBE_TSTIMEL, 0x0); 3012 wr32(hw, NGBE_TSTIMEH, 0x0); 3013 3014 ngbe_start_timecounters(dev); 3015 3016 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 3017 wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 3018 RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588); 3019 3020 /* Enable timestamping of received PTP packets. */ 3021 tsync_ctl = rd32(hw, NGBE_TSRXCTL); 3022 tsync_ctl |= NGBE_TSRXCTL_ENA; 3023 wr32(hw, NGBE_TSRXCTL, tsync_ctl); 3024 3025 /* Enable timestamping of transmitted PTP packets. 
*/ 3026 tsync_ctl = rd32(hw, NGBE_TSTXCTL); 3027 tsync_ctl |= NGBE_TSTXCTL_ENA; 3028 wr32(hw, NGBE_TSTXCTL, tsync_ctl); 3029 3030 ngbe_flush(hw); 3031 3032 return 0; 3033 } 3034 3035 static int 3036 ngbe_timesync_disable(struct rte_eth_dev *dev) 3037 { 3038 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3039 uint32_t tsync_ctl; 3040 3041 /* Disable timestamping of transmitted PTP packets. */ 3042 tsync_ctl = rd32(hw, NGBE_TSTXCTL); 3043 tsync_ctl &= ~NGBE_TSTXCTL_ENA; 3044 wr32(hw, NGBE_TSTXCTL, tsync_ctl); 3045 3046 /* Disable timestamping of received PTP packets. */ 3047 tsync_ctl = rd32(hw, NGBE_TSRXCTL); 3048 tsync_ctl &= ~NGBE_TSRXCTL_ENA; 3049 wr32(hw, NGBE_TSRXCTL, tsync_ctl); 3050 3051 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 3052 wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0); 3053 3054 /* Stop incrementing the System Time registers. */ 3055 wr32(hw, NGBE_TSTIMEINC, 0); 3056 3057 return 0; 3058 } 3059 3060 static int 3061 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3062 struct timespec *timestamp, 3063 uint32_t flags __rte_unused) 3064 { 3065 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3066 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 3067 uint32_t tsync_rxctl; 3068 uint64_t rx_tstamp_cycles; 3069 uint64_t ns; 3070 3071 tsync_rxctl = rd32(hw, NGBE_TSRXCTL); 3072 if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0) 3073 return -EINVAL; 3074 3075 rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev); 3076 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 3077 *timestamp = rte_ns_to_timespec(ns); 3078 3079 return 0; 3080 } 3081 3082 static int 3083 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3084 struct timespec *timestamp) 3085 { 3086 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3087 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 3088 uint32_t tsync_txctl; 3089 uint64_t tx_tstamp_cycles; 3090 uint64_t ns; 3091 3092 tsync_txctl = rd32(hw, NGBE_TSTXCTL); 3093 if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0) 3094 return -EINVAL; 3095 3096 tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev); 3097 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 3098 *timestamp = rte_ns_to_timespec(ns); 3099 3100 return 0; 3101 } 3102 3103 static int 3104 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused) 3105 { 3106 int count = 0; 3107 int g_ind = 0; 3108 const struct reg_info *reg_group; 3109 const struct reg_info **reg_set = ngbe_regs_others; 3110 3111 while ((reg_group = reg_set[g_ind++])) 3112 count += ngbe_regs_group_count(reg_group); 3113 3114 return count; 3115 } 3116 3117 static int 3118 ngbe_get_regs(struct rte_eth_dev *dev, 3119 struct rte_dev_reg_info *regs) 3120 { 3121 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3122 uint32_t *data = regs->data; 3123 int g_ind = 0; 3124 int count = 0; 3125 const struct reg_info *reg_group; 3126 const struct reg_info **reg_set = ngbe_regs_others; 3127 3128 if (data == NULL) { 3129 regs->length = ngbe_get_reg_length(dev); 3130 regs->width = sizeof(uint32_t); 3131 return 0; 3132 } 3133 3134 /* Support only full register dump */ 3135 if (regs->length == 0 || 3136 regs->length == (uint32_t)ngbe_get_reg_length(dev)) { 3137 regs->version = hw->mac.type << 24 | 3138 hw->revision_id << 16 | 3139 hw->device_id; 3140 while ((reg_group = reg_set[g_ind++])) 3141 count += ngbe_read_regs_group(dev, &data[count], 3142 reg_group); 3143 return 0; 3144 } 3145 3146 return -ENOTSUP; 3147 } 3148 3149 static int 3150 ngbe_get_eeprom_length(struct rte_eth_dev *dev) 3151 { 3152 struct ngbe_hw *hw = 
ngbe_dev_hw(dev); 3153 3154 /* Return unit is byte count */ 3155 return hw->rom.word_size * 2; 3156 } 3157 3158 static int 3159 ngbe_get_eeprom(struct rte_eth_dev *dev, 3160 struct rte_dev_eeprom_info *in_eeprom) 3161 { 3162 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3163 struct ngbe_rom_info *eeprom = &hw->rom; 3164 uint16_t *data = in_eeprom->data; 3165 int first, length; 3166 3167 first = in_eeprom->offset >> 1; 3168 length = in_eeprom->length >> 1; 3169 if (first > hw->rom.word_size || 3170 ((first + length) > hw->rom.word_size)) 3171 return -EINVAL; 3172 3173 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 3174 3175 return eeprom->readw_buffer(hw, first, length, data); 3176 } 3177 3178 static int 3179 ngbe_set_eeprom(struct rte_eth_dev *dev, 3180 struct rte_dev_eeprom_info *in_eeprom) 3181 { 3182 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3183 struct ngbe_rom_info *eeprom = &hw->rom; 3184 uint16_t *data = in_eeprom->data; 3185 int first, length; 3186 3187 first = in_eeprom->offset >> 1; 3188 length = in_eeprom->length >> 1; 3189 if (first > hw->rom.word_size || 3190 ((first + length) > hw->rom.word_size)) 3191 return -EINVAL; 3192 3193 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 3194 3195 return eeprom->writew_buffer(hw, first, length, data); 3196 } 3197 3198 static const struct eth_dev_ops ngbe_eth_dev_ops = { 3199 .dev_configure = ngbe_dev_configure, 3200 .dev_infos_get = ngbe_dev_info_get, 3201 .dev_start = ngbe_dev_start, 3202 .dev_stop = ngbe_dev_stop, 3203 .dev_set_link_up = ngbe_dev_set_link_up, 3204 .dev_set_link_down = ngbe_dev_set_link_down, 3205 .dev_close = ngbe_dev_close, 3206 .dev_reset = ngbe_dev_reset, 3207 .promiscuous_enable = ngbe_dev_promiscuous_enable, 3208 .promiscuous_disable = ngbe_dev_promiscuous_disable, 3209 .allmulticast_enable = ngbe_dev_allmulticast_enable, 3210 .allmulticast_disable = ngbe_dev_allmulticast_disable, 3211 .link_update = ngbe_dev_link_update, 3212 .stats_get = ngbe_dev_stats_get, 3213 .xstats_get = ngbe_dev_xstats_get, 3214 .xstats_get_by_id = ngbe_dev_xstats_get_by_id, 3215 .stats_reset = ngbe_dev_stats_reset, 3216 .xstats_reset = ngbe_dev_xstats_reset, 3217 .xstats_get_names = ngbe_dev_xstats_get_names, 3218 .xstats_get_names_by_id = ngbe_dev_xstats_get_names_by_id, 3219 .fw_version_get = ngbe_fw_version_get, 3220 .dev_supported_ptypes_get = ngbe_dev_supported_ptypes_get, 3221 .mtu_set = ngbe_dev_mtu_set, 3222 .vlan_filter_set = ngbe_vlan_filter_set, 3223 .vlan_tpid_set = ngbe_vlan_tpid_set, 3224 .vlan_offload_set = ngbe_vlan_offload_set, 3225 .vlan_strip_queue_set = ngbe_vlan_strip_queue_set, 3226 .rx_queue_start = ngbe_dev_rx_queue_start, 3227 .rx_queue_stop = ngbe_dev_rx_queue_stop, 3228 .tx_queue_start = ngbe_dev_tx_queue_start, 3229 .tx_queue_stop = ngbe_dev_tx_queue_stop, 3230 .rx_queue_setup = ngbe_dev_rx_queue_setup, 3231 .rx_queue_release = ngbe_dev_rx_queue_release, 3232 .tx_queue_setup = ngbe_dev_tx_queue_setup, 3233 .tx_queue_release = ngbe_dev_tx_queue_release, 3234 .rx_queue_intr_enable = ngbe_dev_rx_queue_intr_enable, 3235 .rx_queue_intr_disable = ngbe_dev_rx_queue_intr_disable, 3236 .dev_led_on = ngbe_dev_led_on, 3237 .dev_led_off = ngbe_dev_led_off, 3238 .flow_ctrl_get = ngbe_flow_ctrl_get, 3239 .flow_ctrl_set = ngbe_flow_ctrl_set, 3240 .mac_addr_add = ngbe_add_rar, 3241 .mac_addr_remove = ngbe_remove_rar, 3242 .mac_addr_set = ngbe_set_default_mac_addr, 3243 .uc_hash_table_set = ngbe_uc_hash_table_set, 3244 .uc_all_hash_table_set = ngbe_uc_all_hash_table_set, 3245 .reta_update = ngbe_dev_rss_reta_update, 
3246 	.reta_query                 = ngbe_dev_rss_reta_query,
3247 	.rss_hash_update            = ngbe_dev_rss_hash_update,
3248 	.rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
3249 	.set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
3250 	.rxq_info_get               = ngbe_rxq_info_get,
3251 	.txq_info_get               = ngbe_txq_info_get,
3252 	.rx_burst_mode_get          = ngbe_rx_burst_mode_get,
3253 	.tx_burst_mode_get          = ngbe_tx_burst_mode_get,
3254 	.timesync_enable            = ngbe_timesync_enable,
3255 	.timesync_disable           = ngbe_timesync_disable,
3256 	.timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
3257 	.timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
3258 	.get_reg                    = ngbe_get_regs,
3259 	.get_eeprom_length          = ngbe_get_eeprom_length,
3260 	.get_eeprom                 = ngbe_get_eeprom,
3261 	.set_eeprom                 = ngbe_set_eeprom,
3262 	.timesync_adjust_time       = ngbe_timesync_adjust_time,
3263 	.timesync_read_time         = ngbe_timesync_read_time,
3264 	.timesync_write_time        = ngbe_timesync_write_time,
3265 	.tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
3266 };
3267 
3268 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
3269 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
3270 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
3271 
3272 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
3273 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
3274 
3275 #ifdef RTE_ETHDEV_DEBUG_RX
3276 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
3277 #endif
3278 #ifdef RTE_ETHDEV_DEBUG_TX
3279 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
3280 #endif
3281 