/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_regs_group.h"

static const struct reg_info ngbe_regs_general[] = {
	{NGBE_RST, 1, 1, "NGBE_RST"},
	{NGBE_STAT, 1, 1, "NGBE_STAT"},
	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_nvm[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_fctl_others[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rx[] = {
	{0, 0, 0, ""}
};

static struct reg_info ngbe_regs_tx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_wakeup[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_mac[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_diagnostic[] = {
	{0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *ngbe_regs_others[] = {
				ngbe_regs_general,
				ngbe_regs_nvm,
				ngbe_regs_interrupt,
				ngbe_regs_fctl_others,
				ngbe_regs_rxdma,
				ngbe_regs_rx,
				ngbe_regs_tx,
				ngbe_regs_wakeup,
				ngbe_regs_mac,
				ngbe_regs_diagnostic,
				NULL};

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);
static void ngbe_pbthresh_set(struct rte_eth_dev *dev);

#define NGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(tx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(tx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),
	HW_XSTAT(rx_dma_drop),
	HW_XSTAT(tx_dma_drop),
	HW_XSTAT(tx_secdrp_packets),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_cnt),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* PB Stats */
	HW_XSTAT(rx_up_dropped),
	HW_XSTAT(rdb_pkt_cnt),
	HW_XSTAT(rdb_repli_cnt),
	HW_XSTAT(rdb_drp_cnt),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC RxTx */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
			sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
			sizeof(rte_ngbe_qp_strings[0]))

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
	ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	u32 led_conf = 0;
	int err, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_queue_count = ngbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * Rx and Tx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* Tx queue function in primary, set by last queue initialized
		 * Tx queue may not be initialized by the primary process
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default Tx function if we get here */
			PMD_INIT_LOG(NOTICE,
				"No Tx queues configured yet. Using default Tx function.");
		}

		ngbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->back = pci_dev;
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) {
		hw->sub_system_id = pci_dev->id.subsystem_device_id;
	} else {
		u32 ssid;

		ssid = ngbe_flash_read_dword(hw, 0xFFFDC);
		if (ssid == 0x1) {
			PMD_INIT_LOG(ERR,
				"Read of internal subsystem device id failed\n");
			return -ENODEV;
		}
		hw->sub_system_id = (u16)ssid >> 8 | (u16)ssid << 8;
	}
	ngbe_map_device_id(hw);

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ngbe_fc_full;
	hw->fc.current_mode = ngbe_fc_full;
	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
	hw->fc.low_water = NGBE_FC_XON_LOTH;
	hw->fc.high_water = NGBE_FC_XOFF_HITH;
	hw->fc.send_xon = 1;

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->phy.led_oem_chk(hw, &led_conf);
	if (err == 0)
		hw->led_conf = led_conf;
	else
		hw->led_conf = 0xFFFF;

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	ngbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %u bytes needed to store MAC addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
		RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = ngbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			(int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct ngbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		ngbe_vlan_hw_strip_enable(dev, queue);
	else
		ngbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			!(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg |= NGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg &= ~NGBE_RXCFG_VLAN;
	}
	rxcfg &= ~NGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		ngbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
		ngbe_dev_rx_queue_start(dev, queue);
	}
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, NGBE_PORTCTL);

	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				"Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_LSB_MASK,
				NGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, NGBE_EXTAG,
				NGBE_EXTAG_VLAN_MASK,
				NGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_MSB_MASK,
				NGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
	vlnctrl |= NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < NGBE_VFTA_SIZE; i++)
		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
	struct ngbe_rx_queue *rxq;

	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		NGBE_SET_HWSTRIP(hwstrip, queue);
	else
		NGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
}

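/*
 * Per-queue VLAN strip helpers: the enable/disable functions below toggle
 * NGBE_RXCFG_VLAN on one Rx ring and then record the resulting state in
 * the hwstrip bitmap via ngbe_vlan_hw_strip_bitmap_set().
 */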
static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl &= ~NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record those settings for HW strip per queue */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl |= NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record those settings for HW strip per queue */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_VLANEXT;
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct ngbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			ngbe_vlan_hw_strip_enable(dev, i);
		else
			ngbe_vlan_hw_strip_disable(dev, i);
	}
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct ngbe_rx_queue *rxq;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		ngbe_vlan_hw_strip_config(dev);

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ngbe_vlan_hw_filter_enable(dev);
		else
			ngbe_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			ngbe_vlan_hw_extend_enable(dev);
		else
			ngbe_vlan_hw_extend_disable(dev);
	}

	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
			ngbe_qinq_hw_strip_enable(dev);
		else
			ngbe_qinq_hw_strip_disable(dev);
	}

	return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ngbe_config_vlan_strip_on_all_queues(dev, mask);

	ngbe_vlan_offload_config(dev, mask);

	return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation preconditions, this flag will be reset.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO | NGBE_ICRMISC_HEAT;
}

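/*
 * The GPIO and overheat interrupt sources prepared in
 * ngbe_dev_phy_intr_setup() above are only accumulated in intr->mask_misc;
 * they take effect once ngbe_enable_intr() writes that mask to NGBE_IENMISC
 * later in ngbe_dev_start() below.
 */
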
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;

	/* reinitialize adapter, this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	ngbe_set_pcie_master(hw, true);

	/* configure PF module if SRIOV enabled */
	ngbe_pf_host_configure(dev);

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				"Failed to allocate %d rx_queues intr_vec",
				dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbe_vlan_offload_config(dev, mask);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	hw->mac.setup_pba(hw);
	ngbe_pbthresh_set(dev);
	ngbe_configure_port(dev);

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	hw->lsc = dev->data->dev_conf.intr_conf.lsc;

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	allowed_speeds = 0;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	err = hw->phy.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "PHY init failed");
		goto error;
	}
	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				"LSC won't enable because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if (hw->gpio_ctl) {
		/* gpio0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}

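/*
 * ngbe_dev_stop() below undoes the work of ngbe_dev_start(): interrupts are
 * masked, the MAC is reset and stopped, the queues are cleared and the PHY
 * is powered down so that a later start begins from a clean state.
 */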
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		goto out;

	PMD_INIT_FUNC_TRACE();

	if (hw->gpio_ctl) {
		/* gpio0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	ngbe_stop_hw(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	ngbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	ngbe_set_pcie_master(hw, true);

	adapter->rss_reta_updated = 0;

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

out:
	/* close phy to prevent reset in dev_close from restarting physical link */
	hw->phy.set_phy_power(hw, false);

	return 0;
}

/*
 * Set device link up: power on.
 */
static int
ngbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	hw->phy.set_phy_power(hw, true);

	return 0;
}

/*
 * Set device link down: power off.
 */
static int
ngbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	hw->phy.set_phy_power(hw, false);

	return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	ngbe_set_pcie_master(hw, false);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	/* uninitialize PF if max_vfs not zero */
	ngbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
	 * its VFs to make them align with it. The detailed notification
	 * mechanism is PMD specific. As to ngbe PF, it is rather complex.
	 * To avoid unexpected behavior in VF, currently reset of PF with
	 * SR-IOV activation is not supported. It might be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_ngbe_dev_uninit(dev);
	if (ret != 0)
		return ret;

	ret = eth_ngbe_dev_init(dev, NULL);

	return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
	{                                                        \
		uint32_t current_counter = rd32(hw, reg);        \
		if (current_counter < last_counter)              \
			current_counter += 0x100000000LL;        \
		if (!hw->offset_loaded)                          \
			last_counter = current_counter;          \
		counter = current_counter - last_counter;        \
		counter &= 0xFFFFFFFFLL;                         \
	}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{                                                                \
		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
		uint64_t current_counter = (current_counter_msb << 32) | \
			current_counter_lsb;                             \
		if (current_counter < last_counter)                      \
			current_counter += 0x1000000000LL;               \
		if (!hw->offset_loaded)                                  \
			last_counter = current_counter;                  \
		counter = current_counter - last_counter;                \
		counter &= 0xFFFFFFFFFLL;                                \
	}

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
			   struct ngbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
			hw->qp_last[i].rx_qp_bc_packets,
			hw_stats->qp[i].rx_qp_bc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
			hw->qp_last[i].tx_qp_mc_packets,
			hw_stats->qp[i].tx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
			hw->qp_last[i].tx_qp_bc_packets,
			hw_stats->qp[i].tx_qp_bc_packets);
	}

	/* PB Stats */
	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_dma_drop += rd32(hw, NGBE_DMATXDROP);
	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
	hw_stats->rx_oversize_cnt += rd32(hw, NGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, NGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, NGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, NGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, NGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, NGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, NGBE_LSECRX_BADPKT(i));
	}
	for (i = 0; i < 4; i++) {
		hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
		hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
	}
	hw_stats->rx_total_missed_packets =
			hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct ngbe_stat_mappings *stat_mappings =
			NGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	ngbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
	for (i = 0; i < NGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed = hw_stats->rx_total_missed_packets +
			 hw_stats->rx_dma_drop;
	stats->ierrors = hw_stats->rx_crc_errors +
			 hw_stats->rx_mac_short_packet_dropped +
			 hw_stats->rx_length_errors +
			 hw_stats->rx_undersize_errors +
			 hw_stats->rdb_drp_cnt +
			 hw_stats->rx_illegal_byte_errors +
			 hw_stats->rx_error_bytes +
			 hw_stats->rx_fragment_errors;

	/* Tx Errors */
	stats->oerrors = 0;
	return 0;
}

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	return NGBE_NB_HW_STATS +
	       NGBE_NB_QP_STATS * nb_queues;
}

static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		snprintf(name, size, "[hw]%s",
			rte_ngbe_stats_strings[id].name);
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		snprintf(name, size, "[q%u]%s", nb,
			rte_ngbe_qp_strings[st].name);
		return 0;
	}
	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

	return -(int)(id + 1);
}

static inline int
ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		*offset = rte_ngbe_stats_strings[id].offset;
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		*offset = rte_ngbe_qp_strings[st].offset +
			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
		return 0;
	}

	return -1;
}

static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
	unsigned int i, count;

	count = ngbe_xstats_calc_num(dev);
	if (xstats_names == NULL)
		return count;

	/* Note: limit >= cnt_stats checked upstream
	 * in rte_eth_xstats_names()
	 */
	limit = min(limit, count);

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(i, xstats_names[i].name,
			sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
	}

	return i;
}

static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
					   const uint64_t *ids,
					   struct rte_eth_xstat_name *xstats_names,
					   unsigned int limit)
{
	unsigned int i;

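	/* ids == NULL asks for the whole table, so fall back to the plain
	 * xstats_get_names() path.
	 */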
	if (ids == NULL)
		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
				sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
	}

	return i;
}

static int
ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
					 unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (xstats == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset = 0;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
		xstats[i].id = i;
	}

	return i;
}

static int
ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
					 unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (values == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int limit)
{
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_(dev, values, limit);

	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(ids[i], &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

static int
ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret;

	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);

	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;

	return 0;
}

static int
ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
				RTE_ETH_LINK_SPEED_10M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}

const uint32_t *
ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
		return ngbe_get_supported_ptypes();

	return NULL;
}

static void
ngbe_dev_overheat(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	s32 temp_state;

	temp_state = hw->mac.check_overtemp(hw);
	if (!temp_state)
		return;

	if (temp_state == NGBE_ERR_UNDERTEMP) {
		PMD_DRV_LOG(CRIT, "Network adapter has been started again, "
			"since the temperature has been back to normal state.");
		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, NGBE_PBRXCTL_ENA);
		ngbe_dev_set_link_up(dev);
	} else if (temp_state == NGBE_ERR_OVERTEMP) {
		PMD_DRV_LOG(CRIT, "Network adapter has been stopped because it has overheated.");
		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);
		ngbe_dev_set_link_down(dev);
	}
}

/* return 0 means link status changed, -1 means not changed */
int
ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1903 int wait_to_complete) 1904 { 1905 struct ngbe_hw *hw = ngbe_dev_hw(dev); 1906 struct rte_eth_link link; 1907 u32 link_speed = NGBE_LINK_SPEED_UNKNOWN; 1908 u32 lan_speed = 0; 1909 bool link_up; 1910 int err; 1911 int wait = 1; 1912 1913 memset(&link, 0, sizeof(link)); 1914 link.link_status = RTE_ETH_LINK_DOWN; 1915 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 1916 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 1917 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 1918 ~RTE_ETH_LINK_SPEED_AUTONEG); 1919 1920 hw->mac.get_link_status = true; 1921 1922 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 1923 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 1924 wait = 0; 1925 1926 err = hw->mac.check_link(hw, &link_speed, &link_up, wait); 1927 if (err != 0) { 1928 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 1929 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1930 return rte_eth_linkstatus_set(dev, &link); 1931 } 1932 1933 if (!link_up) 1934 return rte_eth_linkstatus_set(dev, &link); 1935 1936 link.link_status = RTE_ETH_LINK_UP; 1937 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1938 1939 switch (link_speed) { 1940 default: 1941 case NGBE_LINK_SPEED_UNKNOWN: 1942 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 1943 break; 1944 1945 case NGBE_LINK_SPEED_10M_FULL: 1946 link.link_speed = RTE_ETH_SPEED_NUM_10M; 1947 lan_speed = 0; 1948 break; 1949 1950 case NGBE_LINK_SPEED_100M_FULL: 1951 link.link_speed = RTE_ETH_SPEED_NUM_100M; 1952 lan_speed = 1; 1953 break; 1954 1955 case NGBE_LINK_SPEED_1GB_FULL: 1956 link.link_speed = RTE_ETH_SPEED_NUM_1G; 1957 lan_speed = 2; 1958 break; 1959 } 1960 1961 if (hw->is_pf) { 1962 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed); 1963 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL | 1964 NGBE_LINK_SPEED_100M_FULL | 1965 NGBE_LINK_SPEED_10M_FULL)) { 1966 wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK, 1967 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE); 1968 } 1969 wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_PROMISC, 1970 NGBE_MACRXFLT_PROMISC); 1971 } 1972 1973 return rte_eth_linkstatus_set(dev, &link); 1974 } 1975 1976 static int 1977 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 1978 { 1979 return ngbe_dev_link_update_share(dev, wait_to_complete); 1980 } 1981 1982 static int 1983 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 1984 { 1985 struct ngbe_hw *hw = ngbe_dev_hw(dev); 1986 uint32_t fctrl; 1987 1988 fctrl = rd32(hw, NGBE_PSRCTL); 1989 fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP); 1990 wr32(hw, NGBE_PSRCTL, fctrl); 1991 1992 return 0; 1993 } 1994 1995 static int 1996 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 1997 { 1998 struct ngbe_hw *hw = ngbe_dev_hw(dev); 1999 uint32_t fctrl; 2000 2001 fctrl = rd32(hw, NGBE_PSRCTL); 2002 fctrl &= (~NGBE_PSRCTL_UCP); 2003 if (dev->data->all_multicast == 1) 2004 fctrl |= NGBE_PSRCTL_MCP; 2005 else 2006 fctrl &= (~NGBE_PSRCTL_MCP); 2007 wr32(hw, NGBE_PSRCTL, fctrl); 2008 2009 return 0; 2010 } 2011 2012 static int 2013 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 2014 { 2015 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2016 uint32_t fctrl; 2017 2018 fctrl = rd32(hw, NGBE_PSRCTL); 2019 fctrl |= NGBE_PSRCTL_MCP; 2020 wr32(hw, NGBE_PSRCTL, fctrl); 2021 2022 return 0; 2023 } 2024 2025 static int 2026 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 2027 { 2028 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2029 uint32_t fctrl; 2030 2031 if (dev->data->promiscuous == 1) 2032 return 0; /* must remain in all_multicast mode */ 2033 2034 fctrl = rd32(hw, 
NGBE_PSRCTL);
2035 fctrl &= (~NGBE_PSRCTL_MCP);
2036 wr32(hw, NGBE_PSRCTL, fctrl);
2037
2038 return 0;
2039 }
2040
2041 /**
2042 * Enable or disable the link state change (LSC) interrupt causes
2043 * in the interrupt mask. It is called once during NIC initialization.
2044 *
2045 * @param dev
2046 * Pointer to struct rte_eth_dev.
2047 * @param on
2048 * Enable (nonzero) or disable (zero) the LSC interrupt.
2049 *
2050 * @return
2051 * - On success, zero.
2052 * - On failure, a negative value.
2053 */
2054 static int
2055 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2056 {
2057 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2058
2059 ngbe_dev_link_status_print(dev);
2060 if (on != 0) {
2061 intr->mask_misc |= NGBE_ICRMISC_PHY;
2062 intr->mask_misc |= NGBE_ICRMISC_GPIO;
2063 } else {
2064 intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2065 intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
2066 }
2067
2068 return 0;
2069 }
2070
2071 /**
2072 * Set up the miscellaneous (non-queue) interrupt mask.
2073 * It is called once during NIC initialization.
2074 *
2075 * @param dev
2076 * Pointer to struct rte_eth_dev.
2077 *
2078 * @return
2079 * - On success, zero.
2080 * - On failure, a negative value.
2081 */
2082 static int
2083 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2084 {
2085 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2086 u64 mask;
2087
2088 mask = NGBE_ICR_MASK;
2089 mask &= (1ULL << NGBE_MISC_VEC_ID);
2090 intr->mask |= mask;
2091 intr->mask_misc |= NGBE_ICRMISC_GPIO;
2092
2093 return 0;
2094 }
2095
2096 /**
2097 * Set up the Rx queue interrupt mask.
2098 * It is called once during NIC initialization.
2099 *
2100 * @param dev
2101 * Pointer to struct rte_eth_dev.
2102 *
2103 * @return
2104 * - On success, zero.
2105 * - On failure, a negative value.
2106 */
2107 static int
2108 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2109 {
2110 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2111 u64 mask;
2112
2113 mask = NGBE_ICR_MASK;
2114 mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2115 intr->mask |= mask;
2116
2117 return 0;
2118 }
2119
2120 /**
2121 * Set up the MACsec (link security) interrupt mask.
2122 * It is called once during NIC initialization.
2123 *
2124 * @param dev
2125 * Pointer to struct rte_eth_dev.
2126 *
2127 * @return
2128 * - On success, zero.
2129 * - On failure, a negative value.
2130 */
2131 static int
2132 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2133 {
2134 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2135
2136 intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2137
2138 return 0;
2139 }
2140
2141 /*
2142 * Read the interrupt causes and set the flags for deferred handling.
2143 *
2144 * @param dev
2145 * Pointer to struct rte_eth_dev.
2146 *
2147 * @return
2148 * - On success, zero.
2149 * - On failure, a negative value.
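 *
 * Note: the causes are not read from a device register directly; they are
 * taken from the interrupt status block (ISB) that the hardware writes into
 * host memory, and the misc slot is cleared again after it has been read.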
2150 */
2151 static int
2152 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2153 {
2154 uint32_t eicr;
2155 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2156 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2157
2158 /* read-on-clear nic registers here */
2159 eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2160 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2161
2162 intr->flags = 0;
2163
2164 /* set flag for async link update */
2165 if (eicr & NGBE_ICRMISC_PHY)
2166 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2167
2168 if (eicr & NGBE_ICRMISC_VFMBX)
2169 intr->flags |= NGBE_FLAG_MAILBOX;
2170
2171 if (eicr & NGBE_ICRMISC_LNKSEC)
2172 intr->flags |= NGBE_FLAG_MACSEC;
2173
2174 if (eicr & NGBE_ICRMISC_GPIO)
2175 intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2176
2177 if (eicr & NGBE_ICRMISC_HEAT)
2178 intr->flags |= NGBE_FLAG_OVERHEAT;
2179
2180 ((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;
2181
2182 return 0;
2183 }
2184
2185 /**
2186 * Get the current link status and print it to the log.
2187 *
2188 * @param dev
2189 * Pointer to struct rte_eth_dev.
2194 */
2195 static void
2196 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2197 {
2198 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2199 struct rte_eth_link link;
2200
2201 rte_eth_linkstatus_get(dev, &link);
2202
2203 if (link.link_status == RTE_ETH_LINK_UP) {
2204 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2205 (int)(dev->data->port_id),
2206 (unsigned int)link.link_speed,
2207 link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2208 "full-duplex" : "half-duplex");
2209 } else {
2210 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2211 (int)(dev->data->port_id));
2212 }
2213 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2214 pci_dev->addr.domain,
2215 pci_dev->addr.bus,
2216 pci_dev->addr.devid,
2217 pci_dev->addr.function);
2218 }
2219
2220 /*
2221 * Execute the deferred handling for a received interrupt and re-enable it.
2222 *
2223 * @param dev
2224 * Pointer to struct rte_eth_dev.
2225 *
2226 * @return
2227 * - On success, zero.
2228 * - On failure, a negative value.
2229 */
2230 static int
2231 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2232 {
2233 struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2234
2235 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2236
2237 if (intr->flags & NGBE_FLAG_MAILBOX) {
2238 ngbe_pf_mbx_process(dev);
2239 intr->flags &= ~NGBE_FLAG_MAILBOX;
2240 }
2241
2242 if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2243 struct rte_eth_link link;
2244
2245 /* get the link status before the update, to detect a speed change later */
2246 rte_eth_linkstatus_get(dev, &link);
2247
2248 ngbe_dev_link_update(dev, 0);
2249 intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2250 ngbe_dev_link_status_print(dev);
2251 if (dev->data->dev_link.link_speed != link.link_speed)
2252 rte_eth_dev_callback_process(dev,
2253 RTE_ETH_EVENT_INTR_LSC, NULL);
2254 }
2255
2256 if (intr->flags & NGBE_FLAG_OVERHEAT) {
2257 ngbe_dev_overheat(dev);
2258 intr->flags &= ~NGBE_FLAG_OVERHEAT;
2259 }
2260
2261 PMD_DRV_LOG(DEBUG, "enable intr immediately");
2262 ngbe_enable_intr(dev);
2263
2264 return 0;
2265 }
2266
2267 /**
2268 * Interrupt handler invoked when the NIC raises an interrupt; it reads
2269 * the interrupt causes and performs the deferred handling.
2270 *
2271 * @param param
2272 * The address of the parameter (struct rte_eth_dev *) registered before.
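 *
 * This callback is typically registered against the PCI device's interrupt
 * handle (e.g. with rte_intr_callback_register()) during device
 * initialization, so it runs in the EAL interrupt thread rather than on a
 * datapath lcore.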
2273 */ 2274 static void 2275 ngbe_dev_interrupt_handler(void *param) 2276 { 2277 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2278 2279 ngbe_dev_interrupt_get_status(dev); 2280 ngbe_dev_interrupt_action(dev); 2281 } 2282 2283 static int 2284 ngbe_dev_led_on(struct rte_eth_dev *dev) 2285 { 2286 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2287 return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP; 2288 } 2289 2290 static int 2291 ngbe_dev_led_off(struct rte_eth_dev *dev) 2292 { 2293 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2294 return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP; 2295 } 2296 2297 static int 2298 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2299 { 2300 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2301 uint32_t mflcn_reg; 2302 uint32_t fccfg_reg; 2303 int rx_pause; 2304 int tx_pause; 2305 2306 fc_conf->pause_time = hw->fc.pause_time; 2307 fc_conf->high_water = hw->fc.high_water; 2308 fc_conf->low_water = hw->fc.low_water; 2309 fc_conf->send_xon = hw->fc.send_xon; 2310 fc_conf->autoneg = !hw->fc.disable_fc_autoneg; 2311 2312 /* 2313 * Return rx_pause status according to actual setting of 2314 * RXFCCFG register. 2315 */ 2316 mflcn_reg = rd32(hw, NGBE_RXFCCFG); 2317 if (mflcn_reg & NGBE_RXFCCFG_FC) 2318 rx_pause = 1; 2319 else 2320 rx_pause = 0; 2321 2322 /* 2323 * Return tx_pause status according to actual setting of 2324 * TXFCCFG register. 2325 */ 2326 fccfg_reg = rd32(hw, NGBE_TXFCCFG); 2327 if (fccfg_reg & NGBE_TXFCCFG_FC) 2328 tx_pause = 1; 2329 else 2330 tx_pause = 0; 2331 2332 if (rx_pause && tx_pause) 2333 fc_conf->mode = RTE_ETH_FC_FULL; 2334 else if (rx_pause) 2335 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 2336 else if (tx_pause) 2337 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 2338 else 2339 fc_conf->mode = RTE_ETH_FC_NONE; 2340 2341 return 0; 2342 } 2343 2344 static int 2345 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2346 { 2347 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2348 int err; 2349 uint32_t rx_buf_size; 2350 uint32_t max_high_water; 2351 enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = { 2352 ngbe_fc_none, 2353 ngbe_fc_rx_pause, 2354 ngbe_fc_tx_pause, 2355 ngbe_fc_full 2356 }; 2357 2358 PMD_INIT_FUNC_TRACE(); 2359 2360 rx_buf_size = rd32(hw, NGBE_PBRXSIZE); 2361 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 2362 2363 /* 2364 * At least reserve one Ethernet frame for watermark 2365 * high_water/low_water in kilo bytes for ngbe 2366 */ 2367 max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10; 2368 if (fc_conf->high_water > max_high_water || 2369 fc_conf->high_water < fc_conf->low_water) { 2370 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 2371 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 2372 return -EINVAL; 2373 } 2374 2375 hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode]; 2376 hw->fc.pause_time = fc_conf->pause_time; 2377 hw->fc.high_water = fc_conf->high_water; 2378 hw->fc.low_water = fc_conf->low_water; 2379 hw->fc.send_xon = fc_conf->send_xon; 2380 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 2381 2382 err = hw->mac.fc_enable(hw); 2383 2384 /* Not negotiated is not an error case */ 2385 if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) { 2386 wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK, 2387 (fc_conf->mac_ctrl_frame_fwd 2388 ? 
NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2389 ngbe_flush(hw);
2390
2391 return 0;
2392 }
2393
2394 PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2395 return -EIO;
2396 }
2397
2398 /* Additional bittime to account for NGBE framing */
2399 #define NGBE_ETH_FRAMING 20
2400
2401 /*
2402 * ngbe_fc_hpbthresh_set - calculate high water mark for flow control
2403 *
2404 * @dev: pointer to the rte_eth_dev structure
2405 * Returns the high water mark (in KB) for the Rx packet buffer.
2406 */
2407 static s32
2408 ngbe_fc_hpbthresh_set(struct rte_eth_dev *dev)
2409 {
2410 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2411 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2412 u32 max_frame_size, tc, dv_id, rx_pb;
2413 s32 kb, marker;
2414
2415 /* Calculate max LAN frame size */
2416 max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK);
2417 tc = max_frame_size + NGBE_ETH_FRAMING;
2418
2419 /* Calculate delay value for device */
2420 dv_id = NGBE_DV(tc, tc);
2421
2422 /* Loopback switch introduces additional latency */
2423 if (pci_dev->max_vfs)
2424 dv_id += NGBE_B2BT(tc);
2425
2426 /* Delay value is calculated in bit times; convert to KB */
2427 kb = NGBE_BT2KB(dv_id);
2428 rx_pb = rd32(hw, NGBE_PBRXSIZE) >> 10;
2429
2430 marker = rx_pb - kb;
2431
2432 /* It is possible that the packet buffer is not large enough
2433 * to provide the required headroom. In that case, warn the
2434 * user and fall back to the smallest usable value.
2435 */
2436 if (marker < 0) {
2437 PMD_DRV_LOG(WARNING, "Packet buffer cannot provide enough headroom to support flow control.");
2438 marker = tc + 1;
2439 }
2440
2441 return marker;
2442 }
2443
2444 /*
2445 * ngbe_fc_lpbthresh_set - calculate low water mark for flow control
2446 *
2447 * @dev: pointer to the rte_eth_dev structure
2448 */
2449 static s32
2450 ngbe_fc_lpbthresh_set(struct rte_eth_dev *dev)
2451 {
2452 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2453 u32 max_frame_size, tc, dv_id;
2454 s32 kb;
2455
2456 /* Calculate max LAN frame size */
2457 max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK);
2458 tc = max_frame_size + NGBE_ETH_FRAMING;
2459
2460 /* Calculate delay value for device */
2461 dv_id = NGBE_LOW_DV(tc);
2462
2463 /* Delay value is calculated in bit times; convert to KB */
2464 kb = NGBE_BT2KB(dv_id);
2465
2466 return kb;
2467 }
2468
2469 /*
2470 * ngbe_pbthresh_set - calculate and set up the high and low water marks
2471 */
2472 static void
2473 ngbe_pbthresh_set(struct rte_eth_dev *dev)
2474 {
2475 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2476
2477 hw->fc.high_water = ngbe_fc_hpbthresh_set(dev);
2478 hw->fc.low_water = ngbe_fc_lpbthresh_set(dev);
2479
2480 /* Low water marks must not be larger than high water marks */
2481 if (hw->fc.low_water > hw->fc.high_water)
2482 hw->fc.low_water = 0;
2483 }
2484
2485 int
2486 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2487 struct rte_eth_rss_reta_entry64 *reta_conf,
2488 uint16_t reta_size)
2489 {
2490 uint8_t i, j, mask;
2491 uint32_t reta;
2492 uint16_t idx, shift;
2493 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2494 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2495
2496 PMD_INIT_FUNC_TRACE();
2497
2498 if (!hw->is_pf) {
2499 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2500 "NIC.");
2501 return -ENOTSUP;
2502 }
2503
2504 if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2505 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2506 "(%d) does not match the number supported by hardware "
2507 "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2508 return -EINVAL;
2509 }
2510
2511 for (i = 0; i < reta_size; i += 4) {
2512 idx = i
/ RTE_ETH_RETA_GROUP_SIZE;
2513 shift = i % RTE_ETH_RETA_GROUP_SIZE;
2514 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2515 if (!mask)
2516 continue;
2517
2518 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2519 for (j = 0; j < 4; j++) {
2520 if (RS8(mask, j, 0x1)) {
2521 reta &= ~(MS32(8 * j, 0xFF));
2522 reta |= LS32(reta_conf[idx].reta[shift + j],
2523 8 * j, 0xFF);
2524 }
2525 }
2526 wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2527 }
2528 adapter->rss_reta_updated = 1;
2529
2530 return 0;
2531 }
2532
2533 int
2534 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2535 struct rte_eth_rss_reta_entry64 *reta_conf,
2536 uint16_t reta_size)
2537 {
2538 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2539 uint8_t i, j, mask;
2540 uint32_t reta;
2541 uint16_t idx, shift;
2542
2543 PMD_INIT_FUNC_TRACE();
2544
2545 if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2546 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2547 "(%d) does not match the number supported by hardware "
2548 "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2549 return -EINVAL;
2550 }
2551
2552 for (i = 0; i < reta_size; i += 4) {
2553 idx = i / RTE_ETH_RETA_GROUP_SIZE;
2554 shift = i % RTE_ETH_RETA_GROUP_SIZE;
2555 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2556 if (!mask)
2557 continue;
2558
2559 reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2560 for (j = 0; j < 4; j++) {
2561 if (RS8(mask, j, 0x1))
2562 reta_conf[idx].reta[shift + j] =
2563 (uint16_t)RS32(reta, 8 * j, 0xFF);
2564 }
2565 }
2566
2567 return 0;
2568 }
2569
2570 static int
2571 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2572 uint32_t index, uint32_t pool)
2573 {
2574 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2575 uint32_t enable_addr = 1;
2576
2577 return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2578 pool, enable_addr);
2579 }
2580
2581 static void
2582 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2583 {
2584 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2585
2586 ngbe_clear_rar(hw, index);
2587 }
2588
2589 static int
2590 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2591 {
2592 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2593
2594 ngbe_remove_rar(dev, 0);
2595 ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2596
2597 return 0;
2598 }
2599
2600 static int
2601 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2602 {
2603 struct ngbe_hw *hw = ngbe_dev_hw(dev);
2604 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
2605 struct rte_eth_dev_data *dev_data = dev->data;
2606
2607 /* If the device is started, refuse an MTU that would require scattered
2608 * Rx when that feature has not already been enabled.
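 *
 * For example, with the standard 1500-byte MTU, frame_size works out to
 * 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) = 1518 bytes,
 * which is the value programmed into NGBE_FRMSZ below.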
2609 */ 2610 if (dev_data->dev_started && !dev_data->scattered_rx && 2611 (frame_size + 2 * RTE_VLAN_HLEN > 2612 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 2613 PMD_INIT_LOG(ERR, "Stop port first."); 2614 return -EINVAL; 2615 } 2616 2617 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK, 2618 NGBE_FRMSZ_MAX(frame_size)); 2619 2620 return 0; 2621 } 2622 2623 static uint32_t 2624 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr) 2625 { 2626 uint32_t vector = 0; 2627 2628 switch (hw->mac.mc_filter_type) { 2629 case 0: /* use bits [47:36] of the address */ 2630 vector = ((uc_addr->addr_bytes[4] >> 4) | 2631 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 2632 break; 2633 case 1: /* use bits [46:35] of the address */ 2634 vector = ((uc_addr->addr_bytes[4] >> 3) | 2635 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 2636 break; 2637 case 2: /* use bits [45:34] of the address */ 2638 vector = ((uc_addr->addr_bytes[4] >> 2) | 2639 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 2640 break; 2641 case 3: /* use bits [43:32] of the address */ 2642 vector = ((uc_addr->addr_bytes[4]) | 2643 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 2644 break; 2645 default: /* Invalid mc_filter_type */ 2646 break; 2647 } 2648 2649 /* vector can only be 12-bits or boundary will be exceeded */ 2650 vector &= 0xFFF; 2651 return vector; 2652 } 2653 2654 static int 2655 ngbe_uc_hash_table_set(struct rte_eth_dev *dev, 2656 struct rte_ether_addr *mac_addr, uint8_t on) 2657 { 2658 uint32_t vector; 2659 uint32_t uta_idx; 2660 uint32_t reg_val; 2661 uint32_t uta_mask; 2662 uint32_t psrctl; 2663 2664 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2665 struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev); 2666 2667 vector = ngbe_uta_vector(hw, mac_addr); 2668 uta_idx = (vector >> 5) & 0x7F; 2669 uta_mask = 0x1UL << (vector & 0x1F); 2670 2671 if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask)) 2672 return 0; 2673 2674 reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx)); 2675 if (on) { 2676 uta_info->uta_in_use++; 2677 reg_val |= uta_mask; 2678 uta_info->uta_shadow[uta_idx] |= uta_mask; 2679 } else { 2680 uta_info->uta_in_use--; 2681 reg_val &= ~uta_mask; 2682 uta_info->uta_shadow[uta_idx] &= ~uta_mask; 2683 } 2684 2685 wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val); 2686 2687 psrctl = rd32(hw, NGBE_PSRCTL); 2688 if (uta_info->uta_in_use > 0) 2689 psrctl |= NGBE_PSRCTL_UCHFENA; 2690 else 2691 psrctl &= ~NGBE_PSRCTL_UCHFENA; 2692 2693 psrctl &= ~NGBE_PSRCTL_ADHF12_MASK; 2694 psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type); 2695 wr32(hw, NGBE_PSRCTL, psrctl); 2696 2697 return 0; 2698 } 2699 2700 static int 2701 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 2702 { 2703 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2704 struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev); 2705 uint32_t psrctl; 2706 int i; 2707 2708 if (on) { 2709 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 2710 uta_info->uta_shadow[i] = ~0; 2711 wr32(hw, NGBE_UCADDRTBL(i), ~0); 2712 } 2713 } else { 2714 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 2715 uta_info->uta_shadow[i] = 0; 2716 wr32(hw, NGBE_UCADDRTBL(i), 0); 2717 } 2718 } 2719 2720 psrctl = rd32(hw, NGBE_PSRCTL); 2721 if (on) 2722 psrctl |= NGBE_PSRCTL_UCHFENA; 2723 else 2724 psrctl &= ~NGBE_PSRCTL_UCHFENA; 2725 2726 psrctl &= ~NGBE_PSRCTL_ADHF12_MASK; 2727 psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type); 2728 wr32(hw, NGBE_PSRCTL, psrctl); 2729 2730 return 0; 2731 } 2732 2733 /** 2734 * Set the IVAR registers, mapping interrupt causes to vectors 2735 * @param hw 
2736 * pointer to ngbe_hw struct 2737 * @direction 2738 * 0 for Rx, 1 for Tx, -1 for other causes 2739 * @queue 2740 * queue to map the corresponding interrupt to 2741 * @msix_vector 2742 * the vector to map to the corresponding queue 2743 */ 2744 void 2745 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction, 2746 uint8_t queue, uint8_t msix_vector) 2747 { 2748 uint32_t tmp, idx; 2749 2750 if (direction == -1) { 2751 /* other causes */ 2752 msix_vector |= NGBE_IVARMISC_VLD; 2753 idx = 0; 2754 tmp = rd32(hw, NGBE_IVARMISC); 2755 tmp &= ~(0xFF << idx); 2756 tmp |= (msix_vector << idx); 2757 wr32(hw, NGBE_IVARMISC, tmp); 2758 } else { 2759 /* rx or tx causes */ 2760 /* Workaround for ICR lost */ 2761 idx = ((16 * (queue & 1)) + (8 * direction)); 2762 tmp = rd32(hw, NGBE_IVAR(queue >> 1)); 2763 tmp &= ~(0xFF << idx); 2764 tmp |= (msix_vector << idx); 2765 wr32(hw, NGBE_IVAR(queue >> 1), tmp); 2766 } 2767 } 2768 2769 /** 2770 * Sets up the hardware to properly generate MSI-X interrupts 2771 * @hw 2772 * board private structure 2773 */ 2774 static void 2775 ngbe_configure_msix(struct rte_eth_dev *dev) 2776 { 2777 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2778 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2779 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2780 uint32_t queue_id, base = NGBE_MISC_VEC_ID; 2781 uint32_t vec = NGBE_MISC_VEC_ID; 2782 uint32_t gpie; 2783 2784 /* 2785 * Won't configure MSI-X register if no mapping is done 2786 * between intr vector and event fd 2787 * but if MSI-X has been enabled already, need to configure 2788 * auto clean, auto mask and throttling. 2789 */ 2790 gpie = rd32(hw, NGBE_GPIE); 2791 if (!rte_intr_dp_is_en(intr_handle) && 2792 !(gpie & NGBE_GPIE_MSIX)) 2793 return; 2794 2795 if (rte_intr_allow_others(intr_handle)) { 2796 base = NGBE_RX_VEC_START; 2797 vec = base; 2798 } 2799 2800 /* setup GPIE for MSI-X mode */ 2801 gpie = rd32(hw, NGBE_GPIE); 2802 gpie |= NGBE_GPIE_MSIX; 2803 wr32(hw, NGBE_GPIE, gpie); 2804 2805 /* Populate the IVAR table and set the ITR values to the 2806 * corresponding register. 
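 * By default each Rx queue is mapped to its own MSI-X vector (starting at
 * NGBE_RX_VEC_START when extra vectors are available), the remaining
 * "other" causes are mapped to NGBE_MISC_VEC_ID, and the ITR register is
 * then programmed with the default interrupt throttling interval.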
2807 */ 2808 if (rte_intr_dp_is_en(intr_handle)) { 2809 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 2810 queue_id++) { 2811 /* by default, 1:1 mapping */ 2812 ngbe_set_ivar_map(hw, 0, queue_id, vec); 2813 rte_intr_vec_list_index_set(intr_handle, 2814 queue_id, vec); 2815 if (vec < base + rte_intr_nb_efd_get(intr_handle) 2816 - 1) 2817 vec++; 2818 } 2819 2820 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID); 2821 } 2822 wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID), 2823 NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT) 2824 | NGBE_ITR_WRDSA); 2825 } 2826 2827 static u8 * 2828 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw, 2829 u8 **mc_addr_ptr, u32 *vmdq) 2830 { 2831 u8 *mc_addr; 2832 2833 *vmdq = 0; 2834 mc_addr = *mc_addr_ptr; 2835 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); 2836 return mc_addr; 2837 } 2838 2839 int 2840 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 2841 struct rte_ether_addr *mc_addr_set, 2842 uint32_t nb_mc_addr) 2843 { 2844 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2845 u8 *mc_addr_list; 2846 2847 mc_addr_list = (u8 *)mc_addr_set; 2848 return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 2849 ngbe_dev_addr_list_itr, TRUE); 2850 } 2851 2852 static uint64_t 2853 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 2854 { 2855 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2856 uint64_t systime_cycles; 2857 2858 systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL); 2859 systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32; 2860 2861 return systime_cycles; 2862 } 2863 2864 static uint64_t 2865 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 2866 { 2867 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2868 uint64_t rx_tstamp_cycles; 2869 2870 /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */ 2871 rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL); 2872 rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32; 2873 2874 return rx_tstamp_cycles; 2875 } 2876 2877 static uint64_t 2878 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 2879 { 2880 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2881 uint64_t tx_tstamp_cycles; 2882 2883 /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. 
*/ 2884 tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL); 2885 tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32; 2886 2887 return tx_tstamp_cycles; 2888 } 2889 2890 static void 2891 ngbe_start_timecounters(struct rte_eth_dev *dev) 2892 { 2893 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2894 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2895 uint32_t incval = 0; 2896 uint32_t shift = 0; 2897 2898 incval = NGBE_INCVAL_1GB; 2899 shift = NGBE_INCVAL_SHIFT_1GB; 2900 2901 wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval)); 2902 2903 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 2904 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 2905 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 2906 2907 adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK; 2908 adapter->systime_tc.cc_shift = shift; 2909 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 2910 2911 adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK; 2912 adapter->rx_tstamp_tc.cc_shift = shift; 2913 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 2914 2915 adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK; 2916 adapter->tx_tstamp_tc.cc_shift = shift; 2917 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 2918 } 2919 2920 static int 2921 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 2922 { 2923 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2924 2925 adapter->systime_tc.nsec += delta; 2926 adapter->rx_tstamp_tc.nsec += delta; 2927 adapter->tx_tstamp_tc.nsec += delta; 2928 2929 return 0; 2930 } 2931 2932 static int 2933 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 2934 { 2935 uint64_t ns; 2936 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2937 2938 ns = rte_timespec_to_ns(ts); 2939 /* Set the timecounters to a new value. */ 2940 adapter->systime_tc.nsec = ns; 2941 adapter->rx_tstamp_tc.nsec = ns; 2942 adapter->tx_tstamp_tc.nsec = ns; 2943 2944 return 0; 2945 } 2946 2947 static int 2948 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 2949 { 2950 uint64_t ns, systime_cycles; 2951 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2952 2953 systime_cycles = ngbe_read_systime_cyclecounter(dev); 2954 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 2955 *ts = rte_ns_to_timespec(ns); 2956 2957 return 0; 2958 } 2959 2960 static int 2961 ngbe_timesync_enable(struct rte_eth_dev *dev) 2962 { 2963 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2964 uint32_t tsync_ctl; 2965 2966 /* Stop the timesync system time. */ 2967 wr32(hw, NGBE_TSTIMEINC, 0x0); 2968 /* Reset the timesync system time value. */ 2969 wr32(hw, NGBE_TSTIMEL, 0x0); 2970 wr32(hw, NGBE_TSTIMEH, 0x0); 2971 2972 ngbe_start_timecounters(dev); 2973 2974 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 2975 wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 2976 RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588); 2977 2978 /* Enable timestamping of received PTP packets. */ 2979 tsync_ctl = rd32(hw, NGBE_TSRXCTL); 2980 tsync_ctl |= NGBE_TSRXCTL_ENA; 2981 wr32(hw, NGBE_TSRXCTL, tsync_ctl); 2982 2983 /* Enable timestamping of transmitted PTP packets. 
*/ 2984 tsync_ctl = rd32(hw, NGBE_TSTXCTL); 2985 tsync_ctl |= NGBE_TSTXCTL_ENA; 2986 wr32(hw, NGBE_TSTXCTL, tsync_ctl); 2987 2988 ngbe_flush(hw); 2989 2990 return 0; 2991 } 2992 2993 static int 2994 ngbe_timesync_disable(struct rte_eth_dev *dev) 2995 { 2996 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2997 uint32_t tsync_ctl; 2998 2999 /* Disable timestamping of transmitted PTP packets. */ 3000 tsync_ctl = rd32(hw, NGBE_TSTXCTL); 3001 tsync_ctl &= ~NGBE_TSTXCTL_ENA; 3002 wr32(hw, NGBE_TSTXCTL, tsync_ctl); 3003 3004 /* Disable timestamping of received PTP packets. */ 3005 tsync_ctl = rd32(hw, NGBE_TSRXCTL); 3006 tsync_ctl &= ~NGBE_TSRXCTL_ENA; 3007 wr32(hw, NGBE_TSRXCTL, tsync_ctl); 3008 3009 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 3010 wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0); 3011 3012 /* Stop incrementing the System Time registers. */ 3013 wr32(hw, NGBE_TSTIMEINC, 0); 3014 3015 return 0; 3016 } 3017 3018 static int 3019 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3020 struct timespec *timestamp, 3021 uint32_t flags __rte_unused) 3022 { 3023 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3024 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 3025 uint32_t tsync_rxctl; 3026 uint64_t rx_tstamp_cycles; 3027 uint64_t ns; 3028 3029 tsync_rxctl = rd32(hw, NGBE_TSRXCTL); 3030 if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0) 3031 return -EINVAL; 3032 3033 rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev); 3034 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 3035 *timestamp = rte_ns_to_timespec(ns); 3036 3037 return 0; 3038 } 3039 3040 static int 3041 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3042 struct timespec *timestamp) 3043 { 3044 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3045 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 3046 uint32_t tsync_txctl; 3047 uint64_t tx_tstamp_cycles; 3048 uint64_t ns; 3049 3050 tsync_txctl = rd32(hw, NGBE_TSTXCTL); 3051 if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0) 3052 return -EINVAL; 3053 3054 tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev); 3055 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 3056 *timestamp = rte_ns_to_timespec(ns); 3057 3058 return 0; 3059 } 3060 3061 static int 3062 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused) 3063 { 3064 int count = 0; 3065 int g_ind = 0; 3066 const struct reg_info *reg_group; 3067 const struct reg_info **reg_set = ngbe_regs_others; 3068 3069 while ((reg_group = reg_set[g_ind++])) 3070 count += ngbe_regs_group_count(reg_group); 3071 3072 return count; 3073 } 3074 3075 static int 3076 ngbe_get_regs(struct rte_eth_dev *dev, 3077 struct rte_dev_reg_info *regs) 3078 { 3079 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3080 uint32_t *data = regs->data; 3081 int g_ind = 0; 3082 int count = 0; 3083 const struct reg_info *reg_group; 3084 const struct reg_info **reg_set = ngbe_regs_others; 3085 3086 if (data == NULL) { 3087 regs->length = ngbe_get_reg_length(dev); 3088 regs->width = sizeof(uint32_t); 3089 return 0; 3090 } 3091 3092 /* Support only full register dump */ 3093 if (regs->length == 0 || 3094 regs->length == (uint32_t)ngbe_get_reg_length(dev)) { 3095 regs->version = hw->mac.type << 24 | 3096 hw->revision_id << 16 | 3097 hw->device_id; 3098 while ((reg_group = reg_set[g_ind++])) 3099 count += ngbe_read_regs_group(dev, &data[count], 3100 reg_group); 3101 return 0; 3102 } 3103 3104 return -ENOTSUP; 3105 } 3106 3107 static int 3108 ngbe_get_eeprom_length(struct rte_eth_dev *dev) 3109 { 3110 struct ngbe_hw *hw = 
ngbe_dev_hw(dev); 3111 3112 /* Return unit is byte count */ 3113 return hw->rom.word_size * 2; 3114 } 3115 3116 static int 3117 ngbe_get_eeprom(struct rte_eth_dev *dev, 3118 struct rte_dev_eeprom_info *in_eeprom) 3119 { 3120 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3121 struct ngbe_rom_info *eeprom = &hw->rom; 3122 uint16_t *data = in_eeprom->data; 3123 int first, length; 3124 3125 first = in_eeprom->offset >> 1; 3126 length = in_eeprom->length >> 1; 3127 if (first > hw->rom.word_size || 3128 ((first + length) > hw->rom.word_size)) 3129 return -EINVAL; 3130 3131 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 3132 3133 return eeprom->readw_buffer(hw, first, length, data); 3134 } 3135 3136 static int 3137 ngbe_set_eeprom(struct rte_eth_dev *dev, 3138 struct rte_dev_eeprom_info *in_eeprom) 3139 { 3140 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3141 struct ngbe_rom_info *eeprom = &hw->rom; 3142 uint16_t *data = in_eeprom->data; 3143 int first, length; 3144 3145 first = in_eeprom->offset >> 1; 3146 length = in_eeprom->length >> 1; 3147 if (first > hw->rom.word_size || 3148 ((first + length) > hw->rom.word_size)) 3149 return -EINVAL; 3150 3151 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 3152 3153 return eeprom->writew_buffer(hw, first, length, data); 3154 } 3155 3156 static const struct eth_dev_ops ngbe_eth_dev_ops = { 3157 .dev_configure = ngbe_dev_configure, 3158 .dev_infos_get = ngbe_dev_info_get, 3159 .dev_start = ngbe_dev_start, 3160 .dev_stop = ngbe_dev_stop, 3161 .dev_set_link_up = ngbe_dev_set_link_up, 3162 .dev_set_link_down = ngbe_dev_set_link_down, 3163 .dev_close = ngbe_dev_close, 3164 .dev_reset = ngbe_dev_reset, 3165 .promiscuous_enable = ngbe_dev_promiscuous_enable, 3166 .promiscuous_disable = ngbe_dev_promiscuous_disable, 3167 .allmulticast_enable = ngbe_dev_allmulticast_enable, 3168 .allmulticast_disable = ngbe_dev_allmulticast_disable, 3169 .link_update = ngbe_dev_link_update, 3170 .stats_get = ngbe_dev_stats_get, 3171 .xstats_get = ngbe_dev_xstats_get, 3172 .xstats_get_by_id = ngbe_dev_xstats_get_by_id, 3173 .stats_reset = ngbe_dev_stats_reset, 3174 .xstats_reset = ngbe_dev_xstats_reset, 3175 .xstats_get_names = ngbe_dev_xstats_get_names, 3176 .xstats_get_names_by_id = ngbe_dev_xstats_get_names_by_id, 3177 .fw_version_get = ngbe_fw_version_get, 3178 .dev_supported_ptypes_get = ngbe_dev_supported_ptypes_get, 3179 .mtu_set = ngbe_dev_mtu_set, 3180 .vlan_filter_set = ngbe_vlan_filter_set, 3181 .vlan_tpid_set = ngbe_vlan_tpid_set, 3182 .vlan_offload_set = ngbe_vlan_offload_set, 3183 .vlan_strip_queue_set = ngbe_vlan_strip_queue_set, 3184 .rx_queue_start = ngbe_dev_rx_queue_start, 3185 .rx_queue_stop = ngbe_dev_rx_queue_stop, 3186 .tx_queue_start = ngbe_dev_tx_queue_start, 3187 .tx_queue_stop = ngbe_dev_tx_queue_stop, 3188 .rx_queue_setup = ngbe_dev_rx_queue_setup, 3189 .rx_queue_release = ngbe_dev_rx_queue_release, 3190 .tx_queue_setup = ngbe_dev_tx_queue_setup, 3191 .tx_queue_release = ngbe_dev_tx_queue_release, 3192 .dev_led_on = ngbe_dev_led_on, 3193 .dev_led_off = ngbe_dev_led_off, 3194 .flow_ctrl_get = ngbe_flow_ctrl_get, 3195 .flow_ctrl_set = ngbe_flow_ctrl_set, 3196 .mac_addr_add = ngbe_add_rar, 3197 .mac_addr_remove = ngbe_remove_rar, 3198 .mac_addr_set = ngbe_set_default_mac_addr, 3199 .uc_hash_table_set = ngbe_uc_hash_table_set, 3200 .uc_all_hash_table_set = ngbe_uc_all_hash_table_set, 3201 .reta_update = ngbe_dev_rss_reta_update, 3202 .reta_query = ngbe_dev_rss_reta_query, 3203 .rss_hash_update = ngbe_dev_rss_hash_update, 3204 .rss_hash_conf_get = 
ngbe_dev_rss_hash_conf_get, 3205 .set_mc_addr_list = ngbe_dev_set_mc_addr_list, 3206 .rxq_info_get = ngbe_rxq_info_get, 3207 .txq_info_get = ngbe_txq_info_get, 3208 .rx_burst_mode_get = ngbe_rx_burst_mode_get, 3209 .tx_burst_mode_get = ngbe_tx_burst_mode_get, 3210 .timesync_enable = ngbe_timesync_enable, 3211 .timesync_disable = ngbe_timesync_disable, 3212 .timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp, 3213 .timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp, 3214 .get_reg = ngbe_get_regs, 3215 .get_eeprom_length = ngbe_get_eeprom_length, 3216 .get_eeprom = ngbe_get_eeprom, 3217 .set_eeprom = ngbe_set_eeprom, 3218 .timesync_adjust_time = ngbe_timesync_adjust_time, 3219 .timesync_read_time = ngbe_timesync_read_time, 3220 .timesync_write_time = ngbe_timesync_write_time, 3221 .tx_done_cleanup = ngbe_dev_tx_done_cleanup, 3222 }; 3223 3224 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd); 3225 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map); 3226 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci"); 3227 3228 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE); 3229 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE); 3230 3231 #ifdef RTE_ETHDEV_DEBUG_RX 3232 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG); 3233 #endif 3234 #ifdef RTE_ETHDEV_DEBUG_TX 3235 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG); 3236 #endif 3237
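/*
 * Illustrative usage sketch (not part of the driver): an application drives
 * this PMD through the generic ethdev API rather than calling the dev_ops
 * table above directly. The port id, queue/ring sizes and the "mbuf_pool"
 * mempool below are assumptions made only for the example.
 *
 *	struct rte_eth_conf port_conf = {0};
 *	uint16_t port_id = 0;
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);                  (-> ngbe_dev_start)
 *
 *	struct rte_eth_link link;
 *	rte_eth_link_get_nowait(port_id, &link);     (-> ngbe_dev_link_update)
 */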