/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_regs_group.h"

static const struct reg_info ngbe_regs_general[] = {
	{NGBE_RST, 1, 1, "NGBE_RST"},
	{NGBE_STAT, 1, 1, "NGBE_STAT"},
	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_nvm[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_fctl_others[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rx[] = {
	{0, 0, 0, ""}
};

static struct reg_info ngbe_regs_tx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_wakeup[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_mac[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_diagnostic[] = {
	{0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *ngbe_regs_others[] = {
	ngbe_regs_general,
	ngbe_regs_nvm,
	ngbe_regs_interrupt,
	ngbe_regs_fctl_others,
	ngbe_regs_rxdma,
	ngbe_regs_rx,
	ngbe_regs_tx,
	ngbe_regs_wakeup,
	ngbe_regs_mac,
	ngbe_regs_diagnostic,
	NULL};

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

#define NGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),
	HW_XSTAT(rx_dma_drop),
	HW_XSTAT(tx_dma_drop),
	HW_XSTAT(tx_secdrp_packets),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_cnt),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* PB Stats */
	HW_XSTAT(rx_up_dropped),
	HW_XSTAT(rdb_pkt_cnt),
	HW_XSTAT(rdb_repli_cnt),
	HW_XSTAT(rdb_drp_cnt),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC RxTx */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
			   sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
			   sizeof(rte_ngbe_qp_strings[0]))

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
	ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	u32 led_conf = 0;
	int err, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_queue_count = ngbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * Rx and Tx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* Tx queue function in primary, set by last queue initialized;
		 * Tx queue may not be initialized by the primary process.
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default Tx function if we get here */
			PMD_INIT_LOG(NOTICE,
				"No Tx queues configured yet. Using default Tx function.");
		}

		ngbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->back = pci_dev;
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) {
		hw->sub_system_id = pci_dev->id.subsystem_device_id;
	} else {
		u32 ssid;

		ssid = ngbe_flash_read_dword(hw, 0xFFFDC);
		if (ssid == 0x1) {
			PMD_INIT_LOG(ERR,
				"Read of internal subsystem device id failed\n");
			return -ENODEV;
		}
		hw->sub_system_id = (u16)ssid >> 8 | (u16)ssid << 8;
	}
	ngbe_map_device_id(hw);

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ngbe_fc_full;
	hw->fc.current_mode = ngbe_fc_full;
	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
	hw->fc.low_water = NGBE_FC_XON_LOTH;
	hw->fc.high_water = NGBE_FC_XOFF_HITH;
	hw->fc.send_xon = 1;

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->phy.led_oem_chk(hw, &led_conf);
	if (err == 0)
		hw->led_conf = led_conf;
	else
		hw->led_conf = 0xFFFF;

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	ngbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %u bytes needed to store MAC addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
		RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = ngbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			(int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct ngbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		ngbe_vlan_hw_strip_enable(dev, queue);
	else
		ngbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			!(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg |= NGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg &= ~NGBE_RXCFG_VLAN;
	}
	rxcfg &= ~NGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		ngbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
		ngbe_dev_rx_queue_start(dev, queue);
	}
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type,
		uint16_t tpid)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, NGBE_PORTCTL);

	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				"Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_LSB_MASK,
				NGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, NGBE_EXTAG,
				NGBE_EXTAG_VLAN_MASK,
				NGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_MSB_MASK,
				NGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
	vlnctrl |= NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < NGBE_VFTA_SIZE; i++)
		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
	struct ngbe_rx_queue *rxq;

	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		NGBE_SET_HWSTRIP(hwstrip, queue);
	else
		NGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl &= ~NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record those settings for HW strip per queue */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl |= NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record those settings for HW strip per queue */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_VLANEXT;
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct ngbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			ngbe_vlan_hw_strip_enable(dev, i);
		else
			ngbe_vlan_hw_strip_disable(dev, i);
	}
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct ngbe_rx_queue *rxq;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		ngbe_vlan_hw_strip_config(dev);

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ngbe_vlan_hw_filter_enable(dev);
		else
			ngbe_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			ngbe_vlan_hw_extend_enable(dev);
		else
			ngbe_vlan_hw_extend_disable(dev);
	}

	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
			ngbe_qinq_hw_strip_enable(dev);
		else
			ngbe_qinq_hw_strip_disable(dev);
	}

	return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ngbe_config_vlan_strip_on_all_queues(dev, mask);

	ngbe_vlan_offload_config(dev, mask);

	return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
	 * allocation preconditions, we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;

	/* reinitialize adapter, this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	ngbe_set_pcie_master(hw, true);

	/* configure PF module if SRIOV enabled */
	ngbe_pf_host_configure(dev);

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbe_vlan_offload_config(dev, mask);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	hw->mac.setup_pba(hw);
	ngbe_configure_port(dev);

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	allowed_speeds = 0;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	err = hw->phy.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "PHY init failed");
		goto error;
	}
	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				     "LSC won't enable because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if (hw->gpio_ctl) {
		/* gpio0 is used to power on/off control*/
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);

	if (hw->gpio_ctl) {
		/* gpio0 is used to power on/off control*/
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	ngbe_stop_hw(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	hw->phy.set_phy_power(hw, false);

	ngbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	ngbe_set_pcie_master(hw, true);

	adapter->rss_reta_updated = 0;

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}

/*
 * Set device link up: power on.
 */
static int
ngbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	hw->phy.set_phy_power(hw, true);

	return 0;
}

/*
 * Set device link down: power off.
 */
static int
ngbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	hw->phy.set_phy_power(hw, false);

	return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	ngbe_set_pcie_master(hw, false);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
						   ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	/* uninitialize PF if max_vfs not zero */
	ngbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
	 * its VFs to make them align with it. The detailed notification
	 * mechanism is PMD specific. As to ngbe PF, it is rather complex.
	 * To avoid unexpected behavior in VF, currently reset of PF with
	 * SR-IOV activation is not supported. It might be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_ngbe_dev_uninit(dev);
	if (ret != 0)
		return ret;

	ret = eth_ngbe_dev_init(dev, NULL);

	return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
{ \
	uint32_t current_counter = rd32(hw, reg); \
	if (current_counter < last_counter) \
		current_counter += 0x100000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFLL; \
}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
{ \
	uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
	uint64_t current_counter_msb = rd32(hw, reg_msb); \
	uint64_t current_counter = (current_counter_msb << 32) | \
		current_counter_lsb; \
	if (current_counter < last_counter) \
		current_counter += 0x1000000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFFLL; \
}

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
			  struct ngbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
			hw->qp_last[i].rx_qp_bc_packets,
			hw_stats->qp[i].rx_qp_bc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
			hw->qp_last[i].tx_qp_mc_packets,
			hw_stats->qp[i].tx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
			hw->qp_last[i].tx_qp_bc_packets,
			hw_stats->qp[i].tx_qp_bc_packets);
	}

	/* PB Stats */
	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_dma_drop += rd32(hw, NGBE_DMATXDROP);
	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
	hw_stats->rx_oversize_cnt += rd32(hw, NGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, NGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, NGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, NGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, NGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, NGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, NGBE_LSECRX_BADPKT(i));
	}
	for (i = 0; i < 4; i++) {
		hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
		hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
	}
	hw_stats->rx_total_missed_packets =
			hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct ngbe_stat_mappings *stat_mappings =
			NGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	ngbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
	for (i = 0; i < NGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed = hw_stats->rx_total_missed_packets +
			 hw_stats->rx_dma_drop;
	stats->ierrors = hw_stats->rx_crc_errors +
			 hw_stats->rx_mac_short_packet_dropped +
			 hw_stats->rx_length_errors +
			 hw_stats->rx_undersize_errors +
			 hw_stats->rdb_drp_cnt +
			 hw_stats->rx_illegal_byte_errors +
			 hw_stats->rx_error_bytes +
			 hw_stats->rx_fragment_errors;

	/* Tx Errors */
	stats->oerrors = 0;
	return 0;
}

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	return NGBE_NB_HW_STATS +
	       NGBE_NB_QP_STATS * nb_queues;
}

static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		snprintf(name, size, "[hw]%s",
			rte_ngbe_stats_strings[id].name);
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		snprintf(name, size, "[q%u]%s", nb,
			rte_ngbe_qp_strings[st].name);
		return 0;
	}
	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

	return -(int)(id + 1);
}

static inline int
ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		*offset = rte_ngbe_stats_strings[id].offset;
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		*offset = rte_ngbe_qp_strings[st].offset +
			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
		return 0;
	}

	return -1;
}

static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned int limit)
{
	unsigned int i, count;

	count = ngbe_xstats_calc_num(dev);
	if (xstats_names == NULL)
		return count;

	/* Note: limit >= cnt_stats checked upstream
	 * in rte_eth_xstats_names()
	 */
	limit = min(limit, count);

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(i, xstats_names[i].name,
			sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
	}

	return i;
}

static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
					   const uint64_t *ids,
					   struct rte_eth_xstat_name *xstats_names,
					   unsigned int limit)
{
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
				sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
	}

	return i;
}

static int
ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (xstats == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset = 0;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
		xstats[i].id = i;
	}

	return i;
}

static int
ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
		     unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (values == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			  uint64_t *values, unsigned int limit)
{
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_(dev, values, limit);

	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(ids[i], &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

static int
ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret;

	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);

	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;

	return 0;
}

static int
ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
			       RTE_ETH_LINK_SPEED_10M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}

const uint32_t *
ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
		return ngbe_get_supported_ptypes();

	return NULL;
}

void
ngbe_dev_setup_link_alarm_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	u32 speed;
	bool autoneg = false;

	speed = hw->phy.autoneg_advertised;
	if (!speed)
		hw->mac.get_link_capabilities(hw, &speed, &autoneg);

	hw->mac.setup_link(hw, speed, true);

	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
}

/* return 0 means link status changed, -1 means not changed */
int
ngbe_dev_link_update_share(struct rte_eth_dev *dev,
			   int wait_to_complete)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_eth_link link;
	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
	u32 lan_speed = 0;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1900 bool link_up; 1901 int err; 1902 int wait = 1; 1903 1904 memset(&link, 0, sizeof(link)); 1905 link.link_status = RTE_ETH_LINK_DOWN; 1906 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 1907 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 1908 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 1909 ~RTE_ETH_LINK_SPEED_AUTONEG); 1910 1911 hw->mac.get_link_status = true; 1912 1913 if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG) 1914 return rte_eth_linkstatus_set(dev, &link); 1915 1916 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 1917 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 1918 wait = 0; 1919 1920 err = hw->mac.check_link(hw, &link_speed, &link_up, wait); 1921 if (err != 0) { 1922 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 1923 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1924 return rte_eth_linkstatus_set(dev, &link); 1925 } 1926 1927 if (!link_up) 1928 return rte_eth_linkstatus_set(dev, &link); 1929 1930 intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG; 1931 link.link_status = RTE_ETH_LINK_UP; 1932 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1933 1934 switch (link_speed) { 1935 default: 1936 case NGBE_LINK_SPEED_UNKNOWN: 1937 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 1938 break; 1939 1940 case NGBE_LINK_SPEED_10M_FULL: 1941 link.link_speed = RTE_ETH_SPEED_NUM_10M; 1942 lan_speed = 0; 1943 break; 1944 1945 case NGBE_LINK_SPEED_100M_FULL: 1946 link.link_speed = RTE_ETH_SPEED_NUM_100M; 1947 lan_speed = 1; 1948 break; 1949 1950 case NGBE_LINK_SPEED_1GB_FULL: 1951 link.link_speed = RTE_ETH_SPEED_NUM_1G; 1952 lan_speed = 2; 1953 break; 1954 } 1955 1956 if (hw->is_pf) { 1957 wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed); 1958 if (link_speed & (NGBE_LINK_SPEED_1GB_FULL | 1959 NGBE_LINK_SPEED_100M_FULL | 1960 NGBE_LINK_SPEED_10M_FULL)) { 1961 wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK, 1962 NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE); 1963 } 1964 } 1965 1966 return rte_eth_linkstatus_set(dev, &link); 1967 } 1968 1969 static int 1970 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 1971 { 1972 return ngbe_dev_link_update_share(dev, wait_to_complete); 1973 } 1974 1975 static int 1976 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 1977 { 1978 struct ngbe_hw *hw = ngbe_dev_hw(dev); 1979 uint32_t fctrl; 1980 1981 fctrl = rd32(hw, NGBE_PSRCTL); 1982 fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP); 1983 wr32(hw, NGBE_PSRCTL, fctrl); 1984 1985 return 0; 1986 } 1987 1988 static int 1989 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 1990 { 1991 struct ngbe_hw *hw = ngbe_dev_hw(dev); 1992 uint32_t fctrl; 1993 1994 fctrl = rd32(hw, NGBE_PSRCTL); 1995 fctrl &= (~NGBE_PSRCTL_UCP); 1996 if (dev->data->all_multicast == 1) 1997 fctrl |= NGBE_PSRCTL_MCP; 1998 else 1999 fctrl &= (~NGBE_PSRCTL_MCP); 2000 wr32(hw, NGBE_PSRCTL, fctrl); 2001 2002 return 0; 2003 } 2004 2005 static int 2006 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 2007 { 2008 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2009 uint32_t fctrl; 2010 2011 fctrl = rd32(hw, NGBE_PSRCTL); 2012 fctrl |= NGBE_PSRCTL_MCP; 2013 wr32(hw, NGBE_PSRCTL, fctrl); 2014 2015 return 0; 2016 } 2017 2018 static int 2019 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 2020 { 2021 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2022 uint32_t fctrl; 2023 2024 if (dev->data->promiscuous == 1) 2025 return 0; /* must remain in all_multicast mode */ 2026 2027 fctrl = rd32(hw, NGBE_PSRCTL); 2028 fctrl &= (~NGBE_PSRCTL_MCP); 2029 wr32(hw, NGBE_PSRCTL, fctrl); 2030 2031 return 0; 2032 } 
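/*
 * Note on the four filter-control helpers above: promiscuous mode sets both
 * the unicast (NGBE_PSRCTL_UCP) and multicast (NGBE_PSRCTL_MCP) promiscuous
 * bits, so disabling promiscuous mode only clears MCP when all_multicast is
 * not enabled, and disabling all_multicast is a no-op while promiscuous mode
 * is still active.
 */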
/**
 * Set up the link status change interrupt: enable or disable the
 * PHY and GPIO interrupt causes. It is called only once during
 * NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable (non-zero) or disable (zero).
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	ngbe_dev_link_status_print(dev);
	if (on != 0) {
		intr->mask_misc |= NGBE_ICRMISC_PHY;
		intr->mask_misc |= NGBE_ICRMISC_GPIO;
	} else {
		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
	}

	return 0;
}

/**
 * Set up the misc interrupt: enable the misc interrupt vector and
 * the GPIO cause. It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	u64 mask;

	mask = NGBE_ICR_MASK;
	mask &= (1ULL << NGBE_MISC_VEC_ID);
	intr->mask |= mask;
	intr->mask_misc |= NGBE_ICRMISC_GPIO;

	return 0;
}

/**
 * Set up the Rx queue interrupts: enable the interrupt vectors used
 * by the Rx queues. It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	u64 mask;

	mask = NGBE_ICR_MASK;
	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
	intr->mask |= mask;

	return 0;
}

/**
 * Set up the MACsec interrupt: enable the link security interrupt
 * cause. It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;

	return 0;
}
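/*
 * Summary of the interrupt masks built by the setup helpers above:
 * intr->mask collects the per-vector enable bits (the misc vector and the
 * Rx queue vectors starting at NGBE_RX_VEC_START), while intr->mask_misc
 * collects the individual misc causes (PHY, GPIO, LNKSEC) that are armed
 * alongside the misc vector.
 */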
/*
 * It reads the ICR and sets the flags for link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & NGBE_ICRMISC_PHY)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & NGBE_ICRMISC_VFMBX)
		intr->flags |= NGBE_FLAG_MAILBOX;

	if (eicr & NGBE_ICRMISC_LNKSEC)
		intr->flags |= NGBE_FLAG_MACSEC;

	if (eicr & NGBE_ICRMISC_GPIO)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;

	return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
ngbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status == RTE_ETH_LINK_UP) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			(int)(dev->data->port_id),
			(unsigned int)link.link_speed,
			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
			(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
			pci_dev->addr.domain,
			pci_dev->addr.bus,
			pci_dev->addr.devid,
			pci_dev->addr.function);
}

/*
 * It executes link_update after an interrupt has occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & NGBE_FLAG_MAILBOX) {
		ngbe_pf_mbx_process(dev);
		intr->flags &= ~NGBE_FLAG_MAILBOX;
	}

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before the link update, to detect a change later */
		rte_eth_linkstatus_get(dev, &link);

		ngbe_dev_link_update(dev, 0);
		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
		ngbe_dev_link_status_print(dev);
		if (dev->data->dev_link.link_speed != link.link_speed)
			rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_INTR_LSC, NULL);
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	ngbe_enable_intr(dev);

	return 0;
}

/**
 * Interrupt handler triggered by the NIC to handle a specific interrupt.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ngbe_dev_interrupt_get_status(dev);
	ngbe_dev_interrupt_action(dev);
}
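/*
 * The handler above ends up raising RTE_ETH_EVENT_INTR_LSC through
 * rte_eth_dev_callback_process(). A minimal application-side sketch of
 * consuming that event (assuming the port was configured with
 * dev_conf.intr_conf.lsc = 1; the callback name is illustrative):
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(event);
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("LSC event on port %u\n", port_id);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */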
static int
ngbe_dev_led_on(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
}

static int
ngbe_dev_led_off(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
}

static int
ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t mflcn_reg;
	uint32_t fccfg_reg;
	int rx_pause;
	int tx_pause;

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;

	/*
	 * Return rx_pause status according to actual setting of
	 * RXFCCFG register.
	 */
	mflcn_reg = rd32(hw, NGBE_RXFCCFG);
	if (mflcn_reg & NGBE_RXFCCFG_FC)
		rx_pause = 1;
	else
		rx_pause = 0;

	/*
	 * Return tx_pause status according to actual setting of
	 * TXFCCFG register.
	 */
	fccfg_reg = rd32(hw, NGBE_TXFCCFG);
	if (fccfg_reg & NGBE_TXFCCFG_FC)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_ETH_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_ETH_FC_NONE;

	return 0;
}
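/*
 * A minimal application-side sketch of requesting full flow control through
 * this driver (field values are illustrative; high_water/low_water are in KB,
 * as checked in ngbe_flow_ctrl_set() below):
 *
 *	struct rte_eth_fc_conf fc_conf = {
 *		.mode = RTE_ETH_FC_FULL,
 *		.pause_time = 0x680,
 *		.high_water = 0xC0,
 *		.low_water = 0x40,
 *		.send_xon = 1,
 *		.autoneg = 1,
 *	};
 *
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */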
static int
ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
		ngbe_fc_none,
		ngbe_fc_rx_pause,
		ngbe_fc_tx_pause,
		ngbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/*
	 * Reserve at least one Ethernet frame for the watermark;
	 * high_water/low_water are in kilobytes for ngbe.
	 */
	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
	if (fc_conf->high_water > max_high_water ||
	    fc_conf->high_water < fc_conf->low_water) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;
	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;

	err = hw->mac.fc_enable(hw);

	/* Not negotiated is not an error case */
	if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
		      (fc_conf->mac_ctrl_frame_fwd
		       ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
		ngbe_flush(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
	return -EIO;
}

int
ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	PMD_INIT_FUNC_TRACE();

	if (!hw->is_pf) {
		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
			"NIC.");
		return -ENOTSUP;
	}

	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
			"(%d) does not match the number supported by hardware "
			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1)) {
				reta &= ~(MS32(8 * j, 0xFF));
				reta |= LS32(reta_conf[idx].reta[shift + j],
						8 * j, 0xFF);
			}
		}
		wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
	}
	adapter->rss_reta_updated = 1;

	return 0;
}

int
ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;

	PMD_INIT_FUNC_TRACE();

	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
			"(%d) does not match the number supported by hardware "
			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1))
				reta_conf[idx].reta[shift + j] =
					(uint16_t)RS32(reta, 8 * j, 0xFF);
		}
	}

	return 0;
}

static int
ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t enable_addr = 1;

	return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
				pool, enable_addr);
}

static void
ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	ngbe_clear_rar(hw, index);
}

static int
ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	ngbe_remove_rar(dev, 0);
	ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

	return 0;
}

static int
ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	struct rte_eth_dev_data *dev_data = dev->data;
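	/*
	 * frame_size accounts for the L2 header and CRC, e.g. a standard
	 * 1500-byte MTU corresponds to a 1518-byte frame.
	 */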
2504 2505 /* If device is started, refuse mtu that requires the support of 2506 * scattered packets when this feature has not been enabled before. 2507 */ 2508 if (dev_data->dev_started && !dev_data->scattered_rx && 2509 (frame_size + 2 * RTE_VLAN_HLEN > 2510 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 2511 PMD_INIT_LOG(ERR, "Stop port first."); 2512 return -EINVAL; 2513 } 2514 2515 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK, 2516 NGBE_FRMSZ_MAX(frame_size)); 2517 2518 return 0; 2519 } 2520 2521 static uint32_t 2522 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr) 2523 { 2524 uint32_t vector = 0; 2525 2526 switch (hw->mac.mc_filter_type) { 2527 case 0: /* use bits [47:36] of the address */ 2528 vector = ((uc_addr->addr_bytes[4] >> 4) | 2529 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 2530 break; 2531 case 1: /* use bits [46:35] of the address */ 2532 vector = ((uc_addr->addr_bytes[4] >> 3) | 2533 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 2534 break; 2535 case 2: /* use bits [45:34] of the address */ 2536 vector = ((uc_addr->addr_bytes[4] >> 2) | 2537 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 2538 break; 2539 case 3: /* use bits [43:32] of the address */ 2540 vector = ((uc_addr->addr_bytes[4]) | 2541 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 2542 break; 2543 default: /* Invalid mc_filter_type */ 2544 break; 2545 } 2546 2547 /* vector can only be 12-bits or boundary will be exceeded */ 2548 vector &= 0xFFF; 2549 return vector; 2550 } 2551 2552 static int 2553 ngbe_uc_hash_table_set(struct rte_eth_dev *dev, 2554 struct rte_ether_addr *mac_addr, uint8_t on) 2555 { 2556 uint32_t vector; 2557 uint32_t uta_idx; 2558 uint32_t reg_val; 2559 uint32_t uta_mask; 2560 uint32_t psrctl; 2561 2562 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2563 struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev); 2564 2565 vector = ngbe_uta_vector(hw, mac_addr); 2566 uta_idx = (vector >> 5) & 0x7F; 2567 uta_mask = 0x1UL << (vector & 0x1F); 2568 2569 if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask)) 2570 return 0; 2571 2572 reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx)); 2573 if (on) { 2574 uta_info->uta_in_use++; 2575 reg_val |= uta_mask; 2576 uta_info->uta_shadow[uta_idx] |= uta_mask; 2577 } else { 2578 uta_info->uta_in_use--; 2579 reg_val &= ~uta_mask; 2580 uta_info->uta_shadow[uta_idx] &= ~uta_mask; 2581 } 2582 2583 wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val); 2584 2585 psrctl = rd32(hw, NGBE_PSRCTL); 2586 if (uta_info->uta_in_use > 0) 2587 psrctl |= NGBE_PSRCTL_UCHFENA; 2588 else 2589 psrctl &= ~NGBE_PSRCTL_UCHFENA; 2590 2591 psrctl &= ~NGBE_PSRCTL_ADHF12_MASK; 2592 psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type); 2593 wr32(hw, NGBE_PSRCTL, psrctl); 2594 2595 return 0; 2596 } 2597 2598 static int 2599 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 2600 { 2601 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2602 struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev); 2603 uint32_t psrctl; 2604 int i; 2605 2606 if (on) { 2607 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 2608 uta_info->uta_shadow[i] = ~0; 2609 wr32(hw, NGBE_UCADDRTBL(i), ~0); 2610 } 2611 } else { 2612 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 2613 uta_info->uta_shadow[i] = 0; 2614 wr32(hw, NGBE_UCADDRTBL(i), 0); 2615 } 2616 } 2617 2618 psrctl = rd32(hw, NGBE_PSRCTL); 2619 if (on) 2620 psrctl |= NGBE_PSRCTL_UCHFENA; 2621 else 2622 psrctl &= ~NGBE_PSRCTL_UCHFENA; 2623 2624 psrctl &= ~NGBE_PSRCTL_ADHF12_MASK; 2625 psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type); 2626 wr32(hw, 
NGBE_PSRCTL, psrctl);

	return 0;
}

/**
 * Set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to ngbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
void
ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
		   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= NGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, NGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, NGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workaround for ICR lost */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
	}
}

/**
 * Set up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  Pointer to struct rte_eth_dev
 */
static void
ngbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
	uint32_t vec = NGBE_MISC_VEC_ID;
	uint32_t gpie;

	/*
	 * Won't configure the MSI-X register if no mapping is done
	 * between intr vector and event fd;
	 * but if MSI-X has been enabled already, we need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, NGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & NGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = NGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, NGBE_GPIE);
	gpie |= NGBE_GPIE_MSIX;
	wr32(hw, NGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
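	 * Each Rx queue gets a 1:1 mapping to an MSI-X vector starting at
	 * NGBE_RX_VEC_START; once the available event fds are exhausted the
	 * remaining queues share the last vector, and the misc causes are
	 * mapped to NGBE_MISC_VEC_ID.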
2705 */ 2706 if (rte_intr_dp_is_en(intr_handle)) { 2707 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 2708 queue_id++) { 2709 /* by default, 1:1 mapping */ 2710 ngbe_set_ivar_map(hw, 0, queue_id, vec); 2711 rte_intr_vec_list_index_set(intr_handle, 2712 queue_id, vec); 2713 if (vec < base + rte_intr_nb_efd_get(intr_handle) 2714 - 1) 2715 vec++; 2716 } 2717 2718 ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID); 2719 } 2720 wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID), 2721 NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT) 2722 | NGBE_ITR_WRDSA); 2723 } 2724 2725 static u8 * 2726 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw, 2727 u8 **mc_addr_ptr, u32 *vmdq) 2728 { 2729 u8 *mc_addr; 2730 2731 *vmdq = 0; 2732 mc_addr = *mc_addr_ptr; 2733 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); 2734 return mc_addr; 2735 } 2736 2737 int 2738 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 2739 struct rte_ether_addr *mc_addr_set, 2740 uint32_t nb_mc_addr) 2741 { 2742 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2743 u8 *mc_addr_list; 2744 2745 mc_addr_list = (u8 *)mc_addr_set; 2746 return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 2747 ngbe_dev_addr_list_itr, TRUE); 2748 } 2749 2750 static uint64_t 2751 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 2752 { 2753 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2754 uint64_t systime_cycles; 2755 2756 systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL); 2757 systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32; 2758 2759 return systime_cycles; 2760 } 2761 2762 static uint64_t 2763 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 2764 { 2765 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2766 uint64_t rx_tstamp_cycles; 2767 2768 /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */ 2769 rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL); 2770 rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32; 2771 2772 return rx_tstamp_cycles; 2773 } 2774 2775 static uint64_t 2776 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 2777 { 2778 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2779 uint64_t tx_tstamp_cycles; 2780 2781 /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. 
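	 * The two 32-bit registers are combined into a single 64-bit cycle
	 * count and converted to nanoseconds later through the adapter's
	 * tx_tstamp_tc timecounter.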
*/ 2782 tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL); 2783 tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32; 2784 2785 return tx_tstamp_cycles; 2786 } 2787 2788 static void 2789 ngbe_start_timecounters(struct rte_eth_dev *dev) 2790 { 2791 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2792 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2793 uint32_t incval = 0; 2794 uint32_t shift = 0; 2795 2796 incval = NGBE_INCVAL_1GB; 2797 shift = NGBE_INCVAL_SHIFT_1GB; 2798 2799 wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval)); 2800 2801 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 2802 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 2803 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 2804 2805 adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK; 2806 adapter->systime_tc.cc_shift = shift; 2807 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 2808 2809 adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK; 2810 adapter->rx_tstamp_tc.cc_shift = shift; 2811 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 2812 2813 adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK; 2814 adapter->tx_tstamp_tc.cc_shift = shift; 2815 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 2816 } 2817 2818 static int 2819 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 2820 { 2821 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2822 2823 adapter->systime_tc.nsec += delta; 2824 adapter->rx_tstamp_tc.nsec += delta; 2825 adapter->tx_tstamp_tc.nsec += delta; 2826 2827 return 0; 2828 } 2829 2830 static int 2831 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 2832 { 2833 uint64_t ns; 2834 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2835 2836 ns = rte_timespec_to_ns(ts); 2837 /* Set the timecounters to a new value. */ 2838 adapter->systime_tc.nsec = ns; 2839 adapter->rx_tstamp_tc.nsec = ns; 2840 adapter->tx_tstamp_tc.nsec = ns; 2841 2842 return 0; 2843 } 2844 2845 static int 2846 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 2847 { 2848 uint64_t ns, systime_cycles; 2849 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2850 2851 systime_cycles = ngbe_read_systime_cyclecounter(dev); 2852 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 2853 *ts = rte_ns_to_timespec(ns); 2854 2855 return 0; 2856 } 2857 2858 static int 2859 ngbe_timesync_enable(struct rte_eth_dev *dev) 2860 { 2861 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2862 uint32_t tsync_ctl; 2863 2864 /* Stop the timesync system time. */ 2865 wr32(hw, NGBE_TSTIMEINC, 0x0); 2866 /* Reset the timesync system time value. */ 2867 wr32(hw, NGBE_TSTIMEL, 0x0); 2868 wr32(hw, NGBE_TSTIMEH, 0x0); 2869 2870 ngbe_start_timecounters(dev); 2871 2872 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 2873 wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 2874 RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588); 2875 2876 /* Enable timestamping of received PTP packets. */ 2877 tsync_ctl = rd32(hw, NGBE_TSRXCTL); 2878 tsync_ctl |= NGBE_TSRXCTL_ENA; 2879 wr32(hw, NGBE_TSRXCTL, tsync_ctl); 2880 2881 /* Enable timestamping of transmitted PTP packets. 
*/ 2882 tsync_ctl = rd32(hw, NGBE_TSTXCTL); 2883 tsync_ctl |= NGBE_TSTXCTL_ENA; 2884 wr32(hw, NGBE_TSTXCTL, tsync_ctl); 2885 2886 ngbe_flush(hw); 2887 2888 return 0; 2889 } 2890 2891 static int 2892 ngbe_timesync_disable(struct rte_eth_dev *dev) 2893 { 2894 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2895 uint32_t tsync_ctl; 2896 2897 /* Disable timestamping of transmitted PTP packets. */ 2898 tsync_ctl = rd32(hw, NGBE_TSTXCTL); 2899 tsync_ctl &= ~NGBE_TSTXCTL_ENA; 2900 wr32(hw, NGBE_TSTXCTL, tsync_ctl); 2901 2902 /* Disable timestamping of received PTP packets. */ 2903 tsync_ctl = rd32(hw, NGBE_TSRXCTL); 2904 tsync_ctl &= ~NGBE_TSRXCTL_ENA; 2905 wr32(hw, NGBE_TSRXCTL, tsync_ctl); 2906 2907 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 2908 wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0); 2909 2910 /* Stop incrementing the System Time registers. */ 2911 wr32(hw, NGBE_TSTIMEINC, 0); 2912 2913 return 0; 2914 } 2915 2916 static int 2917 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 2918 struct timespec *timestamp, 2919 uint32_t flags __rte_unused) 2920 { 2921 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2922 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2923 uint32_t tsync_rxctl; 2924 uint64_t rx_tstamp_cycles; 2925 uint64_t ns; 2926 2927 tsync_rxctl = rd32(hw, NGBE_TSRXCTL); 2928 if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0) 2929 return -EINVAL; 2930 2931 rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev); 2932 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 2933 *timestamp = rte_ns_to_timespec(ns); 2934 2935 return 0; 2936 } 2937 2938 static int 2939 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 2940 struct timespec *timestamp) 2941 { 2942 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2943 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 2944 uint32_t tsync_txctl; 2945 uint64_t tx_tstamp_cycles; 2946 uint64_t ns; 2947 2948 tsync_txctl = rd32(hw, NGBE_TSTXCTL); 2949 if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0) 2950 return -EINVAL; 2951 2952 tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev); 2953 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 2954 *timestamp = rte_ns_to_timespec(ns); 2955 2956 return 0; 2957 } 2958 2959 static int 2960 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused) 2961 { 2962 int count = 0; 2963 int g_ind = 0; 2964 const struct reg_info *reg_group; 2965 const struct reg_info **reg_set = ngbe_regs_others; 2966 2967 while ((reg_group = reg_set[g_ind++])) 2968 count += ngbe_regs_group_count(reg_group); 2969 2970 return count; 2971 } 2972 2973 static int 2974 ngbe_get_regs(struct rte_eth_dev *dev, 2975 struct rte_dev_reg_info *regs) 2976 { 2977 struct ngbe_hw *hw = ngbe_dev_hw(dev); 2978 uint32_t *data = regs->data; 2979 int g_ind = 0; 2980 int count = 0; 2981 const struct reg_info *reg_group; 2982 const struct reg_info **reg_set = ngbe_regs_others; 2983 2984 if (data == NULL) { 2985 regs->length = ngbe_get_reg_length(dev); 2986 regs->width = sizeof(uint32_t); 2987 return 0; 2988 } 2989 2990 /* Support only full register dump */ 2991 if (regs->length == 0 || 2992 regs->length == (uint32_t)ngbe_get_reg_length(dev)) { 2993 regs->version = hw->mac.type << 24 | 2994 hw->revision_id << 16 | 2995 hw->device_id; 2996 while ((reg_group = reg_set[g_ind++])) 2997 count += ngbe_read_regs_group(dev, &data[count], 2998 reg_group); 2999 return 0; 3000 } 3001 3002 return -ENOTSUP; 3003 } 3004 3005 static int 3006 ngbe_get_eeprom_length(struct rte_eth_dev *dev) 3007 { 3008 struct ngbe_hw *hw = 
ngbe_dev_hw(dev); 3009 3010 /* Return unit is byte count */ 3011 return hw->rom.word_size * 2; 3012 } 3013 3014 static int 3015 ngbe_get_eeprom(struct rte_eth_dev *dev, 3016 struct rte_dev_eeprom_info *in_eeprom) 3017 { 3018 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3019 struct ngbe_rom_info *eeprom = &hw->rom; 3020 uint16_t *data = in_eeprom->data; 3021 int first, length; 3022 3023 first = in_eeprom->offset >> 1; 3024 length = in_eeprom->length >> 1; 3025 if (first > hw->rom.word_size || 3026 ((first + length) > hw->rom.word_size)) 3027 return -EINVAL; 3028 3029 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 3030 3031 return eeprom->readw_buffer(hw, first, length, data); 3032 } 3033 3034 static int 3035 ngbe_set_eeprom(struct rte_eth_dev *dev, 3036 struct rte_dev_eeprom_info *in_eeprom) 3037 { 3038 struct ngbe_hw *hw = ngbe_dev_hw(dev); 3039 struct ngbe_rom_info *eeprom = &hw->rom; 3040 uint16_t *data = in_eeprom->data; 3041 int first, length; 3042 3043 first = in_eeprom->offset >> 1; 3044 length = in_eeprom->length >> 1; 3045 if (first > hw->rom.word_size || 3046 ((first + length) > hw->rom.word_size)) 3047 return -EINVAL; 3048 3049 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 3050 3051 return eeprom->writew_buffer(hw, first, length, data); 3052 } 3053 3054 static const struct eth_dev_ops ngbe_eth_dev_ops = { 3055 .dev_configure = ngbe_dev_configure, 3056 .dev_infos_get = ngbe_dev_info_get, 3057 .dev_start = ngbe_dev_start, 3058 .dev_stop = ngbe_dev_stop, 3059 .dev_set_link_up = ngbe_dev_set_link_up, 3060 .dev_set_link_down = ngbe_dev_set_link_down, 3061 .dev_close = ngbe_dev_close, 3062 .dev_reset = ngbe_dev_reset, 3063 .promiscuous_enable = ngbe_dev_promiscuous_enable, 3064 .promiscuous_disable = ngbe_dev_promiscuous_disable, 3065 .allmulticast_enable = ngbe_dev_allmulticast_enable, 3066 .allmulticast_disable = ngbe_dev_allmulticast_disable, 3067 .link_update = ngbe_dev_link_update, 3068 .stats_get = ngbe_dev_stats_get, 3069 .xstats_get = ngbe_dev_xstats_get, 3070 .xstats_get_by_id = ngbe_dev_xstats_get_by_id, 3071 .stats_reset = ngbe_dev_stats_reset, 3072 .xstats_reset = ngbe_dev_xstats_reset, 3073 .xstats_get_names = ngbe_dev_xstats_get_names, 3074 .xstats_get_names_by_id = ngbe_dev_xstats_get_names_by_id, 3075 .fw_version_get = ngbe_fw_version_get, 3076 .dev_supported_ptypes_get = ngbe_dev_supported_ptypes_get, 3077 .mtu_set = ngbe_dev_mtu_set, 3078 .vlan_filter_set = ngbe_vlan_filter_set, 3079 .vlan_tpid_set = ngbe_vlan_tpid_set, 3080 .vlan_offload_set = ngbe_vlan_offload_set, 3081 .vlan_strip_queue_set = ngbe_vlan_strip_queue_set, 3082 .rx_queue_start = ngbe_dev_rx_queue_start, 3083 .rx_queue_stop = ngbe_dev_rx_queue_stop, 3084 .tx_queue_start = ngbe_dev_tx_queue_start, 3085 .tx_queue_stop = ngbe_dev_tx_queue_stop, 3086 .rx_queue_setup = ngbe_dev_rx_queue_setup, 3087 .rx_queue_release = ngbe_dev_rx_queue_release, 3088 .tx_queue_setup = ngbe_dev_tx_queue_setup, 3089 .tx_queue_release = ngbe_dev_tx_queue_release, 3090 .dev_led_on = ngbe_dev_led_on, 3091 .dev_led_off = ngbe_dev_led_off, 3092 .flow_ctrl_get = ngbe_flow_ctrl_get, 3093 .flow_ctrl_set = ngbe_flow_ctrl_set, 3094 .mac_addr_add = ngbe_add_rar, 3095 .mac_addr_remove = ngbe_remove_rar, 3096 .mac_addr_set = ngbe_set_default_mac_addr, 3097 .uc_hash_table_set = ngbe_uc_hash_table_set, 3098 .uc_all_hash_table_set = ngbe_uc_all_hash_table_set, 3099 .reta_update = ngbe_dev_rss_reta_update, 3100 .reta_query = ngbe_dev_rss_reta_query, 3101 .rss_hash_update = ngbe_dev_rss_hash_update, 3102 .rss_hash_conf_get = 
ngbe_dev_rss_hash_conf_get, 3103 .set_mc_addr_list = ngbe_dev_set_mc_addr_list, 3104 .rxq_info_get = ngbe_rxq_info_get, 3105 .txq_info_get = ngbe_txq_info_get, 3106 .rx_burst_mode_get = ngbe_rx_burst_mode_get, 3107 .tx_burst_mode_get = ngbe_tx_burst_mode_get, 3108 .timesync_enable = ngbe_timesync_enable, 3109 .timesync_disable = ngbe_timesync_disable, 3110 .timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp, 3111 .timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp, 3112 .get_reg = ngbe_get_regs, 3113 .get_eeprom_length = ngbe_get_eeprom_length, 3114 .get_eeprom = ngbe_get_eeprom, 3115 .set_eeprom = ngbe_set_eeprom, 3116 .timesync_adjust_time = ngbe_timesync_adjust_time, 3117 .timesync_read_time = ngbe_timesync_read_time, 3118 .timesync_write_time = ngbe_timesync_write_time, 3119 .tx_done_cleanup = ngbe_dev_tx_done_cleanup, 3120 }; 3121 3122 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd); 3123 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map); 3124 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci"); 3125 3126 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE); 3127 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE); 3128 3129 #ifdef RTE_ETHDEV_DEBUG_RX 3130 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG); 3131 #endif 3132 #ifdef RTE_ETHDEV_DEBUG_TX 3133 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG); 3134 #endif 3135