/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

#include <rte_common.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <dev_driver.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_service_component.h>
#include <rte_alarm.h>
#include "eal_firmware.h"

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"

#include "nfp_common.h"
#include "nfp_ctrl.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_cpp_bridge.h"

#include "nfd3/nfp_nfd3.h"
#include "nfdk/nfp_nfdk.h"
#include "flower/nfp_flower.h"

static int
nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, int port)
{
	struct nfp_eth_table *nfp_eth_table;
	struct nfp_net_hw *hw = NULL;

	/* Grab a pointer to the correct physical port */
	hw = app_fw_nic->ports[port];

	nfp_eth_table = nfp_eth_read_ports(app_fw_nic->pf_dev->cpp);

	rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->mac_addr);

	free(nfp_eth_table);
	return 0;
}
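
/*
 * Port start sequence: enable the configured queues, optionally wire up
 * Rx queue interrupts, validate the MTU against the current freelist
 * buffer size, push the new control word to the device, fill the Rx
 * freelists and finally bring the physical port up.
 */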
static int
nfp_net_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	uint32_t cap_extend;
	uint32_t ctrl_extend = 0;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (app_fw_nic->multiport) {
			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
					"with NFP multiport PF");
			return -EINVAL;
		}
		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering the LSC interrupt handler.
			 */
			rte_intr_callback_unregister(pci_dev->intr_handle,
					nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
						"supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	/* Checking MTU set */
	if (dev->data->mtu > hw->flbufsz) {
		PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)",
				dev->data->mtu, hw->flbufsz);
		return -ERANGE;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	/* Enable VXLAN offload */
	if (hw->cap & NFP_NET_CFG_CTRL_VXLAN) {
		new_ctrl |= NFP_NET_CFG_CTRL_VXLAN;
		update |= NFP_NET_CFG_UPDATE_VXLAN;
	}

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/* Enable packet type offload through extended ctrl word1 */
	cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1);
	if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0)
		ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE;

	update = NFP_NET_CFG_UPDATE_GEN;
	if (nfp_net_ext_reconfig(hw, ctrl_extend, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for the configured rx queues.
	 * This requires the queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		nfp_eth_set_configured(dev->process_private,
				hw->nfp_idx, 1);

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app exits and
	 * the system then releases all the memory allocated, even the
	 * memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues ready
	 * for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but fails
	 * for subsequent queues.
	 *
	 * This should make the app exit, but it is better to tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}
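
/*
 * Note: the stop and link up/down callbacks below reconfigure the
 * physical port through the NSP. The primary process uses the CPP
 * handle kept in hw->cpp; a secondary process must use the CPP handle
 * it opened itself, stored in dev->process_private.
 */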

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static int
nfp_net_stop(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_INIT_LOG(DEBUG, "Stop");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_stop_tx_queue(dev);

	nfp_net_stop_rx_queue(dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		nfp_eth_set_configured(dev->process_private,
				hw->nfp_idx, 0);

	return 0;
}

/* Set the link up. */
static int
nfp_net_set_link_up(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link up");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		return nfp_eth_set_configured(dev->process_private,
				hw->nfp_idx, 1);
}

/* Set the link down. */
static int
nfp_net_set_link_down(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link down");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		return nfp_eth_set_configured(dev->process_private,
				hw->nfp_idx, 0);
}
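
/*
 * Closing a port only releases that port's private resources. The
 * shared PF resources (CPP handle, BAR mappings, symbol table,
 * hwinfo) are freed when the last physical port is closed.
 */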

/* Reset and stop device. The device can not be restarted. */
static int
nfp_net_close(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	int i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	PMD_INIT_LOG(DEBUG, "Close");

	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);

	nfp_net_close_rx_queue(dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
			(void *)dev);

	/* Mark this port as unused and free device private resources */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
	app_fw_nic->ports[hw->idx] = NULL;
	rte_eth_dev_release_port(dev);

	/* Only free PF resources after all physical ports have been closed */
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		/* Check to see if ports are still in use */
		if (app_fw_nic->ports[i])
			return 0;
	}

	/* Now it is safe to free all PF resources */
	PMD_INIT_LOG(INFO, "Freeing PF resources");
	nfp_cpp_area_free(pf_dev->ctrl_area);
	nfp_cpp_area_free(pf_dev->hwqueues_area);
	free(pf_dev->hwinfo);
	free(pf_dev->sym_tbl);
	nfp_cpp_free(pf_dev->cpp);
	rte_free(app_fw_nic);
	rte_free(pf_dev);

	rte_intr_disable(pci_dev->intr_handle);

	/* Unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	/*
	 * The ixgbe PMD disables the pcie master on the
	 * device. The i40e does not...
	 */

	return 0;
}
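
/*
 * Find the table index for a VXLAN UDP port: the slot already holding
 * this port if there is one, otherwise the first unused slot. Returns
 * -EINVAL when the port is not present and no slot is free.
 */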
static int
nfp_net_find_vxlan_idx(struct nfp_net_hw *hw,
		uint16_t port,
		uint32_t *idx)
{
	uint32_t i;
	int free_idx = -1;

	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
		if (hw->vxlan_ports[i] == port) {
			free_idx = i;
			break;
		}

		if (hw->vxlan_usecnt[i] == 0) {
			free_idx = i;
			break;
		}
	}

	if (free_idx == -1)
		return -EINVAL;

	*idx = free_idx;

	return 0;
}

static int
nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vxlan_port = tunnel_udp->udp_port;
	tnl_type = tunnel_udp->prot_type;

	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to find valid vxlan idx");
		return -EINVAL;
	}

	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set vxlan port");
			return -EINVAL;
		}
	}

	hw->vxlan_usecnt[idx]++;

	return 0;
}

static int
nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vxlan_port = tunnel_udp->udp_port;
	tnl_type = tunnel_udp->prot_type;

	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0 || hw->vxlan_usecnt[idx] == 0) {
		PMD_DRV_LOG(ERR, "Failed to find valid vxlan idx");
		return -EINVAL;
	}

	hw->vxlan_usecnt[idx]--;

	/* A use count of zero frees the slot: clear the port in hardware */
	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, 0);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set vxlan port");
			return -EINVAL;
		}
	}

	return 0;
}

/* Eth_dev ops registered with the DPDK application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
	.dev_configure = nfp_net_configure,
	.dev_start = nfp_net_start,
	.dev_stop = nfp_net_stop,
	.dev_set_link_up = nfp_net_set_link_up,
	.dev_set_link_down = nfp_net_set_link_down,
	.dev_close = nfp_net_close,
	.promiscuous_enable = nfp_net_promisc_enable,
	.promiscuous_disable = nfp_net_promisc_disable,
	.link_update = nfp_net_link_update,
	.stats_get = nfp_net_stats_get,
	.stats_reset = nfp_net_stats_reset,
	.xstats_get = nfp_net_xstats_get,
	.xstats_reset = nfp_net_xstats_reset,
	.xstats_get_names = nfp_net_xstats_get_names,
	.xstats_get_by_id = nfp_net_xstats_get_by_id,
	.xstats_get_names_by_id = nfp_net_xstats_get_names_by_id,
	.dev_infos_get = nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set = nfp_net_dev_mtu_set,
	.mac_addr_set = nfp_net_set_mac_addr,
	.vlan_offload_set = nfp_net_vlan_offload_set,
	.reta_update = nfp_net_reta_update,
	.reta_query = nfp_net_reta_query,
	.rss_hash_update = nfp_net_rss_hash_update,
	.rss_hash_conf_get = nfp_net_rss_hash_conf_get,
	.rx_queue_setup = nfp_net_rx_queue_setup,
	.rx_queue_release = nfp_net_rx_queue_release,
	.tx_queue_setup = nfp_net_tx_queue_setup,
	.tx_queue_release = nfp_net_tx_queue_release,
	.rx_queue_intr_enable = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable = nfp_rx_queue_intr_disable,
	.udp_tunnel_port_add = nfp_udp_tunnel_port_add,
	.udp_tunnel_port_del = nfp_udp_tunnel_port_del,
	.fw_version_get = nfp_net_firmware_version_get,
};

/* Select the Tx datapath: NFD3 and NFDK firmware use different descriptors */
static inline void
nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw,
		struct rte_eth_dev *eth_dev)
{
	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
		eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
	else
		eth_dev->tx_pkt_burst = nfp_net_nfdk_xmit_pkts;

	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
}
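
/*
 * Per-port initialization: map this port's slice of the PF control BAR
 * and its Tx/Rx queue ranges, read the read-only config fields, set up
 * the MAC address and register the LSC interrupt handler.
 */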
static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_net_hw *hw;
	struct rte_ether_addr *tmp_ether_addr;
	uint64_t rx_bar_off = 0;
	uint64_t tx_bar_off = 0;
	uint32_t start_q;
	int stride = 4;
	int port = 0;

	PMD_INIT_FUNC_TRACE();

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Use backpointer here to the PF of this eth_dev */
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);

	/* Use backpointer to the CoreNIC app struct */
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
	if (port < 0 || port > 7) {
		PMD_DRV_LOG(ERR, "Port value is wrong");
		return -ENODEV;
	}

	/*
	 * Use the PF array of physical ports to get the pointer to
	 * this specific port.
	 */
	hw = app_fw_nic->ports[port];

	PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, "
			"NFP internal port number: %d", port, hw->nfp_idx);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	hw->ctrl_bar = pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR, "hw->ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	if (port == 0) {
		uint32_t min_size;

		hw->ctrl_bar = pf_dev->ctrl_bar;
		/* Port 0 maps the MAC stats area shared by all ports */
		min_size = NFP_MAC_STATS_SIZE * hw->pf_dev->nfp_eth_table->max_index;
		hw->mac_stats_bar = nfp_rtsym_map(hw->pf_dev->sym_tbl, "_mac_stats",
				min_size, &hw->mac_stats_area);
		if (hw->mac_stats_bar == NULL) {
			PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar");
			return -EIO;
		}
		hw->mac_stats = hw->mac_stats_bar;
	} else {
		if (pf_dev->ctrl_bar == NULL)
			return -ENODEV;
		/* Use the port offset in the PF ctrl_bar for this port's control bar */
		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_PF_CSR_SLICE_SIZE);
		hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar + (port * NFP_MAC_STATS_SIZE);
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
	PMD_INIT_LOG(DEBUG, "MAC stats: %p", hw->mac_stats);

	nfp_net_cfg_read_version(hw);
	if (!nfp_net_is_valid_nfd_version(hw->ver))
		return -EINVAL;

	if (nfp_net_check_dma_mask(hw, pci_dev->name) != 0)
		return -ENODEV;

	nfp_net_ethdev_ops_mount(hw, eth_dev);

	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
	hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) *
			nfp_net_xstats_size(eth_dev), 0);
	if (hw->eth_xstats_base == NULL) {
		PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!",
				pci_dev->device.name);
		return -ENOMEM;
	}

	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP3800_PF_NIC:
	case PCI_DEVICE_ID_NFP4000_PF_NIC:
	case PCI_DEVICE_ID_NFP6000_PF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = nfp_pci_queue(pci_dev, start_q);
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = nfp_pci_queue(pci_dev, start_q);
		break;
	default:
		PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

	hw->tx_bar = pf_dev->hw_queues + tx_bar_off;
	hw->rx_bar = pf_dev->hw_queues + rx_bar_off;
	eth_dev->data->dev_private = hw;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
			hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);

	/* Get some of the read-only fields from the config BAR */
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = RTE_ETHER_MTU;
	hw->flbufsz = DEFAULT_FLBUF_SIZE;

	/* VLAN insertion is incompatible with LSOv2 */
	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	nfp_net_init_metadata_format(hw);

	if (hw->ver.major < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	hw->ctrl = 0;

	hw->stride_rx = stride;
	hw->stride_tx = stride;

	nfp_net_log_device_information(hw);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for the MAC address */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
			RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		return -ENOMEM;
	}

	nfp_net_pf_read_mac(app_fw_nic, port);
	nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]);

	tmp_ether_addr = &hw->mac_addr;
	if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
		/* Using a random MAC address */
		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
		nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
	}

	/* Copying the MAC address to the DPDK eth_dev struct */
	rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);

	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
			"mac=" RTE_ETHER_ADDR_PRT_FMT,
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id,
			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));

	/* Registering LSC interrupt handler */
	rte_intr_callback_register(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)eth_dev);
	/* Telling the firmware about the LSC interrupt entry */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	/* Recording current stats counters values */
	nfp_net_stats_reset(eth_dev);

	return 0;
}

#define DEFAULT_FW_PATH "/lib/firmware/netronome"
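
/*
 * Firmware images are searched for under DEFAULT_FW_PATH in decreasing
 * order of specificity, e.g. (names shown for illustration only):
 *
 *   serial-00-15-4d-13-51-0c-10-ff.nffw   (device serial)
 *   pci-0000:02:00.0.nffw                 (PCI name)
 *   nic_AMDA0096-0001_2x10.nffw           (card type and media)
 */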
static int
nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
{
	struct nfp_cpp *cpp = nsp->cpp;
	void *fw_buf;
	char fw_name[125];
	char serial[40];
	size_t fsize;

	/* Looking for firmware file in order of priority */

	/* First try to find a firmware image specific for this device */
	snprintf(serial, sizeof(serial),
			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
			cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
			cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
			cpp->interface & 0xff);

	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
			serial);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Then try the PCI name */
	snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
			dev->name);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Finally try the card type and media */
	snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) {
		PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
		return -ENOENT;
	}

load_fw:
	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
			fw_name, fsize);
	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
	nfp_nsp_load_fw(nsp, fw_buf, fsize);
	PMD_DRV_LOG(INFO, "Done");

	free(fw_buf);

	return 0;
}

static int
nfp_fw_setup(struct rte_pci_device *dev,
		struct nfp_cpp *cpp,
		struct nfp_eth_table *nfp_eth_table,
		struct nfp_hwinfo *hwinfo)
{
	struct nfp_nsp *nsp;
	const char *nfp_fw_model;
	char card_desc[100];
	int err = 0;

	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno");
	if (nfp_fw_model == NULL)
		nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");

	if (nfp_fw_model) {
		PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
	} else {
		PMD_DRV_LOG(ERR, "firmware model NOT found");
		return -EIO;
	}

	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
		PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
				nfp_eth_table->count);
		return -EIO;
	}

	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
			nfp_eth_table->count);

	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);

	/* Build the firmware name from the model, port count and speed in Gbps */
	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
			nfp_fw_model, nfp_eth_table->count,
			nfp_eth_table->ports[0].speed / 1000);

	nsp = nfp_nsp_open(cpp);
	if (nsp == NULL) {
		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
		return -EIO;
	}

	nfp_nsp_device_soft_reset(nsp);
	err = nfp_fw_upload(dev, nsp, card_desc);

	nfp_nsp_close(nsp);
	return err;
}
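
/*
 * CoreNIC firmware exposes one vNIC per physical port. Read the vNIC
 * count from the firmware symbol table, map the PF control BAR and
 * allocate and initialize one eth_dev per physical port.
 */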
static int
nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev)
{
	int i;
	int ret;
	int err = 0;
	int total_vnics;
	struct nfp_net_hw *hw;
	unsigned int numa_node;
	struct rte_eth_dev *eth_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_eth_table *nfp_eth_table;
	char port_name[RTE_ETH_NAME_MAX_LEN];

	nfp_eth_table = pf_dev->nfp_eth_table;
	PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count);

	/* Allocate memory for the CoreNIC app */
	app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0);
	if (app_fw_nic == NULL)
		return -ENOMEM;

	/* Point the app_fw_priv pointer in the PF to the CoreNIC app */
	pf_dev->app_fw_priv = app_fw_nic;

	/* Read the number of vNICs created for the PF */
	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/*
	 * For CoreNIC the number of vNICs exposed should be the same as the
	 * number of physical ports.
	 */
	if (total_vnics != (int)nfp_eth_table->count) {
		PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/* Populate the CoreNIC app properties */
	app_fw_nic->total_phyports = total_vnics;
	app_fw_nic->pf_dev = pf_dev;
	if (total_vnics > 1)
		app_fw_nic->multiport = true;

	/* Map the symbol table */
	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
			app_fw_nic->total_phyports * NFP_NET_CFG_BAR_SZ,
			&pf_dev->ctrl_area);
	if (pf_dev->ctrl_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_ctrl_bar");
		ret = -EIO;
		goto app_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);

	/* Loop through all physical ports on the PF */
	numa_node = rte_socket_id();
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		snprintf(port_name, sizeof(port_name), "%s_port%d",
				pf_dev->pci_dev->device.name, i);

		/* Allocate an eth_dev for this phyport */
		eth_dev = rte_eth_dev_allocate(port_name);
		if (eth_dev == NULL) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		/* Allocate memory for this phyport */
		eth_dev->data->dev_private =
				rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
						RTE_CACHE_LINE_SIZE, numa_node);
		if (eth_dev->data->dev_private == NULL) {
			ret = -ENOMEM;
			rte_eth_dev_release_port(eth_dev);
			goto port_cleanup;
		}

		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

		/* Add this device to the PF's array of physical ports */
		app_fw_nic->ports[i] = hw;

		hw->pf_dev = pf_dev;
		hw->cpp = pf_dev->cpp;
		hw->eth_dev = eth_dev;
		hw->idx = i;
		hw->nfp_idx = nfp_eth_table->ports[i].index;

		eth_dev->device = &pf_dev->pci_dev->device;

		/*
		 * Ctrl/tx/rx BAR mappings and the remaining init happen in
		 * nfp_net_init().
		 */
		ret = nfp_net_init(eth_dev);
		if (ret) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		rte_eth_dev_probing_finish(eth_dev);

	} /* End loop, all ports on this PF */

	return 0;

port_cleanup:
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		if (app_fw_nic->ports[i] && app_fw_nic->ports[i]->eth_dev) {
			struct rte_eth_dev *tmp_dev;
			tmp_dev = app_fw_nic->ports[i]->eth_dev;
			rte_eth_dev_release_port(tmp_dev);
			app_fw_nic->ports[i] = NULL;
		}
	}
	nfp_cpp_area_free(pf_dev->ctrl_area);
app_cleanup:
	rte_free(app_fw_nic);

	return ret;
}
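
/*
 * PF bring-up: open a CPP handle, read the hwinfo and ethernet port
 * tables, upload the firmware, read back the firmware symbol table,
 * map the queue controller BAR and then hand over to the app-specific
 * (CoreNIC or Flower) init code.
 */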
static int
nfp_pf_init(struct rte_pci_device *pci_dev)
{
	int ret;
	int err = 0;
	uint64_t addr;
	uint32_t cpp_id;
	struct nfp_cpp *cpp;
	enum nfp_app_fw_id app_fw_id;
	struct nfp_pf_dev *pf_dev;
	struct nfp_hwinfo *hwinfo;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_rtsym_table *sym_tbl;
	struct nfp_eth_table *nfp_eth_table;

	if (pci_dev == NULL)
		return -ENODEV;

	/*
	 * When the device is bound to UIO, it could be used, by mistake,
	 * by two DPDK apps, and the UIO driver does not prevent it. This
	 * could lead to a serious problem when configuring the NFP CPP
	 * interface. We avoid this by telling the CPP init code to use
	 * a lock file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(pci_dev, 0);
	else
		cpp = nfp_cpp_from_device_name(pci_dev, 1);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	hwinfo = nfp_hwinfo_read(cpp);
	if (hwinfo == NULL) {
		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
		ret = -EIO;
		goto cpp_cleanup;
	}

	/* Read the number of physical ports from hardware */
	nfp_eth_table = nfp_eth_read_ports(cpp);
	if (nfp_eth_table == NULL) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		ret = -EIO;
		goto hwinfo_cleanup;
	}

	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
		PMD_INIT_LOG(ERR, "Error when uploading firmware");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Now the symbol table should be there */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
				" symbol table");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Read the app ID of the loaded firmware */
	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	/* Allocate memory for the PF "device" */
	snprintf(name, sizeof(name), "nfp_pf%d", 0);
	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
	if (pf_dev == NULL) {
		ret = -ENOMEM;
		goto sym_tbl_cleanup;
	}

	/* Populate the newly created PF device */
	pf_dev->app_fw_id = app_fw_id;
	pf_dev->cpp = cpp;
	pf_dev->hwinfo = hwinfo;
	pf_dev->sym_tbl = sym_tbl;
	pf_dev->pci_dev = pci_dev;
	pf_dev->nfp_eth_table = nfp_eth_table;

	/* Configure access to the tx/rx vNIC BARs */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP3800_PF_NIC:
		addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP3800_OFFSET,
				0, NFP_PCIE_QUEUE_NFP3800_MASK);
		break;
	case PCI_DEVICE_ID_NFP4000_PF_NIC:
	case PCI_DEVICE_ID_NFP6000_PF_NIC:
		addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP6000_OFFSET,
				0, NFP_PCIE_QUEUE_NFP6000_MASK);
		break;
	default:
		PMD_INIT_LOG(ERR, "nfp_net: no device ID matching");
		ret = -ENODEV;
		goto pf_cleanup;
	}

	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
	pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, cpp_id,
			addr, NFP_QCP_QUEUE_AREA_SZ,
			&pf_dev->hwqueues_area);
	if (pf_dev->hw_queues == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc");
		ret = -EIO;
		goto pf_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues);
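
	/*
	 * All vNIC queue pointers live within this single queue controller
	 * mapping; nfp_net_init() derives each port's tx_bar/rx_bar as an
	 * offset from pf_dev->hw_queues.
	 */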

	/*
	 * PF initialization has been done at this point. Call the
	 * app-specific init code now.
	 */
	switch (pf_dev->app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_init_app_fw_nic(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto hwqueues_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_init_app_fw_flower(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto hwqueues_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto hwqueues_cleanup;
	}

	/* Register the CPP bridge service here for primary use */
	ret = nfp_enable_cpp_service(pf_dev);
	if (ret != 0)
		PMD_INIT_LOG(INFO, "Enable cpp service failed.");

	return 0;

hwqueues_cleanup:
	nfp_cpp_area_free(pf_dev->hwqueues_area);
pf_cleanup:
	rte_free(pf_dev);
sym_tbl_cleanup:
	free(sym_tbl);
eth_table_cleanup:
	free(nfp_eth_table);
hwinfo_cleanup:
	free(hwinfo);
cpp_cleanup:
	nfp_cpp_free(cpp);

	return ret;
}

static int
nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev,
		struct nfp_rtsym_table *sym_tbl,
		struct nfp_cpp *cpp)
{
	int i;
	int err = 0;
	int ret = 0;
	int total_vnics;
	struct nfp_net_hw *hw;

	/* Read the number of vNICs created for the PF */
	total_vnics = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		return -ENODEV;
	}

	for (i = 0; i < total_vnics; i++) {
		struct rte_eth_dev *eth_dev;
		char port_name[RTE_ETH_NAME_MAX_LEN];

		snprintf(port_name, sizeof(port_name), "%s_port%d",
				pci_dev->device.name, i);

		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
		eth_dev = rte_eth_dev_attach_secondary(port_name);
		if (eth_dev == NULL) {
			PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name);
			ret = -ENODEV;
			break;
		}

		/* Keep the CPP handle opened by this process for its own use */
		eth_dev->process_private = cpp;
		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
		nfp_net_ethdev_ops_mount(hw, eth_dev);

		rte_eth_dev_probing_finish(eth_dev);
	}

	return ret;
}

/*
 * When attaching to the NFP4000/6000 PF on a secondary process there
 * is no need to initialise the PF again. Only minimal work is required
 * here.
 */
static int
nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
{
	int err = 0;
	int ret = 0;
	struct nfp_cpp *cpp;
	enum nfp_app_fw_id app_fw_id;
	struct nfp_rtsym_table *sym_tbl;

	if (pci_dev == NULL)
		return -ENODEV;

	/*
	 * When the device is bound to UIO, it could be used, by mistake,
	 * by two DPDK apps, and the UIO driver does not prevent it. This
	 * could lead to a serious problem when configuring the NFP CPP
	 * interface. We avoid this by telling the CPP init code to use
	 * a lock file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(pci_dev, 0);
	else
		cpp = nfp_cpp_from_device_name(pci_dev, 1);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	/*
	 * We don't have access to the PF created in the primary process
	 * here, so we have to read the number of ports from firmware.
	 */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
				" symbol table");
		return -EIO;
	}

	/* Read the app ID of the loaded firmware */
	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	switch (app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_secondary_init_app_fw_nic(pci_dev, sym_tbl, cpp);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto sym_tbl_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_secondary_init_app_fw_flower(cpp);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto sym_tbl_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto sym_tbl_cleanup;
	}

sym_tbl_cleanup:
	free(sym_tbl);

	return ret;
}

static int
nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return nfp_pf_init(dev);
	else
		return nfp_pf_secondary_init(dev);
}

static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

static int
nfp_pci_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	uint16_t port_id;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Free up all physical ports under the PF */
	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
		rte_eth_dev_close(port_id);
	/*
	 * Ports can be closed and freed but hotplugging is not
	 * currently supported.
	 */
	return -ENOTSUP;
}

static int
eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
}

static struct rte_pci_driver rte_nfp_net_pf_pmd = {
	.id_table = pci_id_nfp_pf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_pf_pci_probe,
	.remove = eth_nfp_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");