1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2014-2021 Netronome Systems, Inc. 3 * All rights reserved. 4 * 5 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. 6 */ 7 8 #include <unistd.h> 9 10 #include <eal_firmware.h> 11 #include <rte_alarm.h> 12 13 #include "flower/nfp_flower.h" 14 #include "nfd3/nfp_nfd3.h" 15 #include "nfdk/nfp_nfdk.h" 16 #include "nfpcore/nfp_cpp.h" 17 #include "nfpcore/nfp_hwinfo.h" 18 #include "nfpcore/nfp_rtsym.h" 19 #include "nfpcore/nfp_nsp.h" 20 #include "nfpcore/nfp6000_pcie.h" 21 #include "nfpcore/nfp_resource.h" 22 23 #include "nfp_cpp_bridge.h" 24 #include "nfp_ipsec.h" 25 #include "nfp_logs.h" 26 27 #define NFP_PF_DRIVER_NAME net_nfp_pf 28 29 static void 30 nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, 31 uint16_t port) 32 { 33 struct nfp_net_hw *hw; 34 struct nfp_eth_table *nfp_eth_table; 35 36 /* Grab a pointer to the correct physical port */ 37 hw = app_fw_nic->ports[port]; 38 39 nfp_eth_table = app_fw_nic->pf_dev->nfp_eth_table; 40 41 rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->super.mac_addr); 42 } 43 44 static int 45 nfp_net_start(struct rte_eth_dev *dev) 46 { 47 int ret; 48 uint16_t i; 49 struct nfp_hw *hw; 50 uint32_t new_ctrl; 51 uint32_t update = 0; 52 uint32_t cap_extend; 53 uint32_t intr_vector; 54 uint32_t ctrl_extend = 0; 55 struct nfp_net_hw *net_hw; 56 struct nfp_pf_dev *pf_dev; 57 struct rte_eth_rxmode *rxmode; 58 struct nfp_app_fw_nic *app_fw_nic; 59 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 60 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 61 62 net_hw = dev->data->dev_private; 63 pf_dev = net_hw->pf_dev; 64 app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 65 hw = &net_hw->super; 66 67 /* Disabling queues just in case... */ 68 nfp_net_disable_queues(dev); 69 70 /* Enabling the required queues in the device */ 71 nfp_net_enable_queues(dev); 72 73 /* Check and configure queue intr-vector mapping */ 74 if (dev->data->dev_conf.intr_conf.rxq != 0) { 75 if (app_fw_nic->multiport) { 76 PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported " 77 "with NFP multiport PF"); 78 return -EINVAL; 79 } 80 81 if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) { 82 /* 83 * Better not to share LSC with RX interrupts. 84 * Unregistering LSC interrupt handler. 85 */ 86 rte_intr_callback_unregister(intr_handle, 87 nfp_net_dev_interrupt_handler, (void *)dev); 88 89 if (dev->data->nb_rx_queues > 1) { 90 PMD_INIT_LOG(ERR, "PMD rx interrupt only " 91 "supports 1 queue with UIO"); 92 return -EIO; 93 } 94 } 95 96 intr_vector = dev->data->nb_rx_queues; 97 if (rte_intr_efd_enable(intr_handle, intr_vector) != 0) 98 return -1; 99 100 nfp_configure_rx_interrupt(dev, intr_handle); 101 update = NFP_NET_CFG_UPDATE_MSIX; 102 } 103 104 /* Checking MTU set */ 105 if (dev->data->mtu > net_hw->flbufsz) { 106 PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)", 107 dev->data->mtu, net_hw->flbufsz); 108 return -ERANGE; 109 } 110 111 rte_intr_enable(intr_handle); 112 113 new_ctrl = nfp_check_offloads(dev); 114 115 /* Writing configuration parameters in the device */ 116 nfp_net_params_setup(net_hw); 117 118 rxmode = &dev->data->dev_conf.rxmode; 119 if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) { 120 nfp_net_rss_config_default(dev); 121 update |= NFP_NET_CFG_UPDATE_RSS; 122 new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap); 123 } 124 125 /* Enable device */ 126 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; 127 128 update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; 129 130 /* Enable vxlan */ 131 if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) { 132 new_ctrl |= NFP_NET_CFG_CTRL_VXLAN; 133 update |= NFP_NET_CFG_UPDATE_VXLAN; 134 } 135 136 if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0) 137 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; 138 139 if (nfp_reconfig(hw, new_ctrl, update) != 0) 140 return -EIO; 141 142 hw->ctrl = new_ctrl; 143 144 /* Enable packet type offload by extend ctrl word1. */ 145 cap_extend = hw->cap_ext; 146 if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0) 147 ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE; 148 149 if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) 150 ctrl_extend |= NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP 151 | NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP; 152 153 update = NFP_NET_CFG_UPDATE_GEN; 154 if (nfp_ext_reconfig(hw, ctrl_extend, update) != 0) 155 return -EIO; 156 157 hw->ctrl_ext = ctrl_extend; 158 159 /* 160 * Allocating rte mbufs for configured rx queues. 161 * This requires queues being enabled before. 162 */ 163 if (nfp_net_rx_freelist_setup(dev) != 0) { 164 ret = -ENOMEM; 165 goto error; 166 } 167 168 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 169 /* Configure the physical port up */ 170 nfp_eth_set_configured(net_hw->cpp, net_hw->nfp_idx, 1); 171 else 172 nfp_eth_set_configured(dev->process_private, net_hw->nfp_idx, 1); 173 174 for (i = 0; i < dev->data->nb_rx_queues; i++) 175 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 176 for (i = 0; i < dev->data->nb_tx_queues; i++) 177 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 178 179 return 0; 180 181 error: 182 /* 183 * An error returned by this function should mean the app 184 * exiting and then the system releasing all the memory 185 * allocated even memory coming from hugepages. 186 * 187 * The device could be enabled at this point with some queues 188 * ready for getting packets. This is true if the call to 189 * nfp_net_rx_freelist_setup() succeeds for some queues but 190 * fails for subsequent queues. 191 * 192 * This should make the app exiting but better if we tell the 193 * device first. 194 */ 195 nfp_net_disable_queues(dev); 196 197 return ret; 198 } 199 200 /* Set the link up. */ 201 static int 202 nfp_net_set_link_up(struct rte_eth_dev *dev) 203 { 204 struct nfp_net_hw *hw; 205 206 hw = dev->data->dev_private; 207 208 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 209 /* Configure the physical port down */ 210 return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1); 211 else 212 return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 1); 213 } 214 215 /* Set the link down. */ 216 static int 217 nfp_net_set_link_down(struct rte_eth_dev *dev) 218 { 219 struct nfp_net_hw *hw; 220 221 hw = dev->data->dev_private; 222 223 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 224 /* Configure the physical port down */ 225 return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); 226 else 227 return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0); 228 } 229 230 static uint8_t 231 nfp_function_id_get(const struct nfp_pf_dev *pf_dev, 232 uint8_t phy_port) 233 { 234 if (pf_dev->multi_pf.enabled) 235 return pf_dev->multi_pf.function_id; 236 237 return phy_port; 238 } 239 240 static void 241 nfp_net_beat_timer(void *arg) 242 { 243 uint64_t cur_sec; 244 struct nfp_multi_pf *multi_pf = arg; 245 246 cur_sec = rte_rdtsc(); 247 nn_writeq(cur_sec, multi_pf->beat_addr + NFP_BEAT_OFFSET(multi_pf->function_id)); 248 249 /* Beat once per second. */ 250 if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer, 251 (void *)multi_pf) < 0) { 252 PMD_DRV_LOG(ERR, "Error setting alarm"); 253 } 254 } 255 256 static int 257 nfp_net_keepalive_init(struct nfp_cpp *cpp, 258 struct nfp_multi_pf *multi_pf) 259 { 260 uint8_t *base; 261 uint64_t addr; 262 uint32_t size; 263 uint32_t cpp_id; 264 struct nfp_resource *res; 265 266 res = nfp_resource_acquire(cpp, NFP_RESOURCE_KEEPALIVE); 267 if (res == NULL) 268 return -EIO; 269 270 cpp_id = nfp_resource_cpp_id(res); 271 addr = nfp_resource_address(res); 272 size = nfp_resource_size(res); 273 274 nfp_resource_release(res); 275 276 /* Allocate a fixed area for keepalive. */ 277 base = nfp_cpp_map_area(cpp, cpp_id, addr, size, &multi_pf->beat_area); 278 if (base == NULL) { 279 PMD_DRV_LOG(ERR, "Failed to map area for keepalive."); 280 return -EIO; 281 } 282 283 multi_pf->beat_addr = base; 284 285 return 0; 286 } 287 288 static void 289 nfp_net_keepalive_uninit(struct nfp_multi_pf *multi_pf) 290 { 291 nfp_cpp_area_release_free(multi_pf->beat_area); 292 } 293 294 static int 295 nfp_net_keepalive_start(struct nfp_multi_pf *multi_pf) 296 { 297 if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer, 298 (void *)multi_pf) < 0) { 299 PMD_DRV_LOG(ERR, "Error setting alarm"); 300 return -EIO; 301 } 302 303 return 0; 304 } 305 306 static void 307 nfp_net_keepalive_stop(struct nfp_multi_pf *multi_pf) 308 { 309 /* Cancel keepalive for multiple PF setup */ 310 rte_eal_alarm_cancel(nfp_net_beat_timer, (void *)multi_pf); 311 } 312 313 /* Reset and stop device. The device can not be restarted. */ 314 static int 315 nfp_net_close(struct rte_eth_dev *dev) 316 { 317 uint8_t i; 318 uint8_t id; 319 struct nfp_net_hw *hw; 320 struct nfp_pf_dev *pf_dev; 321 struct rte_pci_device *pci_dev; 322 struct nfp_app_fw_nic *app_fw_nic; 323 324 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 325 return 0; 326 327 hw = dev->data->dev_private; 328 pf_dev = hw->pf_dev; 329 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 330 app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 331 332 /* 333 * We assume that the DPDK application is stopping all the 334 * threads/queues before calling the device close function. 335 */ 336 nfp_net_disable_queues(dev); 337 338 /* Clear queues */ 339 nfp_net_close_tx_queue(dev); 340 nfp_net_close_rx_queue(dev); 341 342 /* Clear ipsec */ 343 nfp_ipsec_uninit(dev); 344 345 /* Cancel possible impending LSC work here before releasing the port */ 346 rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); 347 348 /* Only free PF resources after all physical ports have been closed */ 349 /* Mark this port as unused and free device priv resources */ 350 nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff); 351 app_fw_nic->ports[hw->idx] = NULL; 352 353 for (i = 0; i < app_fw_nic->total_phyports; i++) { 354 id = nfp_function_id_get(pf_dev, i); 355 356 /* Check to see if ports are still in use */ 357 if (app_fw_nic->ports[id] != NULL) 358 return 0; 359 } 360 361 /* Now it is safe to free all PF resources */ 362 PMD_INIT_LOG(INFO, "Freeing PF resources"); 363 if (pf_dev->multi_pf.enabled) { 364 nfp_net_keepalive_stop(&pf_dev->multi_pf); 365 nfp_net_keepalive_uninit(&pf_dev->multi_pf); 366 } 367 nfp_cpp_area_free(pf_dev->ctrl_area); 368 nfp_cpp_area_free(pf_dev->qc_area); 369 free(pf_dev->hwinfo); 370 free(pf_dev->sym_tbl); 371 nfp_cpp_free(pf_dev->cpp); 372 rte_free(app_fw_nic); 373 rte_free(pf_dev); 374 375 rte_intr_disable(pci_dev->intr_handle); 376 377 /* Unregister callback func from eal lib */ 378 rte_intr_callback_unregister(pci_dev->intr_handle, 379 nfp_net_dev_interrupt_handler, (void *)dev); 380 381 return 0; 382 } 383 384 static int 385 nfp_net_find_vxlan_idx(struct nfp_net_hw *hw, 386 uint16_t port, 387 uint32_t *idx) 388 { 389 uint32_t i; 390 int free_idx = -1; 391 392 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) { 393 if (hw->vxlan_ports[i] == port) { 394 free_idx = i; 395 break; 396 } 397 398 if (hw->vxlan_usecnt[i] == 0) { 399 free_idx = i; 400 break; 401 } 402 } 403 404 if (free_idx == -1) 405 return -EINVAL; 406 407 *idx = free_idx; 408 409 return 0; 410 } 411 412 static int 413 nfp_udp_tunnel_port_add(struct rte_eth_dev *dev, 414 struct rte_eth_udp_tunnel *tunnel_udp) 415 { 416 int ret; 417 uint32_t idx; 418 uint16_t vxlan_port; 419 struct nfp_net_hw *hw; 420 enum rte_eth_tunnel_type tnl_type; 421 422 hw = dev->data->dev_private; 423 vxlan_port = tunnel_udp->udp_port; 424 tnl_type = tunnel_udp->prot_type; 425 426 if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { 427 PMD_DRV_LOG(ERR, "Not VXLAN tunnel"); 428 return -ENOTSUP; 429 } 430 431 ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx); 432 if (ret != 0) { 433 PMD_DRV_LOG(ERR, "Failed find valid vxlan idx"); 434 return -EINVAL; 435 } 436 437 if (hw->vxlan_usecnt[idx] == 0) { 438 ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port); 439 if (ret != 0) { 440 PMD_DRV_LOG(ERR, "Failed set vxlan port"); 441 return -EINVAL; 442 } 443 } 444 445 hw->vxlan_usecnt[idx]++; 446 447 return 0; 448 } 449 450 static int 451 nfp_udp_tunnel_port_del(struct rte_eth_dev *dev, 452 struct rte_eth_udp_tunnel *tunnel_udp) 453 { 454 int ret; 455 uint32_t idx; 456 uint16_t vxlan_port; 457 struct nfp_net_hw *hw; 458 enum rte_eth_tunnel_type tnl_type; 459 460 hw = dev->data->dev_private; 461 vxlan_port = tunnel_udp->udp_port; 462 tnl_type = tunnel_udp->prot_type; 463 464 if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { 465 PMD_DRV_LOG(ERR, "Not VXLAN tunnel"); 466 return -ENOTSUP; 467 } 468 469 ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx); 470 if (ret != 0 || hw->vxlan_usecnt[idx] == 0) { 471 PMD_DRV_LOG(ERR, "Failed find valid vxlan idx"); 472 return -EINVAL; 473 } 474 475 hw->vxlan_usecnt[idx]--; 476 477 if (hw->vxlan_usecnt[idx] == 0) { 478 ret = nfp_net_set_vxlan_port(hw, idx, 0); 479 if (ret != 0) { 480 PMD_DRV_LOG(ERR, "Failed set vxlan port"); 481 return -EINVAL; 482 } 483 } 484 485 return 0; 486 } 487 488 /* Initialise and register driver with DPDK Application */ 489 static const struct eth_dev_ops nfp_net_eth_dev_ops = { 490 .dev_configure = nfp_net_configure, 491 .dev_start = nfp_net_start, 492 .dev_stop = nfp_net_stop, 493 .dev_set_link_up = nfp_net_set_link_up, 494 .dev_set_link_down = nfp_net_set_link_down, 495 .dev_close = nfp_net_close, 496 .promiscuous_enable = nfp_net_promisc_enable, 497 .promiscuous_disable = nfp_net_promisc_disable, 498 .allmulticast_enable = nfp_net_allmulticast_enable, 499 .allmulticast_disable = nfp_net_allmulticast_disable, 500 .link_update = nfp_net_link_update, 501 .stats_get = nfp_net_stats_get, 502 .stats_reset = nfp_net_stats_reset, 503 .xstats_get = nfp_net_xstats_get, 504 .xstats_reset = nfp_net_xstats_reset, 505 .xstats_get_names = nfp_net_xstats_get_names, 506 .xstats_get_by_id = nfp_net_xstats_get_by_id, 507 .xstats_get_names_by_id = nfp_net_xstats_get_names_by_id, 508 .dev_infos_get = nfp_net_infos_get, 509 .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, 510 .mtu_set = nfp_net_dev_mtu_set, 511 .mac_addr_set = nfp_net_set_mac_addr, 512 .vlan_offload_set = nfp_net_vlan_offload_set, 513 .reta_update = nfp_net_reta_update, 514 .reta_query = nfp_net_reta_query, 515 .rss_hash_update = nfp_net_rss_hash_update, 516 .rss_hash_conf_get = nfp_net_rss_hash_conf_get, 517 .rx_queue_setup = nfp_net_rx_queue_setup, 518 .rx_queue_release = nfp_net_rx_queue_release, 519 .tx_queue_setup = nfp_net_tx_queue_setup, 520 .tx_queue_release = nfp_net_tx_queue_release, 521 .rx_queue_intr_enable = nfp_rx_queue_intr_enable, 522 .rx_queue_intr_disable = nfp_rx_queue_intr_disable, 523 .udp_tunnel_port_add = nfp_udp_tunnel_port_add, 524 .udp_tunnel_port_del = nfp_udp_tunnel_port_del, 525 .fw_version_get = nfp_net_firmware_version_get, 526 .flow_ctrl_get = nfp_net_flow_ctrl_get, 527 .flow_ctrl_set = nfp_net_flow_ctrl_set, 528 }; 529 530 static inline void 531 nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, 532 struct rte_eth_dev *eth_dev) 533 { 534 if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3) 535 eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts; 536 else 537 eth_dev->tx_pkt_burst = nfp_net_nfdk_xmit_pkts; 538 539 eth_dev->dev_ops = &nfp_net_eth_dev_ops; 540 eth_dev->rx_queue_count = nfp_net_rx_queue_count; 541 eth_dev->rx_pkt_burst = &nfp_net_recv_pkts; 542 } 543 544 static int 545 nfp_net_init(struct rte_eth_dev *eth_dev) 546 { 547 int err; 548 uint16_t port; 549 uint64_t rx_base; 550 uint64_t tx_base; 551 struct nfp_hw *hw; 552 struct nfp_net_hw *net_hw; 553 struct nfp_pf_dev *pf_dev; 554 struct rte_pci_device *pci_dev; 555 struct nfp_app_fw_nic *app_fw_nic; 556 557 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 558 net_hw = eth_dev->data->dev_private; 559 560 /* Use backpointer here to the PF of this eth_dev */ 561 pf_dev = net_hw->pf_dev; 562 563 /* Use backpointer to the CoreNIC app struct */ 564 app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); 565 566 port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx; 567 if (port > 7) { 568 PMD_DRV_LOG(ERR, "Port value is wrong"); 569 return -ENODEV; 570 } 571 572 hw = &net_hw->super; 573 574 PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, " 575 "NFP internal port number: %d", port, net_hw->nfp_idx); 576 577 rte_eth_copy_pci_info(eth_dev, pci_dev); 578 579 if (port == 0 || pf_dev->multi_pf.enabled) { 580 uint32_t min_size; 581 582 hw->ctrl_bar = pf_dev->ctrl_bar; 583 min_size = NFP_MAC_STATS_SIZE * net_hw->pf_dev->nfp_eth_table->max_index; 584 net_hw->mac_stats_bar = nfp_rtsym_map(net_hw->pf_dev->sym_tbl, "_mac_stats", 585 min_size, &net_hw->mac_stats_area); 586 if (net_hw->mac_stats_bar == NULL) { 587 PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar"); 588 return -EIO; 589 } 590 591 net_hw->mac_stats = net_hw->mac_stats_bar; 592 } else { 593 if (pf_dev->ctrl_bar == NULL) 594 return -ENODEV; 595 596 /* Use port offset in pf ctrl_bar for this ports control bar */ 597 hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ); 598 net_hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar + 599 (net_hw->nfp_idx * NFP_MAC_STATS_SIZE); 600 } 601 602 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); 603 PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats); 604 605 err = nfp_net_common_init(pci_dev, net_hw); 606 if (err != 0) 607 return err; 608 609 err = nfp_net_tlv_caps_parse(eth_dev); 610 if (err != 0) { 611 PMD_INIT_LOG(ERR, "Failed to parser TLV caps"); 612 return err; 613 } 614 615 err = nfp_ipsec_init(eth_dev); 616 if (err != 0) { 617 PMD_INIT_LOG(ERR, "Failed to init IPsec module"); 618 return err; 619 } 620 621 nfp_net_ethdev_ops_mount(net_hw, eth_dev); 622 623 net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) * 624 nfp_net_xstats_size(eth_dev), 0); 625 if (net_hw->eth_xstats_base == NULL) { 626 PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!", 627 pci_dev->device.name); 628 return -ENOMEM; 629 } 630 631 /* Work out where in the BAR the queues start. */ 632 tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); 633 rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); 634 635 net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ; 636 net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ; 637 eth_dev->data->dev_private = net_hw; 638 639 PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p", 640 hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar); 641 642 nfp_net_cfg_queue_setup(net_hw); 643 net_hw->mtu = RTE_ETHER_MTU; 644 645 /* VLAN insertion is incompatible with LSOv2 */ 646 if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0) 647 hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN; 648 649 nfp_net_log_device_information(net_hw); 650 651 /* Initializing spinlock for reconfigs */ 652 rte_spinlock_init(&hw->reconfig_lock); 653 654 /* Allocating memory for mac addr */ 655 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0); 656 if (eth_dev->data->mac_addrs == NULL) { 657 PMD_INIT_LOG(ERR, "Failed to space for MAC address"); 658 return -ENOMEM; 659 } 660 661 nfp_net_pf_read_mac(app_fw_nic, port); 662 nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]); 663 664 if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) { 665 PMD_INIT_LOG(INFO, "Using random mac address for port %d", port); 666 /* Using random mac addresses for VFs */ 667 rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]); 668 nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]); 669 } 670 671 /* Copying mac address to DPDK eth_dev struct */ 672 rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs); 673 674 if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) 675 eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR; 676 677 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 678 679 PMD_INIT_LOG(INFO, "port %d VendorID=%#x DeviceID=%#x " 680 "mac=" RTE_ETHER_ADDR_PRT_FMT, 681 eth_dev->data->port_id, pci_dev->id.vendor_id, 682 pci_dev->id.device_id, 683 RTE_ETHER_ADDR_BYTES(&hw->mac_addr)); 684 685 /* Registering LSC interrupt handler */ 686 rte_intr_callback_register(pci_dev->intr_handle, 687 nfp_net_dev_interrupt_handler, (void *)eth_dev); 688 /* Telling the firmware about the LSC interrupt entry */ 689 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); 690 /* Unmask the LSC interrupt */ 691 nfp_net_irq_unmask(eth_dev); 692 /* Recording current stats counters values */ 693 nfp_net_stats_reset(eth_dev); 694 695 return 0; 696 } 697 698 #define DEFAULT_FW_PATH "/lib/firmware/netronome" 699 700 static int 701 nfp_fw_upload(struct rte_pci_device *dev, 702 struct nfp_nsp *nsp, 703 char *card) 704 { 705 void *fw_buf; 706 size_t fsize; 707 char serial[40]; 708 char fw_name[125]; 709 uint16_t interface; 710 uint32_t cpp_serial_len; 711 const uint8_t *cpp_serial; 712 struct nfp_cpp *cpp = nfp_nsp_cpp(nsp); 713 714 cpp_serial_len = nfp_cpp_serial(cpp, &cpp_serial); 715 if (cpp_serial_len != NFP_SERIAL_LEN) 716 return -ERANGE; 717 718 interface = nfp_cpp_interface(cpp); 719 720 /* Looking for firmware file in order of priority */ 721 722 /* First try to find a firmware image specific for this device */ 723 snprintf(serial, sizeof(serial), 724 "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", 725 cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3], 726 cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff); 727 snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH, serial); 728 729 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 730 if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) 731 goto load_fw; 732 733 /* Then try the PCI name */ 734 snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, 735 dev->name); 736 737 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 738 if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) 739 goto load_fw; 740 741 /* Finally try the card type and media */ 742 snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card); 743 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); 744 if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) 745 goto load_fw; 746 747 PMD_DRV_LOG(ERR, "Can't find suitable firmware."); 748 return -ENOENT; 749 750 load_fw: 751 PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu", 752 fw_name, fsize); 753 PMD_DRV_LOG(INFO, "Uploading the firmware ..."); 754 if (nfp_nsp_load_fw(nsp, fw_buf, fsize) < 0) { 755 free(fw_buf); 756 PMD_DRV_LOG(ERR, "Firmware load failed."); 757 return -EIO; 758 } 759 760 PMD_DRV_LOG(INFO, "Done"); 761 762 free(fw_buf); 763 764 return 0; 765 } 766 767 static void 768 nfp_fw_unload(struct nfp_cpp *cpp) 769 { 770 struct nfp_nsp *nsp; 771 772 nsp = nfp_nsp_open(cpp); 773 if (nsp == NULL) 774 return; 775 776 nfp_nsp_device_soft_reset(nsp); 777 nfp_nsp_close(nsp); 778 } 779 780 static int 781 nfp_fw_reload(struct rte_pci_device *dev, 782 struct nfp_nsp *nsp, 783 char *card_desc) 784 { 785 int err; 786 787 nfp_nsp_device_soft_reset(nsp); 788 err = nfp_fw_upload(dev, nsp, card_desc); 789 if (err != 0) 790 PMD_DRV_LOG(ERR, "NFP firmware load failed"); 791 792 return err; 793 } 794 795 static int 796 nfp_fw_loaded_check_alive(struct rte_pci_device *dev, 797 struct nfp_nsp *nsp, 798 char *card_desc, 799 const struct nfp_dev_info *dev_info, 800 struct nfp_multi_pf *multi_pf) 801 { 802 int offset; 803 uint32_t i; 804 uint64_t beat; 805 uint32_t port_num; 806 807 /* 808 * If the beats of any other port changed in 3s, 809 * we should not reload the firmware. 810 */ 811 for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) { 812 if (port_num == multi_pf->function_id) 813 continue; 814 815 offset = NFP_BEAT_OFFSET(port_num); 816 beat = nn_readq(multi_pf->beat_addr + offset); 817 for (i = 0; i < 3; i++) { 818 sleep(1); 819 if (nn_readq(multi_pf->beat_addr + offset) != beat) 820 return 0; 821 } 822 } 823 824 return nfp_fw_reload(dev, nsp, card_desc); 825 } 826 827 static int 828 nfp_fw_reload_for_multipf(struct rte_pci_device *dev, 829 struct nfp_nsp *nsp, 830 char *card_desc, 831 struct nfp_cpp *cpp, 832 const struct nfp_dev_info *dev_info, 833 struct nfp_multi_pf *multi_pf) 834 { 835 int err; 836 837 err = nfp_net_keepalive_init(cpp, multi_pf); 838 if (err != 0) 839 PMD_DRV_LOG(ERR, "NFP write beat failed"); 840 841 if (nfp_nsp_fw_loaded(nsp)) 842 err = nfp_fw_loaded_check_alive(dev, nsp, card_desc, dev_info, multi_pf); 843 else 844 err = nfp_fw_reload(dev, nsp, card_desc); 845 if (err != 0) { 846 nfp_net_keepalive_uninit(multi_pf); 847 return err; 848 } 849 850 err = nfp_net_keepalive_start(multi_pf); 851 if (err != 0) { 852 nfp_net_keepalive_uninit(multi_pf); 853 PMD_DRV_LOG(ERR, "NFP write beat failed"); 854 } 855 856 return err; 857 } 858 859 static int 860 nfp_fw_setup(struct rte_pci_device *dev, 861 struct nfp_cpp *cpp, 862 struct nfp_eth_table *nfp_eth_table, 863 struct nfp_hwinfo *hwinfo, 864 const struct nfp_dev_info *dev_info, 865 struct nfp_multi_pf *multi_pf) 866 { 867 int err; 868 char card_desc[100]; 869 struct nfp_nsp *nsp; 870 const char *nfp_fw_model; 871 872 nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno"); 873 if (nfp_fw_model == NULL) 874 nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno"); 875 876 if (nfp_fw_model != NULL) { 877 PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model); 878 } else { 879 PMD_DRV_LOG(ERR, "firmware model NOT found"); 880 return -EIO; 881 } 882 883 if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) { 884 PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u", 885 nfp_eth_table->count); 886 return -EIO; 887 } 888 889 PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports", 890 nfp_eth_table->count); 891 892 PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed); 893 894 snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw", 895 nfp_fw_model, nfp_eth_table->count, 896 nfp_eth_table->ports[0].speed / 1000); 897 898 nsp = nfp_nsp_open(cpp); 899 if (nsp == NULL) { 900 PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); 901 return -EIO; 902 } 903 904 if (multi_pf->enabled) 905 err = nfp_fw_reload_for_multipf(dev, nsp, card_desc, cpp, dev_info, multi_pf); 906 else 907 err = nfp_fw_reload(dev, nsp, card_desc); 908 909 nfp_nsp_close(nsp); 910 return err; 911 } 912 913 static inline bool 914 nfp_check_multi_pf_from_fw(uint32_t total_vnics) 915 { 916 if (total_vnics == 1) 917 return true; 918 919 return false; 920 } 921 922 static inline bool 923 nfp_check_multi_pf_from_nsp(struct rte_pci_device *pci_dev, 924 struct nfp_cpp *cpp) 925 { 926 bool flag; 927 struct nfp_nsp *nsp; 928 929 nsp = nfp_nsp_open(cpp); 930 if (nsp == NULL) { 931 PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); 932 return false; 933 } 934 935 flag = (nfp_nsp_get_abi_ver_major(nsp) > 0) && 936 (pci_dev->id.device_id == PCI_DEVICE_ID_NFP3800_PF_NIC); 937 938 nfp_nsp_close(nsp); 939 return flag; 940 } 941 942 static int 943 nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev) 944 { 945 int err = 0; 946 uint64_t tx_base; 947 uint8_t *ctrl_bar; 948 struct nfp_hw *hw; 949 uint32_t cap_extend; 950 struct nfp_net_hw net_hw; 951 struct nfp_cpp_area *area; 952 char name[RTE_ETH_NAME_MAX_LEN]; 953 954 memset(&net_hw, 0, sizeof(struct nfp_net_hw)); 955 956 /* Map the symbol table */ 957 snprintf(name, sizeof(name), "_pf%u_net_bar0", 958 pf_dev->multi_pf.function_id); 959 ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, name, NFP_NET_CFG_BAR_SZ, 960 &area); 961 if (ctrl_bar == NULL) { 962 PMD_INIT_LOG(ERR, "Failed to find data vNIC memory symbol"); 963 return -ENODEV; 964 } 965 966 hw = &net_hw.super; 967 hw->ctrl_bar = ctrl_bar; 968 969 cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1); 970 if ((cap_extend & NFP_NET_CFG_CTRL_MULTI_PF) == 0) { 971 PMD_INIT_LOG(ERR, "Loaded firmware doesn't support multiple PF"); 972 err = -EINVAL; 973 goto end; 974 } 975 976 tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); 977 net_hw.tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ; 978 nfp_net_cfg_queue_setup(&net_hw); 979 rte_spinlock_init(&hw->reconfig_lock); 980 nfp_ext_reconfig(&net_hw.super, NFP_NET_CFG_CTRL_MULTI_PF, NFP_NET_CFG_UPDATE_GEN); 981 end: 982 nfp_cpp_area_release_free(area); 983 return err; 984 } 985 986 static int 987 nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev, 988 const struct nfp_dev_info *dev_info) 989 { 990 uint8_t i; 991 uint8_t id; 992 int ret = 0; 993 uint32_t total_vnics; 994 struct nfp_net_hw *hw; 995 unsigned int numa_node; 996 struct rte_eth_dev *eth_dev; 997 struct nfp_app_fw_nic *app_fw_nic; 998 struct nfp_eth_table *nfp_eth_table; 999 char bar_name[RTE_ETH_NAME_MAX_LEN]; 1000 char port_name[RTE_ETH_NAME_MAX_LEN]; 1001 char vnic_name[RTE_ETH_NAME_MAX_LEN]; 1002 1003 nfp_eth_table = pf_dev->nfp_eth_table; 1004 PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count); 1005 id = nfp_function_id_get(pf_dev, 0); 1006 1007 /* Allocate memory for the CoreNIC app */ 1008 app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0); 1009 if (app_fw_nic == NULL) 1010 return -ENOMEM; 1011 1012 /* Point the app_fw_priv pointer in the PF to the coreNIC app */ 1013 pf_dev->app_fw_priv = app_fw_nic; 1014 1015 /* Read the number of vNIC's created for the PF */ 1016 snprintf(vnic_name, sizeof(vnic_name), "nfd_cfg_pf%u_num_ports", id); 1017 total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, vnic_name, &ret); 1018 if (ret != 0 || total_vnics == 0 || total_vnics > 8) { 1019 PMD_INIT_LOG(ERR, "%s symbol with wrong value", vnic_name); 1020 ret = -ENODEV; 1021 goto app_cleanup; 1022 } 1023 1024 if (pf_dev->multi_pf.enabled) { 1025 if (!nfp_check_multi_pf_from_fw(total_vnics)) { 1026 PMD_INIT_LOG(ERR, "NSP report multipf, but FW report not multipf"); 1027 ret = -ENODEV; 1028 goto app_cleanup; 1029 } 1030 } else { 1031 /* 1032 * For coreNIC the number of vNICs exposed should be the same as the 1033 * number of physical ports. 1034 */ 1035 if (total_vnics != nfp_eth_table->count) { 1036 PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs"); 1037 ret = -ENODEV; 1038 goto app_cleanup; 1039 } 1040 } 1041 1042 /* Populate coreNIC app properties */ 1043 app_fw_nic->total_phyports = total_vnics; 1044 app_fw_nic->pf_dev = pf_dev; 1045 if (total_vnics > 1) 1046 app_fw_nic->multiport = true; 1047 1048 /* Map the symbol table */ 1049 snprintf(bar_name, sizeof(bar_name), "_pf%u_net_bar0", id); 1050 pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, bar_name, 1051 app_fw_nic->total_phyports * NFP_NET_CFG_BAR_SZ, 1052 &pf_dev->ctrl_area); 1053 if (pf_dev->ctrl_bar == NULL) { 1054 PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for %s", bar_name); 1055 ret = -EIO; 1056 goto app_cleanup; 1057 } 1058 1059 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar); 1060 1061 /* Loop through all physical ports on PF */ 1062 numa_node = rte_socket_id(); 1063 for (i = 0; i < app_fw_nic->total_phyports; i++) { 1064 id = nfp_function_id_get(pf_dev, i); 1065 snprintf(port_name, sizeof(port_name), "%s_port%u", 1066 pf_dev->pci_dev->device.name, id); 1067 1068 /* Allocate a eth_dev for this phyport */ 1069 eth_dev = rte_eth_dev_allocate(port_name); 1070 if (eth_dev == NULL) { 1071 ret = -ENODEV; 1072 goto port_cleanup; 1073 } 1074 1075 /* Allocate memory for this phyport */ 1076 eth_dev->data->dev_private = rte_zmalloc_socket(port_name, 1077 sizeof(struct nfp_net_hw), 1078 RTE_CACHE_LINE_SIZE, numa_node); 1079 if (eth_dev->data->dev_private == NULL) { 1080 ret = -ENOMEM; 1081 rte_eth_dev_release_port(eth_dev); 1082 goto port_cleanup; 1083 } 1084 1085 hw = eth_dev->data->dev_private; 1086 1087 /* Add this device to the PF's array of physical ports */ 1088 app_fw_nic->ports[id] = hw; 1089 1090 hw->dev_info = dev_info; 1091 hw->pf_dev = pf_dev; 1092 hw->cpp = pf_dev->cpp; 1093 hw->eth_dev = eth_dev; 1094 hw->idx = id; 1095 hw->nfp_idx = nfp_eth_table->ports[id].index; 1096 1097 eth_dev->device = &pf_dev->pci_dev->device; 1098 1099 /* 1100 * Ctrl/tx/rx BAR mappings and remaining init happens in 1101 * @nfp_net_init() 1102 */ 1103 ret = nfp_net_init(eth_dev); 1104 if (ret != 0) { 1105 ret = -ENODEV; 1106 goto port_cleanup; 1107 } 1108 1109 rte_eth_dev_probing_finish(eth_dev); 1110 1111 } /* End loop, all ports on this PF */ 1112 1113 return 0; 1114 1115 port_cleanup: 1116 for (i = 0; i < app_fw_nic->total_phyports; i++) { 1117 id = nfp_function_id_get(pf_dev, i); 1118 1119 if (app_fw_nic->ports[id] != NULL && 1120 app_fw_nic->ports[id]->eth_dev != NULL) { 1121 struct rte_eth_dev *tmp_dev; 1122 tmp_dev = app_fw_nic->ports[id]->eth_dev; 1123 nfp_ipsec_uninit(tmp_dev); 1124 rte_eth_dev_release_port(tmp_dev); 1125 app_fw_nic->ports[id] = NULL; 1126 } 1127 } 1128 nfp_cpp_area_free(pf_dev->ctrl_area); 1129 app_cleanup: 1130 rte_free(app_fw_nic); 1131 1132 return ret; 1133 } 1134 1135 static int 1136 nfp_pf_init(struct rte_pci_device *pci_dev) 1137 { 1138 uint32_t i; 1139 uint32_t id; 1140 int ret = 0; 1141 uint64_t addr; 1142 uint32_t index; 1143 uint32_t cpp_id; 1144 uint8_t function_id; 1145 struct nfp_cpp *cpp; 1146 struct nfp_pf_dev *pf_dev; 1147 struct nfp_hwinfo *hwinfo; 1148 enum nfp_app_fw_id app_fw_id; 1149 char name[RTE_ETH_NAME_MAX_LEN]; 1150 struct nfp_rtsym_table *sym_tbl; 1151 char app_name[RTE_ETH_NAME_MAX_LEN]; 1152 struct nfp_eth_table *nfp_eth_table; 1153 const struct nfp_dev_info *dev_info; 1154 1155 if (pci_dev == NULL) 1156 return -ENODEV; 1157 1158 if (pci_dev->mem_resource[0].addr == NULL) { 1159 PMD_INIT_LOG(ERR, "The address of BAR0 is NULL."); 1160 return -ENODEV; 1161 } 1162 1163 dev_info = nfp_dev_info_get(pci_dev->id.device_id); 1164 if (dev_info == NULL) { 1165 PMD_INIT_LOG(ERR, "Not supported device ID"); 1166 return -ENODEV; 1167 } 1168 1169 /* Allocate memory for the PF "device" */ 1170 function_id = (pci_dev->addr.function) & 0x07; 1171 snprintf(name, sizeof(name), "nfp_pf%u", function_id); 1172 pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0); 1173 if (pf_dev == NULL) { 1174 PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device"); 1175 return -ENOMEM; 1176 } 1177 1178 /* 1179 * When device bound to UIO, the device could be used, by mistake, 1180 * by two DPDK apps, and the UIO driver does not avoid it. This 1181 * could lead to a serious problem when configuring the NFP CPP 1182 * interface. Here we avoid this telling to the CPP init code to 1183 * use a lock file if UIO is being used. 1184 */ 1185 if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) 1186 cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false); 1187 else 1188 cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); 1189 1190 if (cpp == NULL) { 1191 PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); 1192 ret = -EIO; 1193 goto pf_cleanup; 1194 } 1195 1196 hwinfo = nfp_hwinfo_read(cpp); 1197 if (hwinfo == NULL) { 1198 PMD_INIT_LOG(ERR, "Error reading hwinfo table"); 1199 ret = -EIO; 1200 goto cpp_cleanup; 1201 } 1202 1203 /* Read the number of physical ports from hardware */ 1204 nfp_eth_table = nfp_eth_read_ports(cpp); 1205 if (nfp_eth_table == NULL) { 1206 PMD_INIT_LOG(ERR, "Error reading NFP ethernet table"); 1207 ret = -EIO; 1208 goto hwinfo_cleanup; 1209 } 1210 1211 pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_nsp(pci_dev, cpp); 1212 pf_dev->multi_pf.function_id = function_id; 1213 1214 /* Force the physical port down to clear the possible DMA error */ 1215 for (i = 0; i < nfp_eth_table->count; i++) { 1216 id = nfp_function_id_get(pf_dev, i); 1217 index = nfp_eth_table->ports[id].index; 1218 nfp_eth_set_configured(cpp, index, 0); 1219 } 1220 1221 if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo, 1222 dev_info, &pf_dev->multi_pf) != 0) { 1223 PMD_INIT_LOG(ERR, "Error when uploading firmware"); 1224 ret = -EIO; 1225 goto eth_table_cleanup; 1226 } 1227 1228 /* Now the symbol table should be there */ 1229 sym_tbl = nfp_rtsym_table_read(cpp); 1230 if (sym_tbl == NULL) { 1231 PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); 1232 ret = -EIO; 1233 goto fw_cleanup; 1234 } 1235 1236 /* Read the app ID of the firmware loaded */ 1237 snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id); 1238 app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret); 1239 if (ret != 0) { 1240 PMD_INIT_LOG(ERR, "Couldn't read %s from firmware", app_name); 1241 ret = -EIO; 1242 goto sym_tbl_cleanup; 1243 } 1244 1245 /* Populate the newly created PF device */ 1246 pf_dev->app_fw_id = app_fw_id; 1247 pf_dev->cpp = cpp; 1248 pf_dev->hwinfo = hwinfo; 1249 pf_dev->sym_tbl = sym_tbl; 1250 pf_dev->pci_dev = pci_dev; 1251 pf_dev->nfp_eth_table = nfp_eth_table; 1252 1253 /* Configure access to tx/rx vNIC BARs */ 1254 addr = nfp_qcp_queue_offset(dev_info, 0); 1255 cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); 1256 1257 pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id, 1258 addr, dev_info->qc_area_sz, &pf_dev->qc_area); 1259 if (pf_dev->qc_bar == NULL) { 1260 PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc"); 1261 ret = -EIO; 1262 goto sym_tbl_cleanup; 1263 } 1264 1265 PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar); 1266 1267 /* 1268 * PF initialization has been done at this point. Call app specific 1269 * init code now. 1270 */ 1271 switch (pf_dev->app_fw_id) { 1272 case NFP_APP_FW_CORE_NIC: 1273 if (pf_dev->multi_pf.enabled) { 1274 ret = nfp_enable_multi_pf(pf_dev); 1275 if (ret != 0) 1276 goto hwqueues_cleanup; 1277 } 1278 1279 PMD_INIT_LOG(INFO, "Initializing coreNIC"); 1280 ret = nfp_init_app_fw_nic(pf_dev, dev_info); 1281 if (ret != 0) { 1282 PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); 1283 goto hwqueues_cleanup; 1284 } 1285 break; 1286 case NFP_APP_FW_FLOWER_NIC: 1287 PMD_INIT_LOG(INFO, "Initializing Flower"); 1288 ret = nfp_init_app_fw_flower(pf_dev, dev_info); 1289 if (ret != 0) { 1290 PMD_INIT_LOG(ERR, "Could not initialize Flower!"); 1291 goto hwqueues_cleanup; 1292 } 1293 break; 1294 default: 1295 PMD_INIT_LOG(ERR, "Unsupported Firmware loaded"); 1296 ret = -EINVAL; 1297 goto hwqueues_cleanup; 1298 } 1299 1300 /* Register the CPP bridge service here for primary use */ 1301 ret = nfp_enable_cpp_service(pf_dev); 1302 if (ret != 0) 1303 PMD_INIT_LOG(INFO, "Enable cpp service failed."); 1304 1305 return 0; 1306 1307 hwqueues_cleanup: 1308 nfp_cpp_area_free(pf_dev->qc_area); 1309 sym_tbl_cleanup: 1310 free(sym_tbl); 1311 fw_cleanup: 1312 nfp_fw_unload(cpp); 1313 nfp_net_keepalive_stop(&pf_dev->multi_pf); 1314 eth_table_cleanup: 1315 free(nfp_eth_table); 1316 hwinfo_cleanup: 1317 free(hwinfo); 1318 cpp_cleanup: 1319 nfp_cpp_free(cpp); 1320 pf_cleanup: 1321 rte_free(pf_dev); 1322 1323 return ret; 1324 } 1325 1326 static int 1327 nfp_secondary_init_app_fw_nic(struct nfp_pf_dev *pf_dev) 1328 { 1329 uint32_t i; 1330 int err = 0; 1331 int ret = 0; 1332 uint8_t function_id; 1333 uint32_t total_vnics; 1334 struct nfp_net_hw *hw; 1335 char pf_name[RTE_ETH_NAME_MAX_LEN]; 1336 1337 /* Read the number of vNIC's created for the PF */ 1338 function_id = (pf_dev->pci_dev->addr.function) & 0x07; 1339 snprintf(pf_name, sizeof(pf_name), "nfd_cfg_pf%u_num_ports", function_id); 1340 total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, pf_name, &err); 1341 if (err != 0 || total_vnics == 0 || total_vnics > 8) { 1342 PMD_INIT_LOG(ERR, "%s symbol with wrong value", pf_name); 1343 return -ENODEV; 1344 } 1345 1346 for (i = 0; i < total_vnics; i++) { 1347 uint32_t id = i; 1348 struct rte_eth_dev *eth_dev; 1349 char port_name[RTE_ETH_NAME_MAX_LEN]; 1350 1351 if (nfp_check_multi_pf_from_fw(total_vnics)) 1352 id = function_id; 1353 snprintf(port_name, sizeof(port_name), "%s_port%u", 1354 pf_dev->pci_dev->device.name, id); 1355 1356 PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name); 1357 eth_dev = rte_eth_dev_attach_secondary(port_name); 1358 if (eth_dev == NULL) { 1359 PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name); 1360 ret = -ENODEV; 1361 break; 1362 } 1363 1364 eth_dev->process_private = pf_dev->cpp; 1365 hw = eth_dev->data->dev_private; 1366 nfp_net_ethdev_ops_mount(hw, eth_dev); 1367 1368 rte_eth_dev_probing_finish(eth_dev); 1369 } 1370 1371 return ret; 1372 } 1373 1374 /* 1375 * When attaching to the NFP4000/6000 PF on a secondary process there 1376 * is no need to initialise the PF again. Only minimal work is required 1377 * here. 1378 */ 1379 static int 1380 nfp_pf_secondary_init(struct rte_pci_device *pci_dev) 1381 { 1382 int ret = 0; 1383 struct nfp_cpp *cpp; 1384 uint8_t function_id; 1385 struct nfp_pf_dev *pf_dev; 1386 enum nfp_app_fw_id app_fw_id; 1387 char name[RTE_ETH_NAME_MAX_LEN]; 1388 struct nfp_rtsym_table *sym_tbl; 1389 const struct nfp_dev_info *dev_info; 1390 char app_name[RTE_ETH_NAME_MAX_LEN]; 1391 1392 if (pci_dev == NULL) 1393 return -ENODEV; 1394 1395 if (pci_dev->mem_resource[0].addr == NULL) { 1396 PMD_INIT_LOG(ERR, "The address of BAR0 is NULL."); 1397 return -ENODEV; 1398 } 1399 1400 dev_info = nfp_dev_info_get(pci_dev->id.device_id); 1401 if (dev_info == NULL) { 1402 PMD_INIT_LOG(ERR, "Not supported device ID"); 1403 return -ENODEV; 1404 } 1405 1406 /* Allocate memory for the PF "device" */ 1407 snprintf(name, sizeof(name), "nfp_pf%d", 0); 1408 pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0); 1409 if (pf_dev == NULL) { 1410 PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device"); 1411 return -ENOMEM; 1412 } 1413 1414 /* 1415 * When device bound to UIO, the device could be used, by mistake, 1416 * by two DPDK apps, and the UIO driver does not avoid it. This 1417 * could lead to a serious problem when configuring the NFP CPP 1418 * interface. Here we avoid this telling to the CPP init code to 1419 * use a lock file if UIO is being used. 1420 */ 1421 if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) 1422 cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false); 1423 else 1424 cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); 1425 1426 if (cpp == NULL) { 1427 PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); 1428 ret = -EIO; 1429 goto pf_cleanup; 1430 } 1431 1432 /* 1433 * We don't have access to the PF created in the primary process 1434 * here so we have to read the number of ports from firmware. 1435 */ 1436 sym_tbl = nfp_rtsym_table_read(cpp); 1437 if (sym_tbl == NULL) { 1438 PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); 1439 ret = -EIO; 1440 goto pf_cleanup; 1441 } 1442 1443 /* Read the app ID of the firmware loaded */ 1444 function_id = pci_dev->addr.function & 0x7; 1445 snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id); 1446 app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret); 1447 if (ret != 0) { 1448 PMD_INIT_LOG(ERR, "Couldn't read %s from fw", app_name); 1449 ret = -EIO; 1450 goto sym_tbl_cleanup; 1451 } 1452 1453 /* Populate the newly created PF device */ 1454 pf_dev->app_fw_id = app_fw_id; 1455 pf_dev->cpp = cpp; 1456 pf_dev->sym_tbl = sym_tbl; 1457 pf_dev->pci_dev = pci_dev; 1458 1459 /* Call app specific init code now */ 1460 switch (app_fw_id) { 1461 case NFP_APP_FW_CORE_NIC: 1462 PMD_INIT_LOG(INFO, "Initializing coreNIC"); 1463 ret = nfp_secondary_init_app_fw_nic(pf_dev); 1464 if (ret != 0) { 1465 PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); 1466 goto sym_tbl_cleanup; 1467 } 1468 break; 1469 case NFP_APP_FW_FLOWER_NIC: 1470 PMD_INIT_LOG(INFO, "Initializing Flower"); 1471 ret = nfp_secondary_init_app_fw_flower(pf_dev); 1472 if (ret != 0) { 1473 PMD_INIT_LOG(ERR, "Could not initialize Flower!"); 1474 goto sym_tbl_cleanup; 1475 } 1476 break; 1477 default: 1478 PMD_INIT_LOG(ERR, "Unsupported Firmware loaded"); 1479 ret = -EINVAL; 1480 goto sym_tbl_cleanup; 1481 } 1482 1483 return 0; 1484 1485 sym_tbl_cleanup: 1486 free(sym_tbl); 1487 pf_cleanup: 1488 rte_free(pf_dev); 1489 1490 return ret; 1491 } 1492 1493 static int 1494 nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1495 struct rte_pci_device *dev) 1496 { 1497 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 1498 return nfp_pf_init(dev); 1499 else 1500 return nfp_pf_secondary_init(dev); 1501 } 1502 1503 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = { 1504 { 1505 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 1506 PCI_DEVICE_ID_NFP3800_PF_NIC) 1507 }, 1508 { 1509 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 1510 PCI_DEVICE_ID_NFP4000_PF_NIC) 1511 }, 1512 { 1513 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 1514 PCI_DEVICE_ID_NFP6000_PF_NIC) 1515 }, 1516 { 1517 RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 1518 PCI_DEVICE_ID_NFP3800_PF_NIC) 1519 }, 1520 { 1521 RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 1522 PCI_DEVICE_ID_NFP4000_PF_NIC) 1523 }, 1524 { 1525 RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE, 1526 PCI_DEVICE_ID_NFP6000_PF_NIC) 1527 }, 1528 { 1529 .vendor_id = 0, 1530 }, 1531 }; 1532 1533 static int 1534 nfp_pci_uninit(struct rte_eth_dev *eth_dev) 1535 { 1536 uint16_t port_id; 1537 struct rte_pci_device *pci_dev; 1538 1539 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1540 1541 /* Free up all physical ports under PF */ 1542 RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) 1543 rte_eth_dev_close(port_id); 1544 /* 1545 * Ports can be closed and freed but hotplugging is not 1546 * currently supported. 1547 */ 1548 return -ENOTSUP; 1549 } 1550 1551 static int 1552 eth_nfp_pci_remove(struct rte_pci_device *pci_dev) 1553 { 1554 return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit); 1555 } 1556 1557 static struct rte_pci_driver rte_nfp_net_pf_pmd = { 1558 .id_table = pci_id_nfp_pf_net_map, 1559 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1560 .probe = nfp_pf_pci_probe, 1561 .remove = eth_nfp_pci_remove, 1562 }; 1563 1564 RTE_PMD_REGISTER_PCI(NFP_PF_DRIVER_NAME, rte_nfp_net_pf_pmd); 1565 RTE_PMD_REGISTER_PCI_TABLE(NFP_PF_DRIVER_NAME, pci_id_nfp_pf_net_map); 1566 RTE_PMD_REGISTER_KMOD_DEP(NFP_PF_DRIVER_NAME, "* igb_uio | uio_pci_generic | vfio"); 1567